Mirror of https://github.com/serai-dex/serai.git (synced 2025-03-22 07:09:00 +00:00)
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file. Cleans it up and makes it more robust.

* Have expected_next_batch return an error instead of retrying. While convenient to offer an error-free implementation, it potentially caused very long-lived lock acquisitions in handle_processor_message.

* Unify and clean DkgConfirmer and DkgRemoval. Does so via adding a new file for the common code, SigningProtocol. Modifies from_cache to return the preprocess with the machine, as there's no reason not to, and removes an unused Result around the type. Clarifies the security around deterministic nonces, removing them for saved-to-disk cached preprocesses; the cached preprocesses are encrypted, as the DB is not a proper secret store. Moves arguments always present in the protocol from function arguments into the struct itself. Removes the horribly ugly code in DkgRemoval, fixing multiple issues present in it which would cause it to fail on use.

* Set SeraiBlockNumber in cosign.rs, as it's used by the cosigning protocol.

* Remove unnecessary Clone from lambdas in coordinator.

* Remove the EventDb from the Tributary scanner. We used per-Transaction DB TXNs so that, on error, we don't have to rescan the entire block, only the rest of it. We prevented scanning transactions multiple times by tracking which we had already handled. This is over-engineered and not worth it.

* Implement borsh for HasEvents, removing the manual encoding.

* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs. Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes if any validator had multiple key shares.

* Strictly type DataSpecification's Label.

* Correct threshold_i_map_to_keys_and_musig_i_map. It didn't include the participant's own index and accordingly was offset.

* Create TributaryBlockHandler. This struct contains all variables previously passed to handle_block and stops them from being passed around again and again. It also ensures fatal_slash is only called while handling a block, as required since it expects to operate under perfect consensus.

* Inline accumulate; store confirmation nonces with shares. Inlining accumulate makes sense due to the amount of data accumulate needed to be passed. Storing confirmation nonces with shares ensures that both are available or neither; previously, one could be present while the other wasn't (requiring a runtime assert to ensure we didn't bungle it somehow).

* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs.

* Move Label into SignData. All of our transactions which use SignData end up with the same common usage pattern for Label, justifying this. Removes 3 transactions, explicitly de-duplicating their handlers.

* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair.

* Remove the manual read/write for TributarySpec in favor of borsh. This struct doesn't gain any optimizations from the manual impl, and using borsh reduces our scope.

* Use temporary variables to further minimize LoC in the tributary handler.

* Remove usage of tuples for non-trivial Tributary transactions.

* Remove serde from dkg. serde could be used to deserialize internally inconsistent objects, which could lead to panics or faults. The BorshDeserialize derives have been replaced with a manual implementation which won't produce inconsistent objects (a sketch of this pattern follows the commit metadata below).

* Abstract Future generics using new trait definitions in coordinator.

* Move publish_signed_transaction to tributary/mod.rs to reduce the size of main.rs.

* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs.
This commit is contained in: parent 6caf45ea1d, commit 11fdb6da1d.

34 changed files with 2531 additions and 2992 deletions.
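To illustrate the serde-removal rationale from the message above: a derived deserializer accepts any byte pattern matching the field layout, including internally inconsistent values. Here is a minimal sketch of the manual-validation approach the commit describes, using a hypothetical struct (this is not the actual dkg code):

use std::io::{self, Read};

// A hypothetical threshold parameter set: `t` must satisfy 1 <= t <= n,
// or later threshold math faults.
pub struct ThresholdParams {
  t: u16,
  n: u16,
}

impl ThresholdParams {
  // A manual deserializer which refuses to construct an inconsistent object,
  // unlike a blind derive which would happily accept t > n and fault later.
  pub fn deserialize_reader<R: Read>(reader: &mut R) -> io::Result<Self> {
    let mut buf = [0; 2];
    reader.read_exact(&mut buf)?;
    let t = u16::from_le_bytes(buf);
    reader.read_exact(&mut buf)?;
    let n = u16::from_le_bytes(buf);
    if (t == 0) || (t > n) {
      Err(io::Error::new(io::ErrorKind::InvalidData, "invalid threshold parameters"))?;
    }
    Ok(ThresholdParams { t, n })
  }
}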
Cargo.lock (generated; 2 lines changed)
@@ -1571,7 +1571,6 @@ dependencies = [
  "multiexp",
  "rand_core",
  "schnorr-signatures",
- "serde",
  "std-shims",
  "thiserror",
  "zeroize",

@@ -7343,7 +7342,6 @@ dependencies = [
  "log",
  "modular-frost",
  "parity-scale-codec",
- "rand_chacha",
  "rand_core",
  "schnorr-signatures",
  "serai-client",
@@ -358,7 +358,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
     _: (),
     _: ThresholdKeys<Secp256k1>,
     _: CachedPreprocess,
-  ) -> Result<Self, FrostError> {
+  ) -> (Self, Self::Preprocess) {
     unimplemented!(
       "Bitcoin transactions don't support caching their preprocesses due to {}",
       "being already bound to a specific transaction"

@@ -226,7 +226,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
     );
   }

-  fn from_cache(_: (), _: ThresholdKeys<Ed25519>, _: CachedPreprocess) -> Result<Self, FrostError> {
+  fn from_cache(_: (), _: ThresholdKeys<Ed25519>, _: CachedPreprocess) -> (Self, Self::Preprocess) {
     unimplemented!(
       "Monero transactions don't support caching their preprocesses due to {}",
       "being already bound to a specific transaction"
@@ -45,6 +45,7 @@ macro_rules! create_db {
       pub struct $field_name;
       impl $field_name {
         pub fn key($($arg: $arg_type),*) -> Vec<u8> {
+          use scale::Encode;
           $crate::serai_db_key(
             stringify!($db_name).as_bytes(),
             stringify!($field_name).as_bytes(),
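For context on the create_db! hunk above: the macro generates, per declared field, a unit struct whose key function is domain-separated by DB name and field name. The third argument to serai_db_key is cut off in this hunk; the sketch below assumes it's the SCALE-encoded arguments (which would be why the Encode import was added). The expansion shown is approximate, not the macro's literal output:

// Hypothetical stand-in for serai_db_key: domain-separate by DB and field
// names, then append the encoded arguments (the real function may well
// length-prefix or hash instead of naively concatenating)
fn serai_db_key(db: &'static [u8], field: &'static [u8], args: Vec<u8>) -> Vec<u8> {
  [db, field, args.as_slice()].concat()
}

pub struct IntendedCosign;
impl IntendedCosign {
  pub fn key() -> Vec<u8> {
    use scale::Encode;
    serai_db_key(b"SubstrateCosignDb", b"IntendedCosign", ().encode())
  }
}

Scoping use scale::Encode; inside the generated function means call sites of the macro don't themselves need to import the Encode trait.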
@@ -18,7 +18,6 @@ async-trait = { version = "0.1", default-features = false }

 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
-rand_chacha = { version = "0.3", default-features = false, features = ["std"] }

 blake2 = { version = "0.10", default-features = false, features = ["std"] }

@@ -38,7 +37,7 @@ message-queue = { package = "serai-message-queue", path = "../message-queue" }
 tributary = { package = "tributary-chain", path = "./tributary" }

 sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
-serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }
+serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }

 hex = { version = "0.4", default-features = false, features = ["std"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
@@ -4,6 +4,7 @@ use blake2::{
 };

 use scale::Encode;
+use borsh::{BorshSerialize, BorshDeserialize};
 use serai_client::{
   primitives::NetworkId,
   validator_sets::primitives::{Session, ValidatorSet},

@@ -20,7 +21,6 @@ create_db!(
     HandledMessageDb: (network: NetworkId) -> u64,
     ActiveTributaryDb: () -> Vec<u8>,
     RetiredTributaryDb: (set: ValidatorSet) -> (),
-    SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec<u8>,
     FirstPreprocessDb: (
       network: NetworkId,
       id_type: RecognizedIdType,
@@ -43,7 +43,7 @@ impl ActiveTributaryDb {

     let mut tributaries = vec![];
     while !bytes_ref.is_empty() {
-      tributaries.push(TributarySpec::read(&mut bytes_ref).unwrap());
+      tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap());
     }

     (bytes, tributaries)

@@ -57,7 +57,7 @@ impl ActiveTributaryDb {
       }
     }

-    spec.write(&mut existing_bytes).unwrap();
+    spec.serialize(&mut existing_bytes).unwrap();
     ActiveTributaryDb::set(txn, &existing_bytes);
   }

@@ -72,28 +72,13 @@ impl ActiveTributaryDb {

     let mut bytes = vec![];
     for active in active {
-      active.write(&mut bytes).unwrap();
+      active.serialize(&mut bytes).unwrap();
     }
     Self::set(txn, &bytes);
     RetiredTributaryDb::set(txn, set, &());
   }
 }

-impl SignedTransactionDb {
-  pub fn take_signed_transaction(
-    txn: &mut impl DbTxn,
-    order: &[u8],
-    nonce: u32,
-  ) -> Option<Transaction> {
-    let res = SignedTransactionDb::get(txn, order, nonce)
-      .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap());
-    if res.is_some() {
-      Self::del(txn, order, nonce);
-    }
-    res
-  }
-}
-
 impl FirstPreprocessDb {
   pub fn save_first_preprocess(
     txn: &mut impl DbTxn,
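The three hunks above swap TributarySpec's hand-written read/write for borsh's derived serialize/deserialize_reader, per the "remove the manual read/write for TributarySpec" commit item. A minimal, self-contained sketch of the same append-then-drain pattern, with a stand-in struct since TributarySpec's real fields aren't shown in this diff:

use borsh::{BorshSerialize, BorshDeserialize};

#[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize)]
struct Spec {
  genesis: [u8; 32],
  start_time: u64,
}

fn main() {
  // Concatenate multiple specs into one buffer, as the serialize loop above does
  let specs = vec![
    Spec { genesis: [0; 32], start_time: 1 },
    Spec { genesis: [1; 32], start_time: 2 },
  ];
  let mut bytes = vec![];
  for spec in &specs {
    spec.serialize(&mut bytes).unwrap();
  }

  // Read them back until the buffer is empty, as the while loop above does
  let mut bytes_ref = bytes.as_slice();
  let mut read_back = vec![];
  while !bytes_ref.is_empty() {
    read_back.push(Spec::deserialize_reader(&mut bytes_ref).unwrap());
  }
  assert_eq!(specs, read_back);
}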
@@ -31,12 +31,12 @@ use tokio::{
   time::sleep,
 };

-use ::tributary::{
-  ProvidedError, TransactionKind, TransactionError, TransactionTrait, Block, Tributary,
-};
+use ::tributary::{ProvidedError, TransactionKind, TransactionTrait, Block, Tributary};

 mod tributary;
-use crate::tributary::{TributarySpec, SignData, Transaction, scanner::RecognizedIdType, PlanIds};
+use crate::tributary::{
+  TributarySpec, Label, SignData, Transaction, scanner::RecognizedIdType, PlanIds,
+};

 mod db;
 use db::*;
@@ -126,48 +126,6 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
     .unwrap();
 }

-async fn publish_signed_transaction<D: Db, P: P2p>(
-  txn: &mut D::Transaction<'_>,
-  tributary: &Tributary<D, Transaction, P>,
-  tx: Transaction,
-) {
-  log::debug!("publishing transaction {}", hex::encode(tx.hash()));
-
-  let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() {
-    let signer = signed.signer;
-
-    // Safe as we should deterministically create transactions, meaning if this is already on-disk,
-    // it's what we're saving now
-    SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize());
-
-    (order, signer)
-  } else {
-    panic!("non-signed transaction passed to publish_signed_transaction");
-  };
-
-  // If we're trying to publish 5, when the last transaction published was 3, this will delay
-  // publication until the point in time we publish 4
-  while let Some(tx) = SignedTransactionDb::take_signed_transaction(
-    txn,
-    &order,
-    tributary
-      .next_nonce(&signer, &order)
-      .await
-      .expect("we don't have a nonce, meaning we aren't a participant on this tributary"),
-  ) {
-    // We need to return a proper error here to enable that, due to a race condition around
-    // multiple publications
-    match tributary.add_transaction(tx.clone()).await {
-      Ok(_) => {}
-      // Some asynchonicity if InvalidNonce, assumed safe to deterministic nonces
-      Err(TransactionError::InvalidNonce) => {
-        log::warn!("publishing TX {tx:?} returned InvalidNonce. was it already added?")
-      }
-      Err(e) => panic!("created an invalid transaction: {e:?}"),
-    }
-  }
-}
-
 // TODO: Find a better pattern for this
 static HANDOVER_VERIFY_QUEUE_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
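The function removed above (relocated to tributary/mod.rs per the commit message, as the later hunks calling tributary::publish_signed_transaction confirm) implements nonce-gapped publication: the signed transaction is persisted under its (order, nonce), then every transaction from the chain's next expected nonce onward is drained and published in order. A toy sketch of that drain loop, with a HashMap standing in for SignedTransactionDb and a plain integer for tributary.next_nonce:

use std::collections::HashMap;

// Persist the transaction under its nonce, then publish every consecutively
// available nonce starting from the chain's next expected one.
fn publish(store: &mut HashMap<u32, Vec<u8>>, chain_next_nonce: u32, nonce: u32, tx: Vec<u8>) {
  store.insert(nonce, tx);
  let mut next = chain_next_nonce;
  while let Some(tx) = store.remove(&next) {
    println!("publishing tx with nonce {next}: {} bytes", tx.len());
    next += 1;
  }
}

With this shape, trying to publish nonce 5 while the chain expects 4 publishes nothing; once the nonce-4 transaction is saved, one call drains 4 and then 5, exactly as the removed comment describes.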
@@ -317,7 +275,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
       BatchDb::set(&mut txn, batch.batch.network, batch.batch.id, &batch.clone());

       // Get the next-to-execute batch ID
-      let mut next = substrate::get_expected_next_batch(serai, network).await;
+      let Ok(mut next) = substrate::expected_next_batch(serai, network).await else {
+        return false;
+      };

       // Since we have a new batch, publish all batches yet to be published to Serai
       // This handles the edge-case where batch n+1 is signed before batch n is

@@ -329,7 +289,10 @@ async fn handle_processor_message<D: Db, P: P2p>(
       while let Some(batch) = batches.pop_front() {
         // If this Batch should no longer be published, continue
-        if substrate::get_expected_next_batch(serai, network).await > batch.batch.id {
+        let Ok(expected_next_batch) = substrate::expected_next_batch(serai, network).await else {
+          return false;
+        };
+        if expected_next_batch > batch.batch.id {
           continue;
         }
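These two hunks implement the commit-message item about expected_next_batch returning an error instead of retrying. Per that item, the prior error-free get_expected_next_batch could only be error-free by waiting out RPC failures internally, which potentially held the locks acquired by handle_processor_message for a very long time. With the Result-returning variant, the handler bails out with false (presumably marking the message as unhandled so it's retried later), releasing its locks promptly.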
@@ -398,7 +361,11 @@ async fn handle_processor_message<D: Db, P: P2p>(
   let txs = match msg.msg.clone() {
     ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
       key_gen::ProcessorMessage::Commitments { id, commitments } => {
-        vec![Transaction::DkgCommitments(id.attempt, commitments, Transaction::empty_signed())]
+        vec![Transaction::DkgCommitments {
+          attempt: id.attempt,
+          commitments,
+          signed: Transaction::empty_signed(),
+        }]
       }
       key_gen::ProcessorMessage::InvalidCommitments { id: _, faulty } => {
         // This doesn't need the ID since it's a Provided transaction which everyone will provide

@@ -411,7 +378,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
       }
       key_gen::ProcessorMessage::Shares { id, mut shares } => {
         // Create a MuSig-based machine to inform Substrate of this key generation
-        let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, id.attempt);
+        let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);

         let our_i = spec
           .i(pub_key)

@@ -449,7 +416,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
         // As for the safety of calling error_generating_key_pair, the processor is presumed
         // to only send InvalidShare or GeneratedKeyPair for a given attempt
         let mut txs = if let Some(faulty) =
-          crate::tributary::error_generating_key_pair::<_>(&txn, key, spec, id.attempt)
+          crate::tributary::error_generating_key_pair(&mut txn, key, spec, id.attempt)
         {
           vec![Transaction::RemoveParticipant(faulty)]
         } else {

@@ -480,7 +447,11 @@ async fn handle_processor_message<D: Db, P: P2p>(

         match share {
           Ok(share) => {
-            vec![Transaction::DkgConfirmed(id.attempt, share, Transaction::empty_signed())]
+            vec![Transaction::DkgConfirmed {
+              attempt: id.attempt,
+              confirmation_share: share,
+              signed: Transaction::empty_signed(),
+            }]
           }
           Err(p) => {
             vec![Transaction::RemoveParticipant(p)]
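The DkgCommitments and DkgConfirmed hunks above realize the "remove usage of tuples for non-trivial Tributary transactions" commit item: tuple variants force every construction and match site to remember positional meaning, while struct variants name each field. A minimal sketch of the shape change (the field types here are stand-ins, not the real definitions):

// Before: positional fields, easy to transpose arguments silently
enum TransactionBefore {
  DkgConfirmed(u32, [u8; 32], Vec<u8>),
}

// After: each field named at every construction and destructuring site
enum TransactionAfter {
  DkgConfirmed { attempt: u32, confirmation_share: [u8; 32], signed: Vec<u8> },
}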
@@ -511,18 +482,20 @@ async fn handle_processor_message<D: Db, P: P2p>(

           vec![]
         } else {
-          vec![Transaction::SignPreprocess(SignData {
+          vec![Transaction::Sign(SignData {
             plan: id.id,
             attempt: id.attempt,
+            label: Label::Preprocess,
             data: preprocesses,
             signed: Transaction::empty_signed(),
           })]
         }
       }
       sign::ProcessorMessage::Share { id, shares } => {
-        vec![Transaction::SignShare(SignData {
+        vec![Transaction::Sign(SignData {
           plan: id.id,
           attempt: id.attempt,
+          label: Label::Share,
           data: shares,
           signed: Transaction::empty_signed(),
         })]
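This hunk (and the SubstrateSign hunks below) realize two commit-message items at once: "strictly type DataSpecification's Label" and "move Label into SignData". Instead of distinct Preprocess/Share transaction variants, one variant carries a typed label. A sketch of the resulting shape, inferred from the construction sites visible in this diff (the real SignData's generics and field types may differ):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Label {
  Preprocess,
  Share,
}

// Stand-in for the tributary's signature metadata type
pub struct Signed;

pub struct SignData<Id> {
  pub plan: Id,
  pub attempt: u32,
  pub label: Label,
  pub data: Vec<Vec<u8>>,
  pub signed: Signed,
}

Collapsing SignPreprocess/SignShare (and their Substrate counterparts) into Sign/SubstrateSign with a label removes three transaction variants and de-duplicates their handlers, per the commit message.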
@@ -555,9 +528,10 @@ async fn handle_processor_message<D: Db, P: P2p>(
         vec![]
       }
       coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } => {
-        vec![Transaction::SubstratePreprocess(SignData {
+        vec![Transaction::SubstrateSign(SignData {
           plan: id.id,
           attempt: id.attempt,
+          label: Label::Preprocess,
           data: preprocesses.into_iter().map(Into::into).collect(),
           signed: Transaction::empty_signed(),
         })]

@@ -586,13 +560,13 @@ async fn handle_processor_message<D: Db, P: P2p>(
           preprocesses.into_iter().map(Into::into).collect(),
         );

-        let intended = Transaction::Batch(
-          block.0,
-          match id.id {
+        let intended = Transaction::Batch {
+          block: block.0,
+          batch: match id.id {
             SubstrateSignableId::Batch(id) => id,
             _ => panic!("BatchPreprocess did not contain Batch ID"),
           },
-        );
+        };

         // If this is the new key's first Batch, only create this TX once we verify all
         // all prior published `Batch`s

@@ -649,18 +623,20 @@ async fn handle_processor_message<D: Db, P: P2p>(
           res
         }
       } else {
-        vec![Transaction::SubstratePreprocess(SignData {
+        vec![Transaction::SubstrateSign(SignData {
           plan: id.id,
           attempt: id.attempt,
+          label: Label::Preprocess,
           data: preprocesses.into_iter().map(Into::into).collect(),
           signed: Transaction::empty_signed(),
         })]
       }
     }
     coordinator::ProcessorMessage::SubstrateShare { id, shares } => {
-      vec![Transaction::SubstrateShare(SignData {
+      vec![Transaction::SubstrateSign(SignData {
         plan: id.id,
         attempt: id.attempt,
+        label: Label::Share,
        data: shares.into_iter().map(|share| share.to_vec()).collect(),
        signed: Transaction::empty_signed(),
      })]

@@ -706,7 +682,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
       }
       TransactionKind::Signed(_, _) => {
         tx.sign(&mut OsRng, genesis, key);
-        publish_signed_transaction(&mut txn, tributary, tx).await;
+        tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
       }
     }
   }
@@ -1079,16 +1055,18 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
       };

       let mut tx = match id_type {
-        RecognizedIdType::Batch => Transaction::SubstratePreprocess(SignData {
+        RecognizedIdType::Batch => Transaction::SubstrateSign(SignData {
           data: get_preprocess(&raw_db, id_type, &id).await,
           plan: SubstrateSignableId::Batch(id.as_slice().try_into().unwrap()),
+          label: Label::Preprocess,
           attempt: 0,
           signed: Transaction::empty_signed(),
         }),

-        RecognizedIdType::Plan => Transaction::SignPreprocess(SignData {
+        RecognizedIdType::Plan => Transaction::Sign(SignData {
           data: get_preprocess(&raw_db, id_type, &id).await,
           plan: id.try_into().unwrap(),
+          label: Label::Preprocess,
           attempt: 0,
           signed: Transaction::empty_signed(),
         }),

@@ -1119,7 +1097,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
       // TODO: Should this not take a txn accordingly? It's best practice to take a txn, yet
       // taking a txn fails to declare its achieved independence
       let mut txn = raw_db.txn();
-      publish_signed_transaction(&mut txn, tributary, tx).await;
+      tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
       txn.commit();
       break;
     }
@@ -12,57 +12,48 @@
   ensure any block needing cosigned is consigned within a reasonable amount of time.
 */

-use core::{ops::Deref, time::Duration};
-use std::{
-  sync::Arc,
-  collections::{HashSet, HashMap},
-};
-
 use zeroize::Zeroizing;

-use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
+use ciphersuite::{Ciphersuite, Ristretto};

+use borsh::{BorshSerialize, BorshDeserialize};
+
-use scale::{Encode, Decode};
 use serai_client::{
-  SeraiError, Block, Serai, TemporalSerai,
-  primitives::{BlockHash, NetworkId},
-  validator_sets::{
-    primitives::{Session, ValidatorSet, KeyPair, amortize_excess_key_shares},
-    ValidatorSetsEvent,
-  },
-  in_instructions::InInstructionsEvent,
-  coins::CoinsEvent,
+  SeraiError, Serai,
+  primitives::NetworkId,
+  validator_sets::primitives::{Session, ValidatorSet},
 };

 use serai_db::*;

-use processor_messages::SubstrateContext;
-
-use tokio::{sync::mpsc, time::sleep};
-
-use crate::{
-  Db,
-  processors::Processors,
-  tributary::{TributarySpec, SeraiBlockNumber},
-};
+use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber};

 // 5 minutes, expressed in blocks
 // TODO: Pull a constant for block time
 const COSIGN_DISTANCE: u64 = 5 * 60 / 6;

+#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+enum HasEvents {
+  KeyGen,
+  Yes,
+  No,
+}
+
 create_db!(
   SubstrateCosignDb {
-    CosignTriggered: () -> (),
     IntendedCosign: () -> (u64, Option<u64>),
-    BlockHasEvents: (block: u64) -> u8,
+    BlockHasEvents: (block: u64) -> HasEvents,
     LatestCosignedBlock: () -> u64,
   }
 );

 impl IntendedCosign {
+  // Sets the intended to cosign block, clearing the prior value entirely.
   pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {
     Self::set(txn, &(intended, None::<u64>));
   }

+  // Sets the cosign skipped since the last intended to cosign block.
   pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {
     let (intended, prior_skipped) = Self::get(txn).unwrap();
     assert!(prior_skipped.is_none());
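The BlockHasEvents: (block: u64) -> HasEvents change above is the "implement borsh for HasEvents" commit item: deriving BorshSerialize/BorshDeserialize lets create_db! store the enum directly, replacing the manual one-byte encoding removed in the next hunk. A self-contained round-trip sketch of why that manual encoding (and its length assert) was redundant:

use borsh::{BorshSerialize, BorshDeserialize};

#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
  KeyGen,
  Yes,
  No,
}

fn main() {
  // borsh encodes a fieldless enum as its one-byte variant index, which is
  // exactly the single byte the removed manual encoding asserted
  let mut bytes = vec![];
  HasEvents::KeyGen.serialize(&mut bytes).unwrap();
  assert_eq!(bytes, [0]);
  let round_tripped = HasEvents::deserialize_reader(&mut bytes.as_slice()).unwrap();
  assert_eq!(round_tripped, HasEvents::KeyGen);
}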
@@ -89,12 +80,6 @@ impl CosignTransactions {
   }
 }

-#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
-enum HasEvents {
-  KeyGen,
-  Yes,
-  No,
-}
-
 async fn block_has_events(
   txn: &mut impl DbTxn,
   serai: &Serai,

@@ -122,45 +107,112 @@ async fn block_has_events(

       let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };

-      let has_events = has_events.encode();
-      assert_eq!(has_events.len(), 1);
-      BlockHasEvents::set(txn, block, &has_events[0]);
+      BlockHasEvents::set(txn, block, &has_events);
       Ok(HasEvents::Yes)
     }
-    Some(code) => Ok(HasEvents::decode(&mut [code].as_slice()).unwrap()),
+    Some(code) => Ok(code),
   }
 }

+async fn potentially_cosign_block(
+  txn: &mut impl DbTxn,
+  serai: &Serai,
+  block: u64,
+  skipped_block: Option<u64>,
+  window_end_exclusive: u64,
+) -> Result<bool, SeraiError> {
+  // The following code regarding marking cosigned if prior block is cosigned expects this block to
+  // not be zero
+  // While we could perform this check there, there's no reason not to optimize the entire function
+  // as such
+  if block == 0 {
+    return Ok(false);
+  }
+
+  let block_has_events = block_has_events(txn, serai, block).await?;
+
+  // If this block had no events and immediately follows a cosigned block, mark it as cosigned
+  if (block_has_events == HasEvents::No) &&
+    (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
+  {
+    LatestCosignedBlock::set(txn, &block);
+  }
+
+  // If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks
+  // trigger a cosigning protocol covering it
+  // This means there will be the maximum delay allowed from a block needing cosigning occuring
+  // and a cosign for it triggering
+  let maximally_latent_cosign_block =
+    skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);
+
+  // If this block is within the window,
+  if block < window_end_exclusive {
+    // and set a key, cosign it
+    if block_has_events == HasEvents::KeyGen {
+      IntendedCosign::set_intended_cosign(txn, block);
+      // Carry skipped if it isn't included by cosigning this block
+      if let Some(skipped) = skipped_block {
+        if skipped > block {
+          IntendedCosign::set_skipped_cosign(txn, block);
+        }
+      }
+      return Ok(true);
+    }
+  } else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) {
+    // Since this block was outside the window and had events/was maximally latent, cosign it
+    IntendedCosign::set_intended_cosign(txn, block);
+    return Ok(true);
+  }
+  Ok(false)
+}
+
 /*
   Advances the cosign protocol as should be done per the latest block.

   A block is considered cosigned if:
   A) It was cosigned
   B) It's the parent of a cosigned block
-  C) It immediately follows a cosigned block and has no events requiring cosigning (TODO)
+  C) It immediately follows a cosigned block and has no events requiring cosigning
+
+  This only actually performs advancement within a limited bound (generally until it finds a block
+  which should be cosigned). Accordingly, it is necessary to call multiple times even if
+  `latest_number` doesn't change.
 */
-async fn advance_cosign_protocol(db: &mut impl Db, serai: &Serai, latest_number: u64) -> Result<(), ()> {
+pub async fn advance_cosign_protocol(
+  db: &mut impl Db,
+  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+  serai: &Serai,
+  latest_number: u64,
+) -> Result<(), SeraiError> {
   let mut txn = db.txn();
-  let Some((last_intended_to_cosign_block, mut skipped_block)) = IntendedCosign::get(&txn) else {
-    IntendedCosign::set_intended_cosign(&mut txn, 1);
-    txn.commit();
-    return Ok(());
+
+  const INITIAL_INTENDED_COSIGN: u64 = 1;
+  let (last_intended_to_cosign_block, mut skipped_block) = {
+    let intended_cosign = IntendedCosign::get(&txn);
+    // If we haven't prior intended to cosign a block, set the intended cosign to 1
+    if let Some(intended_cosign) = intended_cosign {
+      intended_cosign
+    } else {
+      IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN);
+      IntendedCosign::get(&txn).unwrap()
+    }
   };

-  // If we haven't flagged skipped, and a block within the distance had events, flag the first
-  // such block as skipped
-  let mut distance_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
-  // If we've never triggered a cosign, don't skip any cosigns
-  if CosignTriggered::get(&txn).is_none() {
-    distance_end_exclusive = 0;
-  }
+  // "windows" refers to the window of blocks where even if there's a block which should be
+  // cosigned, it won't be due to proximity due to the prior cosign
+  let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
+  // If we've never triggered a cosign, don't skip any cosigns based on proximity
+  if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
+    window_end_exclusive = 0;
+  }

+  // Check all blocks within the window to see if they should be cosigned
+  // If so, we're skipping them and need to flag them as skipped so that once the window closes, we
+  // do cosign them
+  // We only perform this check if we haven't already marked a block as skipped since the cosign
+  // the skipped block will cause will cosign all other blocks within this window
   if skipped_block.is_none() {
-    for b in (last_intended_to_cosign_block + 1) .. distance_end_exclusive {
-      if b > latest_number {
-        break;
-      }
-
+    for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) {
       if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
         skipped_block = Some(b);
         log::debug!("skipping cosigning {b} due to proximity to prior cosign");
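To make the window arithmetic above concrete: COSIGN_DISTANCE is 5 * 60 / 6 = 50 blocks (five minutes at the six-second block time the earlier TODO references). If the last intended cosign was block 100, window_end_exclusive is 150. A block with events at height 120 falls inside the window, so it's flagged as skipped rather than cosigned immediately; if nothing else triggers a cosign covering it, 120 + 50 = 170 becomes the maximally latent cosign block, bounding how long a block needing a cosign can wait for one to trigger.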
@@ -170,58 +222,39 @@ if skipped_block.is_none() {
       }
     }
   }

-  let mut has_no_cosigners = None;
-  let mut cosign = vec![];
+  // A block which should be cosigned
+  let mut to_cosign = None;
+  // A list of sets which are cosigning, along with a boolean of if we're in the set
+  let mut cosigning = vec![];

-  // Block we should cosign no matter what if no prior blocks qualified for cosigning
-  let maximally_latent_cosign_block =
-    skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);
   for block in (last_intended_to_cosign_block + 1) ..= latest_number {
     let actual_block = serai
       .finalized_block_by_number(block)
       .await?
       .expect("couldn't get block which should've been finalized");

+    // Save the block number for this block, as needed by the cosigner to perform cosigning
     SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block);

-    let mut set = false;
-
-    let block_has_events = block_has_events(&mut txn, serai, block).await?;
-    // If this block is within the distance,
-    if block < distance_end_exclusive {
-      // and set a key, cosign it
-      if block_has_events == HasEvents::KeyGen {
-        IntendedCosign::set_intended_cosign(&mut txn, block);
-        set = true;
-        // Carry skipped if it isn't included by cosigning this block
-        if let Some(skipped) = skipped_block {
-          if skipped > block {
-            IntendedCosign::set_skipped_cosign(&mut txn, block);
-          }
-        }
-      }
-    } else if (Some(block) == maximally_latent_cosign_block) ||
-      (block_has_events != HasEvents::No)
-    {
-      // Since this block was outside the distance and had events/was maximally latent, cosign it
-      IntendedCosign::set_intended_cosign(&mut txn, block);
-      set = true;
-    }
-
-    if set {
+    if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await?
+    {
+      to_cosign = Some((block, actual_block.hash()));
+
       // Get the keys as of the prior block
-      // That means if this block is setting new keys (which won't lock in until we process this
-      // block), we won't freeze up waiting for the yet-to-be-processed keys to sign this block
+      // If this key sets new keys, the coordinator won't acknowledge so until we process this
+      // block
+      // We won't process this block until its co-signed
+      // Using the keys of the prior block ensures this deadlock isn't reached
       let serai = serai.as_of(actual_block.header.parent_hash.into());

-      has_no_cosigners = Some(actual_block.clone());
-
       for network in serai_client::primitives::NETWORKS {
         // Get the latest session to have set keys
+        let set_with_keys = {
         let Some(latest_session) = serai.validator_sets().session(network).await? else {
           continue;
         };
         let prior_session = Session(latest_session.0.saturating_sub(1));
-        let set_with_keys = if serai
+        if serai
           .validator_sets()
           .keys(ValidatorSet { network, session: prior_session })
           .await?

@@ -234,31 +267,33 @@ for block in (last_intended_to_cosign_block + 1) ..= latest_number {
           continue;
         }
         set
+        }
       };

-      // Since this is a valid cosigner, don't flag this block as having no cosigners
-      has_no_cosigners = None;
       log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
-      if in_set(key, &serai, set_with_keys).await?.unwrap() {
-        cosign.push((set_with_keys, block, actual_block.hash()));
-      }
+      cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys).await?.unwrap()));
     }

     break;
   }
 }

+  if let Some((number, hash)) = to_cosign {
     // If this block doesn't have cosigners, yet does have events, automatically mark it as
     // cosigned
-    if let Some(has_no_cosigners) = has_no_cosigners {
-      log::debug!("{} had no cosigners available, marking as cosigned", has_no_cosigners.number());
-      LatestCosignedBlock::set(&mut txn, &has_no_cosigners.number());
+    if cosigning.is_empty() {
+      log::debug!("{} had no cosigners available, marking as cosigned", number);
+      LatestCosignedBlock::set(&mut txn, &number);
     } else {
-      CosignTriggered::set(&mut txn, &());
-      for (set, block, hash) in cosign {
-        log::debug!("cosigning {block} with {:?} {:?}", set.network, set.session);
-        CosignTransactions::append_cosign(&mut txn, set, block, hash);
+      for (set, in_set) in cosigning {
+        if in_set {
+          log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session);
+          CosignTransactions::append_cosign(&mut txn, set, number, hash);
+        }
       }
     }
+  }
   txn.commit();

+  Ok(())
+}
@@ -1,61 +1,32 @@
-use scale::Encode;
-
-use serai_client::{
-  primitives::NetworkId,
-  validator_sets::primitives::{Session, ValidatorSet},
-};
+use serai_client::primitives::NetworkId;

 pub use serai_db::*;

+mod inner_db {
+  use super::*;
+
 create_db!(
   SubstrateDb {
-    CosignTriggered: () -> (),
-    IntendedCosign: () -> (u64, Option<u64>),
-    BlockHasEvents: (block: u64) -> u8,
-    LatestCosignedBlock: () -> u64,
     NextBlock: () -> u64,
-    EventDb: (id: &[u8], index: u32) -> (),
+    HandledEvent: (block: [u8; 32]) -> u32,
     BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32]
   }
 );
+}
+pub use inner_db::{NextBlock, BatchInstructionsHashDb};

-impl IntendedCosign {
-  pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {
-    Self::set(txn, &(intended, None::<u64>));
-  }
-  pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {
-    let (intended, prior_skipped) = Self::get(txn).unwrap();
-    assert!(prior_skipped.is_none());
-    Self::set(txn, &(intended, Some(skipped)));
-  }
-}
-
-impl LatestCosignedBlock {
-  pub fn latest_cosigned_block(getter: &impl Get) -> u64 {
-    Self::get(getter).unwrap_or_default().max(1)
-  }
-}
-
-impl EventDb {
-  pub fn is_unhandled(getter: &impl Get, id: &[u8], index: u32) -> bool {
-    Self::get(getter, id, index).is_none()
-  }
-
-  pub fn handle_event(txn: &mut impl DbTxn, id: &[u8], index: u32) {
-    assert!(Self::is_unhandled(txn, id, index));
-    Self::set(txn, id, index, &());
-  }
-}
-
-db_channel! {
-  SubstrateDbChannels {
-    CosignTransactions: (network: NetworkId) -> (Session, u64, [u8; 32]),
-  }
-}
-
-impl CosignTransactions {
-  // Append a cosign transaction.
-  pub fn append_cosign(txn: &mut impl DbTxn, set: ValidatorSet, number: u64, hash: [u8; 32]) {
-    CosignTransactions::send(txn, set.network, &(set.session, number, hash))
-  }
-}
+pub struct HandledEvent;
+impl HandledEvent {
+  fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 {
+    inner_db::HandledEvent::get(getter, block).map(|last| last + 1).unwrap_or(0)
+  }
+  pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool {
+    let next = Self::next_to_handle_event(getter, block);
+    assert!(next >= event_id);
+    next == event_id
+  }
+  pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) {
+    assert!(Self::next_to_handle_event(txn, block) == index);
+    inner_db::HandledEvent::set(txn, block, &index);
+  }
+}
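The HandledEvent wrapper above replaces per-event () markers with one monotonic counter per block: the stored u32 is the last handled event index, so next_to_handle_event is that plus one, or 0 when nothing was handled. A self-contained sketch of the same logic, with a HashMap standing in for the DB:

use std::collections::HashMap;

// Stand-in for the DB: block hash -> last handled event index
struct Handled(HashMap<[u8; 32], u32>);

impl Handled {
  fn next_to_handle_event(&self, block: [u8; 32]) -> u32 {
    self.0.get(&block).map(|last| last + 1).unwrap_or(0)
  }
  fn is_unhandled(&self, block: [u8; 32], event_id: u32) -> bool {
    let next = self.next_to_handle_event(block);
    // Events are handled strictly in order, so a gap is a logic error
    assert!(next >= event_id);
    next == event_id
  }
  fn handle_event(&mut self, block: [u8; 32], index: u32) {
    assert!(self.next_to_handle_event(block) == index);
    self.0.insert(block, index);
  }
}

fn main() {
  let block = [0; 32];
  let mut handled = Handled(HashMap::new());
  assert!(handled.is_unhandled(block, 0));
  handled.handle_event(block, 0);
  // After a simulated reboot, event 0 is recognized as already handled
  assert!(!handled.is_unhandled(block, 0));
  assert!(handled.is_unhandled(block, 1));
}

The assert!(next >= event_id) turns out-of-order handling, which the old per-event markers silently tolerated, into a loud failure.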
@@ -8,12 +8,11 @@ use zeroize::Zeroizing;

 use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

-use scale::{Encode, Decode};
 use serai_client::{
   SeraiError, Block, Serai, TemporalSerai,
   primitives::{BlockHash, NetworkId},
   validator_sets::{
-    primitives::{Session, ValidatorSet, KeyPair, amortize_excess_key_shares},
+    primitives::{ValidatorSet, KeyPair, amortize_excess_key_shares},
     ValidatorSetsEvent,
   },
   in_instructions::InInstructionsEvent,

@@ -26,15 +25,14 @@ use processor_messages::SubstrateContext;

 use tokio::{sync::mpsc, time::sleep};

-use crate::{
-  Db,
-  processors::Processors,
-  tributary::{TributarySpec, SeraiBlockNumber},
-};
+use crate::{Db, processors::Processors, tributary::TributarySpec};

 mod db;
 pub use db::*;

+mod cosign;
+pub use cosign::*;
+
 async fn in_set(
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   serai: &TemporalSerai<'_>,
@@ -110,7 +108,7 @@ async fn handle_new_set<D: Db>(

     new_tributary_spec.send(spec).unwrap();
   } else {
-    log::info!("not present in set {:?}", set);
+    log::info!("not present in new set {:?}", set);
   }

   Ok(())

@@ -147,8 +145,8 @@ async fn handle_key_gen<Pro: Processors>(
   Ok(())
 }

-async fn handle_batch_and_burns<D: Db, Pro: Processors>(
-  db: &mut D,
+async fn handle_batch_and_burns<Pro: Processors>(
+  txn: &mut impl DbTxn,
   processors: &Pro,
   serai: &Serai,
   block: &Block,

@@ -178,9 +176,7 @@
     {
       network_had_event(&mut burns, &mut batches, network);

-      let mut txn = db.txn();
-      BatchInstructionsHashDb::set(&mut txn, network, id, &instructions_hash);
-      txn.commit();
+      BatchInstructionsHashDb::set(txn, network, id, &instructions_hash);

       // Make sure this is the only Batch event for this network in this Block
       assert!(batch_block.insert(network, network_block).is_none());

@@ -257,8 +253,8 @@ async fn handle_block<D: Db, Pro: Processors>(
   for new_set in serai.as_of(hash).validator_sets().new_set_events().await? {
     // Individually mark each event as handled so on reboot, we minimize duplicates
     // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000
-    // events will successfully be incrementally handled (though the Serai connection should be
-    // stable)
+    // events will successfully be incrementally handled
+    // (though the Serai connection should be stable, making this unnecessary)
     let ValidatorSetsEvent::NewSet { set } = new_set else {
       panic!("NewSet event wasn't NewSet: {new_set:?}");
     };

@@ -269,11 +265,11 @@
       continue;
     }

-    if EventDb::is_unhandled(db, &hash, event_id) {
+    if HandledEvent::is_unhandled(db, hash, event_id) {
       log::info!("found fresh new set event {:?}", new_set);
       let mut txn = db.txn();
       handle_new_set::<D>(&mut txn, key, new_tributary_spec, serai, &block, set).await?;
-      EventDb::handle_event(&mut txn, &hash, event_id);
+      HandledEvent::handle_event(&mut txn, hash, event_id);
       txn.commit();
     }
     event_id += 1;

@@ -281,7 +277,7 @@

   // If a key pair was confirmed, inform the processor
   for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? {
-    if EventDb::is_unhandled(db, &hash, event_id) {
+    if HandledEvent::is_unhandled(db, hash, event_id) {
       log::info!("found fresh key gen event {:?}", key_gen);
       if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen {
         handle_key_gen(processors, serai, &block, set, key_pair).await?;

@@ -289,7 +285,7 @@
         panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
       }
       let mut txn = db.txn();
-      EventDb::handle_event(&mut txn, &hash, event_id);
+      HandledEvent::handle_event(&mut txn, hash, event_id);
       txn.commit();
     }
     event_id += 1;
@@ -304,28 +300,26 @@
       continue;
     }

-    if EventDb::is_unhandled(db, &hash, event_id) {
+    if HandledEvent::is_unhandled(db, hash, event_id) {
       log::info!("found fresh set retired event {:?}", retired_set);
       let mut txn = db.txn();
       crate::ActiveTributaryDb::retire_tributary(&mut txn, set);
       tributary_retired.send(set).unwrap();
-      EventDb::handle_event(&mut txn, &hash, event_id);
+      HandledEvent::handle_event(&mut txn, hash, event_id);
       txn.commit();
     }
     event_id += 1;
   }

   // Finally, tell the processor of acknowledged blocks/burns
-  // This uses a single event as. unlike prior events which individually executed code, all
+  // This uses a single event as unlike prior events which individually executed code, all
   // following events share data collection
-  // This does break the uniqueness of (hash, event_id) -> one event, yet
-  // (network, (hash, event_id)) remains valid as a unique ID for an event
-  if EventDb::is_unhandled(db, &hash, event_id) {
-    handle_batch_and_burns(db, processors, serai, &block).await?;
-  }
-  let mut txn = db.txn();
-  EventDb::handle_event(&mut txn, &hash, event_id);
-  txn.commit();
+  if HandledEvent::is_unhandled(db, hash, event_id) {
+    let mut txn = db.txn();
+    handle_batch_and_burns(&mut txn, processors, serai, &block).await?;
+    HandledEvent::handle_event(&mut txn, hash, event_id);
+    txn.commit();
+  }

   Ok(())
 }
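Note the structural fix in this last hunk: previously, handle_batch_and_burns ran against the DB directly while the event was marked handled in a separate transaction committed afterwards, so a crash in between could replay or lose work. Now a single transaction both performs the handling and records the HandledEvent marker, making handle-and-mark atomic; this is also why handle_batch_and_burns (in an earlier hunk) now takes txn: &mut impl DbTxn instead of the whole DB.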
@@ -342,181 +336,8 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
   // Check if there's been a new Substrate block
   let latest_number = serai.latest_finalized_block().await?.number();

-  // TODO: If this block directly builds off a cosigned block *and* doesn't contain events, mark
-  // cosigned,
+  // Advance the cosigning protocol
+  advance_cosign_protocol(db, key, serai, latest_number).await?;
-  {
-    // If:
-    // A) This block has events and it's been at least X blocks since the last cosign or
-    // B) This block doesn't have events but it's been X blocks since a skipped block which did
-    //    have events or
-    // C) This block key gens (which changes who the cosigners are)
-    // cosign this block.
-    const COSIGN_DISTANCE: u64 = 5 * 60 / 6; // 5 minutes, expressed in blocks
-
-    #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
-    enum HasEvents {
-      KeyGen,
-      Yes,
-      No,
-    }
-    async fn block_has_events(
-      txn: &mut impl DbTxn,
-      serai: &Serai,
-      block: u64,
-    ) -> Result<HasEvents, SeraiError> {
-      let cached = BlockHasEvents::get(txn, block);
-      match cached {
-        None => {
-          let serai = serai.as_of(
-            serai
-              .finalized_block_by_number(block)
-              .await?
-              .expect("couldn't get block which should've been finalized")
-              .hash(),
-          );
-
-          if !serai.validator_sets().key_gen_events().await?.is_empty() {
-            return Ok(HasEvents::KeyGen);
-          }
-
-          let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() &&
-            serai.in_instructions().batch_events().await?.is_empty() &&
-            serai.validator_sets().new_set_events().await?.is_empty() &&
-            serai.validator_sets().set_retired_events().await?.is_empty();
-
-          let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };
-
-          let has_events = has_events.encode();
-          assert_eq!(has_events.len(), 1);
-          BlockHasEvents::set(txn, block, &has_events[0]);
-          Ok(HasEvents::Yes)
-        }
-        Some(code) => Ok(HasEvents::decode(&mut [code].as_slice()).unwrap()),
-      }
-    }
-
-    let mut txn = db.txn();
-    let Some((last_intended_to_cosign_block, mut skipped_block)) = IntendedCosign::get(&txn) else {
-      IntendedCosign::set_intended_cosign(&mut txn, 1);
-      txn.commit();
-      return Ok(());
-    };
-
-    // If we haven't flagged skipped, and a block within the distance had events, flag the first
-    // such block as skipped
-    let mut distance_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
-    // If we've never triggered a cosign, don't skip any cosigns
-    if CosignTriggered::get(&txn).is_none() {
-      distance_end_exclusive = 0;
-    }
-    if skipped_block.is_none() {
-      for b in (last_intended_to_cosign_block + 1) .. distance_end_exclusive {
-        if b > latest_number {
-          break;
-        }
-
-        if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
-          skipped_block = Some(b);
-          log::debug!("skipping cosigning {b} due to proximity to prior cosign");
-          IntendedCosign::set_skipped_cosign(&mut txn, b);
-          break;
-        }
-      }
-    }
-
-    let mut has_no_cosigners = None;
-    let mut cosign = vec![];
-
-    // Block we should cosign no matter what if no prior blocks qualified for cosigning
-    let maximally_latent_cosign_block =
-      skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);
-    for block in (last_intended_to_cosign_block + 1) ..= latest_number {
-      let actual_block = serai
-        .finalized_block_by_number(block)
-        .await?
-        .expect("couldn't get block which should've been finalized");
-      SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block);
-
-      let mut set = false;
-
-      let block_has_events = block_has_events(&mut txn, serai, block).await?;
-      // If this block is within the distance,
-      if block < distance_end_exclusive {
-        // and set a key, cosign it
-        if block_has_events == HasEvents::KeyGen {
-          IntendedCosign::set_intended_cosign(&mut txn, block);
-          set = true;
-          // Carry skipped if it isn't included by cosigning this block
-          if let Some(skipped) = skipped_block {
-            if skipped > block {
-              IntendedCosign::set_skipped_cosign(&mut txn, block);
-            }
-          }
-        }
-      } else if (Some(block) == maximally_latent_cosign_block) ||
-        (block_has_events != HasEvents::No)
-      {
-        // Since this block was outside the distance and had events/was maximally latent, cosign it
-        IntendedCosign::set_intended_cosign(&mut txn, block);
-        set = true;
-      }
-
-      if set {
-        // Get the keys as of the prior block
-        // That means if this block is setting new keys (which won't lock in until we process this
-        // block), we won't freeze up waiting for the yet-to-be-processed keys to sign this block
-        let serai = serai.as_of(actual_block.header.parent_hash.into());
-
-        has_no_cosigners = Some(actual_block.clone());
-
-        for network in serai_client::primitives::NETWORKS {
-          // Get the latest session to have set keys
-          let Some(latest_session) = serai.validator_sets().session(network).await? else {
-            continue;
-          };
-          let prior_session = Session(latest_session.0.saturating_sub(1));
-          let set_with_keys = if serai
-            .validator_sets()
-            .keys(ValidatorSet { network, session: prior_session })
-            .await?
-            .is_some()
-          {
-            ValidatorSet { network, session: prior_session }
-          } else {
-            let set = ValidatorSet { network, session: latest_session };
-            if serai.validator_sets().keys(set).await?.is_none() {
-              continue;
-            }
-            set
-          };
-
-          // Since this is a valid cosigner, don't flag this block as having no cosigners
-          has_no_cosigners = None;
-          log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
-
-          if in_set(key, &serai, set_with_keys).await?.unwrap() {
-            cosign.push((set_with_keys, block, actual_block.hash()));
-          }
-        }
-
-        break;
-      }
-    }
-
-    // If this block doesn't have cosigners, yet does have events, automatically mark it as
|
|
||||||
// cosigned
|
|
||||||
if let Some(has_no_cosigners) = has_no_cosigners {
|
|
||||||
log::debug!("{} had no cosigners available, marking as cosigned", has_no_cosigners.number());
|
|
||||||
LatestCosignedBlock::set(&mut txn, &has_no_cosigners.number());
|
|
||||||
} else {
|
|
||||||
CosignTriggered::set(&mut txn, &());
|
|
||||||
for (set, block, hash) in cosign {
|
|
||||||
log::debug!("cosigning {block} with {:?} {:?}", set.network, set.session);
|
|
||||||
CosignTransactions::append_cosign(&mut txn, set, block, hash);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
txn.commit();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reduce to the latest cosigned block
|
// Reduce to the latest cosigned block
|
||||||
let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db));
|
let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db));
|
||||||
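For reference, the scheduling rule the removed block implements (conditions A/B/C in its comments) can be distilled into a small pure function. This is a hedged sketch, not the actual implementation: the names are illustrative, and the "maximally latent" case is simplified to a greater-or-equal comparison.

```rust
const COSIGN_DISTANCE: u64 = 5 * 60 / 6; // 5 minutes, expressed in 6-second blocks

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum HasEvents {
  KeyGen,
  Yes,
  No,
}

// Illustrative distillation of conditions A/B/C above; not project code
fn should_cosign(
  last_cosigned: u64,
  skipped_block_with_events: Option<u64>,
  block: u64,
  has_events: HasEvents,
) -> bool {
  let within_distance = block < (last_cosigned + COSIGN_DISTANCE);
  match has_events {
    // C) A key gen changes who the cosigners are, so it's cosigned immediately
    HasEvents::KeyGen => true,
    // A) A block with events is cosigned once it's past the distance window
    HasEvents::Yes => !within_distance,
    // B) An event-less block is cosigned once a prior skipped block (which did have
    //    events) is COSIGN_DISTANCE behind it
    HasEvents::No => {
      skipped_block_with_events.map_or(false, |skipped| block >= (skipped + COSIGN_DISTANCE))
    }
  }
}
```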
@@ -526,24 +347,19 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
   }

   for b in *next_block ..= latest_number {
-    log::info!("found substrate block {b}");
-    handle_block(
-      db,
-      key,
-      new_tributary_spec,
-      tributary_retired,
-      processors,
-      serai,
-      serai
+    let block = serai
       .finalized_block_by_number(b)
       .await?
-      .expect("couldn't get block before the latest finalized block"),
-    )
-    .await?;
+      .expect("couldn't get block before the latest finalized block");
+    log::info!("handling substrate block {b}");
+    handle_block(db, key, new_tributary_spec, tributary_retired, processors, serai, block).await?;
     *next_block += 1;

     let mut txn = db.txn();
     NextBlock::set(&mut txn, next_block);
     txn.commit();

     log::info!("handled substrate block {b}");
   }

@@ -578,6 +394,7 @@ pub async fn scan_task<D: Db, Pro: Processors>(
     };
   */
   // TODO: Restore the above subscription-based system
+  // That would require moving serai-client from HTTP to websockets
   let new_substrate_block_notifier = {
     let serai = &serai;
     move |next_substrate_block| async move {
@@ -648,22 +465,25 @@ pub async fn scan_task<D: Db, Pro: Processors>(
 }

 /// Gets the expected ID for the next Batch.
-pub(crate) async fn get_expected_next_batch(serai: &Serai, network: NetworkId) -> u32 {
-  let mut first = true;
-  loop {
-    if !first {
-      log::error!("{} {network:?}", "couldn't connect to Serai node to get the next batch ID for",);
-      sleep(Duration::from_secs(5)).await;
+///
+/// Will log an error and apply a slight sleep on error, letting the caller simply immediately
+/// retry.
+pub(crate) async fn expected_next_batch(
+  serai: &Serai,
+  network: NetworkId,
+) -> Result<u32, SeraiError> {
+  async fn expected_next_batch_inner(serai: &Serai, network: NetworkId) -> Result<u32, SeraiError> {
+    let serai = serai.as_of_latest_finalized_block().await?;
+    let last = serai.in_instructions().last_batch_for_network(network).await?;
+    Ok(if let Some(last) = last { last + 1 } else { 0 })
+  }
+  match expected_next_batch_inner(serai, network).await {
+    Ok(next) => Ok(next),
+    Err(e) => {
+      log::error!("couldn't get the expected next batch from substrate: {e:?}");
+      sleep(Duration::from_millis(100)).await;
+      Err(e)
     }
-    first = false;
-
-    let Ok(serai) = serai.as_of_latest_finalized_block().await else {
-      continue;
-    };
-    let Ok(last) = serai.in_instructions().last_batch_for_network(network).await else {
-      continue;
-    };
-    break if let Some(last) = last { last + 1 } else { 0 };
   }
 }

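Since `expected_next_batch` now surfaces the error instead of looping internally, the caller owns the retry policy. A minimal sketch of how a caller might drive it, assuming only what's shown above (the function's signature and its internal log-and-sleep on error):

```rust
// Hedged sketch of a caller-side retry loop; `serai`/`network` are as in the function above
async fn next_batch_with_retry(serai: &Serai, network: NetworkId) -> u32 {
  loop {
    // expected_next_batch already logged the error and slept briefly, so simply retry
    if let Ok(next) = expected_next_batch(serai, network).await {
      return next;
    }
  }
}
```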
@@ -13,7 +13,7 @@ use ciphersuite::{
 };

 use sp_application_crypto::sr25519;
+use borsh::BorshDeserialize;
 use serai_client::{
   primitives::NetworkId,
   validator_sets::primitives::{Session, ValidatorSet},
@@ -58,21 +58,26 @@ pub fn new_spec<R: RngCore + CryptoRng>(
     .collect::<Vec<_>>();

   let res = TributarySpec::new(serai_block, start_time, set, set_participants);
-  assert_eq!(TributarySpec::read::<&[u8]>(&mut res.serialize().as_ref()).unwrap(), res);
+  assert_eq!(
+    TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
+    res,
+  );
   res
 }
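The assertion above is a borsh round-trip check. The same pattern generalizes to any borsh type, using only the two calls already visible in the diff (`borsh::to_vec` and `deserialize_reader`); this is a sketch, not project code:

```rust
use borsh::{BorshSerialize, BorshDeserialize};

// Generic round-trip check in the same spirit as the assertion above
fn assert_borsh_roundtrip<T: BorshSerialize + BorshDeserialize + PartialEq + std::fmt::Debug>(
  value: &T,
) {
  let bytes = borsh::to_vec(value).unwrap();
  let decoded = T::deserialize_reader(&mut bytes.as_slice()).unwrap();
  assert_eq!(&decoded, value);
}
```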

 pub async fn new_tributaries(
   keys: &[Zeroizing<<Ristretto as Ciphersuite>::F>],
   spec: &TributarySpec,
-) -> Vec<(LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)> {
+) -> Vec<(MemDb, LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)> {
   let p2p = LocalP2p::new(keys.len());
   let mut res = vec![];
   for (i, key) in keys.iter().enumerate() {
+    let db = MemDb::new();
     res.push((
+      db.clone(),
       p2p[i].clone(),
       Tributary::<_, Transaction, _>::new(
-        MemDb::new(),
+        db,
         spec.genesis(),
         spec.start_time(),
         key.clone(),
@@ -152,7 +157,11 @@ async fn tributary_test() {
   let keys = new_keys(&mut OsRng);
   let spec = new_spec(&mut OsRng, &keys);

-  let mut tributaries = new_tributaries(&keys, &spec).await;
+  let mut tributaries = new_tributaries(&keys, &spec)
+    .await
+    .into_iter()
+    .map(|(_, p2p, tributary)| (p2p, tributary))
+    .collect::<Vec<_>>();

   let mut blocks = 0;
   let mut last_block = spec.genesis();

@@ -8,7 +8,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use frost::Participant;

 use sp_runtime::traits::Verify;
-use serai_client::validator_sets::primitives::KeyPair;
+use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};

 use tokio::time::sleep;

@@ -34,10 +34,18 @@ use crate::{

 #[tokio::test]
 async fn dkg_test() {
+  env_logger::init();

   let keys = new_keys(&mut OsRng);
   let spec = new_spec(&mut OsRng, &keys);

-  let tributaries = new_tributaries(&keys, &spec).await;
+  let full_tributaries = new_tributaries(&keys, &spec).await;
+  let mut dbs = vec![];
+  let mut tributaries = vec![];
+  for (db, p2p, tributary) in full_tributaries {
+    dbs.push(db);
+    tributaries.push((p2p, tributary));
+  }

   // Run the tributaries in the background
   tokio::spawn(run_tributaries(tributaries.clone()));
@@ -49,8 +57,11 @@ async fn dkg_test() {
     let mut commitments = vec![0; 256];
     OsRng.fill_bytes(&mut commitments);

-    let mut tx =
-      Transaction::DkgCommitments(attempt, vec![commitments], Transaction::empty_signed());
+    let mut tx = Transaction::DkgCommitments {
+      attempt,
+      commitments: vec![commitments],
+      signed: Transaction::empty_signed(),
+    };
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }
@@ -71,7 +82,7 @@ async fn dkg_test() {
     .iter()
     .enumerate()
     .map(|(i, tx)| {
-      if let Transaction::DkgCommitments(_, commitments, _) = tx {
+      if let Transaction::DkgCommitments { commitments, .. } = tx {
         (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
       } else {
         panic!("txs had non-commitments");
@@ -80,20 +91,20 @@ async fn dkg_test() {
     .collect();

   async fn new_processors(
+    db: &mut MemDb,
     key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
     spec: &TributarySpec,
     tributary: &Tributary<MemDb, Transaction, LocalP2p>,
-  ) -> (MemDb, MemProcessors) {
-    let mut scanner_db = MemDb::new();
+  ) -> MemProcessors {
     let processors = MemProcessors::new();
-    handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
-      &mut scanner_db,
+    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+      db,
       key,
-      |_, _, _, _| async {
+      &|_, _, _, _| async {
         panic!("provided TX caused recognized_id to be called in new_processors")
       },
       &processors,
-      |_, _, _| async { panic!("test tried to publish a new Serai TX in new_processors") },
+      &|_, _, _| async { panic!("test tried to publish a new Serai TX in new_processors") },
       &|_| async {
         panic!(
           "test tried to publish a new Tributary TX from handle_application_tx in new_processors"
@@ -103,11 +114,11 @@ async fn dkg_test() {
       &tributary.reader(),
     )
     .await;
-    (scanner_db, processors)
+    processors
   }

   // Instantiate a scanner and verify it has nothing to report
-  let (mut scanner_db, processors) = new_processors(&keys[0], &spec, &tributaries[0].1).await;
+  let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
   assert!(processors.0.read().await.is_empty());

   // Publish the last commitment
@@ -117,14 +128,14 @@ async fn dkg_test() {
   sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

   // Verify the scanner emits a KeyGen::Commitments message
-  handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
-    &mut scanner_db,
+  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+    &mut dbs[0],
     &keys[0],
-    |_, _, _, _| async {
+    &|_, _, _, _| async {
       panic!("provided TX caused recognized_id to be called after Commitments")
     },
     &processors,
-    |_, _, _| async { panic!("test tried to publish a new Serai TX after Commitments") },
+    &|_, _, _| async { panic!("test tried to publish a new Serai TX after Commitments") },
     &|_| async {
       panic!(
         "test tried to publish a new Tributary TX from handle_application_tx after Commitments"
@@ -151,8 +162,8 @@ async fn dkg_test() {
   }

   // Verify all keys exhibit this scanner behavior
-  for (i, key) in keys.iter().enumerate() {
-    let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await;
+  for (i, key) in keys.iter().enumerate().skip(1) {
+    let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
     let mut msgs = processors.0.write().await;
     assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
@@ -182,12 +193,14 @@ async fn dkg_test() {
       }
     }

+    let mut txn = dbs[k].txn();
     let mut tx = Transaction::DkgShares {
       attempt,
       shares,
-      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, 0),
+      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
       signed: Transaction::empty_signed(),
     };
+    txn.commit();
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }
@@ -201,14 +214,14 @@ async fn dkg_test() {
   }

   // With just 4 sets of shares, nothing should happen yet
-  handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
-    &mut scanner_db,
+  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+    &mut dbs[0],
     &keys[0],
-    |_, _, _, _| async {
+    &|_, _, _, _| async {
       panic!("provided TX caused recognized_id to be called after some shares")
     },
     &processors,
-    |_, _, _| async { panic!("test tried to publish a new Serai TX after some shares") },
+    &|_, _, _| async { panic!("test tried to publish a new Serai TX after some shares") },
     &|_| async {
       panic!(
         "test tried to publish a new Tributary TX from handle_application_tx after some shares"
@@ -254,28 +267,30 @@ async fn dkg_test() {
   };

   // Any scanner which has handled the prior blocks should only emit the new event
-  handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
-    &mut scanner_db,
-    &keys[0],
-    |_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
-    &processors,
-    |_, _, _| async { panic!("test tried to publish a new Serai TX") },
-    &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
-    &spec,
-    &tributaries[0].1.reader(),
-  )
-  .await;
-  {
-    let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
-    let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    assert_eq!(msgs.pop_front().unwrap(), shares_for(0));
-    assert!(msgs.is_empty());
-  }
+  for (i, key) in keys.iter().enumerate() {
+    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+      &mut dbs[i],
+      key,
+      &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
+      &processors,
+      &|_, _, _| async { panic!("test tried to publish a new Serai TX") },
+      &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
+      &spec,
+      &tributaries[i].1.reader(),
+    )
+    .await;
+    {
+      let mut msgs = processors.0.write().await;
+      assert_eq!(msgs.len(), 1);
+      let msgs = msgs.get_mut(&spec.set().network).unwrap();
+      assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
+      assert!(msgs.is_empty());
+    }
+  }

   // Yet new scanners should emit all events
   for (i, key) in keys.iter().enumerate() {
-    let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await;
+    let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
     let mut msgs = processors.0.write().await;
     assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
@@ -302,17 +317,16 @@ async fn dkg_test() {
   let mut txs = vec![];
   for (i, key) in keys.iter().enumerate() {
     let attempt = 0;
-    let mut scanner_db = &mut scanner_db;
-    let (mut local_scanner_db, _) = new_processors(key, &spec, &tributaries[0].1).await;
-    if i != 0 {
-      scanner_db = &mut local_scanner_db;
-    }
-    let mut txn = scanner_db.txn();
+    let mut txn = dbs[i].txn();
     let share =
       crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
     txn.commit();

-    let mut tx = Transaction::DkgConfirmed(attempt, share, Transaction::empty_signed());
+    let mut tx = Transaction::DkgConfirmed {
+      attempt,
+      confirmation_share: share,
+      signed: Transaction::empty_signed(),
+    };
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }
@@ -325,14 +339,14 @@ async fn dkg_test() {
   }

   // The scanner should successfully try to publish a transaction with a validly signed signature
-  handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
-    &mut scanner_db,
+  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+    &mut dbs[0],
     &keys[0],
-    |_, _, _, _| async {
+    &|_, _, _, _| async {
       panic!("provided TX caused recognized_id to be called after DKG confirmation")
     },
     &processors,
-    |set, tx_type, tx| {
+    &|set: ValidatorSet, tx_type, tx: serai_client::Transaction| {
       assert_eq!(tx_type, PstTxType::SetKeys);

       let spec = spec.clone();

@@ -27,7 +27,11 @@ async fn handle_p2p_test() {
   let keys = new_keys(&mut OsRng);
   let spec = new_spec(&mut OsRng, &keys);

-  let mut tributaries = new_tributaries(&keys, &spec).await;
+  let mut tributaries = new_tributaries(&keys, &spec)
+    .await
+    .into_iter()
+    .map(|(_, p2p, tributary)| (p2p, tributary))
+    .collect::<Vec<_>>();

   let mut tributary_senders = vec![];
   let mut tributary_arcs = vec![];

@@ -7,7 +7,7 @@ use processor_messages::coordinator::SubstrateSignableId;

 use tributary::{ReadWrite, tests::random_signed_with_nonce};

-use crate::tributary::{SignData, Transaction};
+use crate::tributary::{Label, SignData, Transaction};

 mod chain;
 pub use chain::*;
@@ -34,11 +34,12 @@ fn random_vec<R: RngCore>(rng: &mut R, limit: usize) -> Vec<u8> {
 fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(
   rng: &mut R,
   plan: Id,
-  nonce: u32,
+  label: Label,
 ) -> SignData<Id> {
   SignData {
     plan,
     attempt: random_u32(&mut OsRng),
+    label,

     data: {
       let mut res = vec![];
@@ -48,7 +49,7 @@ fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + De
       res
     },

-    signed: random_signed_with_nonce(&mut OsRng, nonce),
+    signed: random_signed_with_nonce(&mut OsRng, label.nonce()),
   }
 }

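The diff doesn't include `Label`'s definition. A minimal definition consistent with its call sites here (where `Label::Preprocess` replaces the raw nonce 0 and `Label::Share` replaces the raw nonce 1) might look like the following sketch; the actual definition lives in the coordinator's tributary code.

```rust
use scale::{Encode, Decode};

// Illustrative sketch of Label, inferred from its usage in this diff
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
pub enum Label {
  Preprocess,
  Share,
}

impl Label {
  // Each label maps to the transaction nonce its data is signed under
  pub fn nonce(&self) -> u32 {
    match self {
      Label::Preprocess => 0,
      Label::Share => 1,
    }
  }
}
```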
@@ -87,7 +88,7 @@ fn serialize_sign_data() {
   fn test_read_write<Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(value: SignData<Id>) {
     let mut buf = vec![];
     value.write(&mut buf).unwrap();
-    assert_eq!(value, SignData::read(&mut buf.as_slice(), value.signed.nonce).unwrap())
+    assert_eq!(value, SignData::read(&mut buf.as_slice()).unwrap())
   }

   let mut plan = [0; 3];
@@ -95,28 +96,28 @@ fn serialize_sign_data() {
   test_read_write(random_sign_data::<_, _>(
     &mut OsRng,
     plan,
-    u32::try_from(OsRng.next_u64() >> 32).unwrap(),
+    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
   ));
   let mut plan = [0; 5];
   OsRng.fill_bytes(&mut plan);
   test_read_write(random_sign_data::<_, _>(
     &mut OsRng,
     plan,
-    u32::try_from(OsRng.next_u64() >> 32).unwrap(),
+    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
   ));
   let mut plan = [0; 8];
   OsRng.fill_bytes(&mut plan);
   test_read_write(random_sign_data::<_, _>(
     &mut OsRng,
     plan,
-    u32::try_from(OsRng.next_u64() >> 32).unwrap(),
+    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
   ));
   let mut plan = [0; 24];
   OsRng.fill_bytes(&mut plan);
   test_read_write(random_sign_data::<_, _>(
     &mut OsRng,
     plan,
-    u32::try_from(OsRng.next_u64() >> 32).unwrap(),
+    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
   ));
 }

@@ -134,11 +135,11 @@ fn serialize_transaction() {
       OsRng.fill_bytes(&mut temp);
       commitments.push(temp);
     }
-    test_read_write(Transaction::DkgCommitments(
-      random_u32(&mut OsRng),
+    test_read_write(Transaction::DkgCommitments {
+      attempt: random_u32(&mut OsRng),
       commitments,
-      random_signed_with_nonce(&mut OsRng, 0),
-    ));
+      signed: random_signed_with_nonce(&mut OsRng, 0),
+    });
   }

   {
@@ -192,25 +193,25 @@ fn serialize_transaction() {
     });
   }

-  test_read_write(Transaction::DkgConfirmed(
-    random_u32(&mut OsRng),
-    {
+  test_read_write(Transaction::DkgConfirmed {
+    attempt: random_u32(&mut OsRng),
+    confirmation_share: {
       let mut share = [0; 32];
       OsRng.fill_bytes(&mut share);
       share
     },
-    random_signed_with_nonce(&mut OsRng, 2),
-  ));
+    signed: random_signed_with_nonce(&mut OsRng, 2),
+  });

   {
     let mut key = [0; 32];
     OsRng.fill_bytes(&mut key);
-    test_read_write(Transaction::DkgRemovalPreprocess(random_sign_data(&mut OsRng, key, 0)));
+    test_read_write(Transaction::DkgRemoval(random_sign_data(&mut OsRng, key, Label::Preprocess)));
   }
   {
     let mut key = [0; 32];
     OsRng.fill_bytes(&mut key);
-    test_read_write(Transaction::DkgRemovalShare(random_sign_data(&mut OsRng, key, 1)));
+    test_read_write(Transaction::DkgRemoval(random_sign_data(&mut OsRng, key, Label::Share)));
   }

   {
@@ -224,38 +225,38 @@ fn serialize_transaction() {
     OsRng.fill_bytes(&mut block);
     let mut batch = [0; 5];
     OsRng.fill_bytes(&mut batch);
-    test_read_write(Transaction::Batch(block, batch));
+    test_read_write(Transaction::Batch { block, batch });
   }
   test_read_write(Transaction::SubstrateBlock(OsRng.next_u64()));

   {
     let mut plan = [0; 5];
     OsRng.fill_bytes(&mut plan);
-    test_read_write(Transaction::SubstratePreprocess(random_sign_data(
+    test_read_write(Transaction::SubstrateSign(random_sign_data(
       &mut OsRng,
       SubstrateSignableId::Batch(plan),
-      0,
+      Label::Preprocess,
     )));
   }
   {
     let mut plan = [0; 5];
     OsRng.fill_bytes(&mut plan);
-    test_read_write(Transaction::SubstrateShare(random_sign_data(
+    test_read_write(Transaction::SubstrateSign(random_sign_data(
       &mut OsRng,
       SubstrateSignableId::Batch(plan),
-      1,
+      Label::Share,
     )));
   }

   {
     let mut plan = [0; 32];
     OsRng.fill_bytes(&mut plan);
-    test_read_write(Transaction::SignPreprocess(random_sign_data(&mut OsRng, plan, 0)));
+    test_read_write(Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess)));
   }
   {
     let mut plan = [0; 32];
     OsRng.fill_bytes(&mut plan);
-    test_read_write(Transaction::SignShare(random_sign_data(&mut OsRng, plan, 1)));
+    test_read_write(Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share)));
   }

   {

@@ -31,7 +31,11 @@ async fn sync_test() {
   // Ensure this can have a node fail
   assert!(spec.n() > spec.t());

-  let mut tributaries = new_tributaries(&keys, &spec).await;
+  let mut tributaries = new_tributaries(&keys, &spec)
+    .await
+    .into_iter()
+    .map(|(_, p2p, tributary)| (p2p, tributary))
+    .collect::<Vec<_>>();

   // Keep a Tributary back, effectively having it offline
   let syncer_key = keys.pop().unwrap();

@@ -23,7 +23,11 @@ async fn tx_test() {
   let keys = new_keys(&mut OsRng);
   let spec = new_spec(&mut OsRng, &keys);

-  let tributaries = new_tributaries(&keys, &spec).await;
+  let tributaries = new_tributaries(&keys, &spec)
+    .await
+    .into_iter()
+    .map(|(_, p2p, tributary)| (p2p, tributary))
+    .collect::<Vec<_>>();

   // Run the tributaries in the background
   tokio::spawn(run_tributaries(tributaries.clone()));
@@ -39,8 +43,11 @@ async fn tx_test() {

   // Create the TX with a null signature so we can get its sig hash
   let block_before_tx = tributaries[sender].1.tip().await;
-  let mut tx =
-    Transaction::DkgCommitments(attempt, vec![commitments.clone()], Transaction::empty_signed());
+  let mut tx = Transaction::DkgCommitments {
+    attempt,
+    commitments: vec![commitments.clone()],
+    signed: Transaction::empty_signed(),
+  };
   tx.sign(&mut OsRng, spec.genesis(), &key);

   assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true));

@@ -1,23 +1,23 @@
-use core::ops::Deref;
 use std::collections::HashMap;

-use zeroize::Zeroizing;
-use ciphersuite::{Ciphersuite, Ristretto, group::GroupEncoding};
+use scale::Encode;
 use frost::Participant;

 use serai_client::validator_sets::primitives::KeyPair;

 use processor_messages::coordinator::SubstrateSignableId;

-use scale::{Encode, Decode};

 pub use serai_db::*;

-use crate::tributary::TributarySpec;
+use tributary::ReadWrite;

-#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
+use crate::tributary::{Label, Transaction};
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
 pub enum Topic {
   Dkg,
+  DkgConfirmation,
   DkgRemoval([u8; 32]),
   SubstrateSign(SubstrateSignableId),
   Sign([u8; 32]),
@@ -27,7 +27,7 @@ pub enum Topic {
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
 pub struct DataSpecification {
   pub topic: Topic,
-  pub label: &'static str,
+  pub label: Label,
   pub attempt: u32,
 }

@@ -42,9 +42,9 @@ pub enum Accumulation {
 }

 create_db!(
-  NewTributary {
+  Tributary {
     SeraiBlockNumber: (hash: [u8; 32]) -> u64,
-    LastBlock: (genesis: [u8; 32]) -> [u8; 32],
+    LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
     FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
     FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
     DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
@@ -52,12 +52,13 @@ create_db!(
     ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
     RemovalNonces:
       (genesis: [u8; 32], removing: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
-    CurrentlyCompletingKeyPair: (genesis: [u8; 32]) -> KeyPair,
+    DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
     DkgCompleted: (genesis: [u8; 32]) -> (),
     AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
     DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
     DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,
-    EventDb: (id: [u8; 32], index: u32) -> (),
+    SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec<u8>,
   }
 );

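Each `create_db!` entry declares a typed key-value table. The rough shape one entry expands to, inferred purely from how the surrounding code calls these tables (`get`/`set`/`del` over serai_db's `Get`/`DbTxn`), is sketched below; this is an illustration, not the macro's literal output.

```rust
// Hand-written equivalent of the SignedTransactionDb entry above (illustrative only)
pub struct SignedTransactionDb;
impl SignedTransactionDb {
  fn key(order: &[u8], nonce: u32) -> Vec<u8> {
    // Namespaced key: the table name, then the declared parameters
    let mut key = b"SignedTransactionDb".to_vec();
    key.extend(order);
    key.extend(nonce.to_le_bytes());
    key
  }
  pub fn get(getter: &impl Get, order: &[u8], nonce: u32) -> Option<Vec<u8>> {
    getter.get(Self::key(order, nonce))
  }
  pub fn set(txn: &mut impl DbTxn, order: &[u8], nonce: u32, tx: &Vec<u8>) {
    txn.put(Self::key(order, nonce), tx);
  }
  pub fn del(txn: &mut impl DbTxn, order: &[u8], nonce: u32) {
    txn.del(Self::key(order, nonce));
  }
}
```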
@@ -84,78 +85,24 @@ impl AttemptDb {
   pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
     let attempt = Self::get(getter, genesis, &topic);
     // Don't require explicit recognition of the Dkg topic as it starts when the chain does
-    if attempt.is_none() && (topic == Topic::Dkg) {
+    if attempt.is_none() && ((topic == Topic::Dkg) || (topic == Topic::DkgConfirmation)) {
       return Some(0);
     }
     attempt
   }
 }

-impl DataDb {
-  pub fn accumulate(
+impl SignedTransactionDb {
+  pub fn take_signed_transaction(
     txn: &mut impl DbTxn,
-    our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    spec: &TributarySpec,
-    data_spec: &DataSpecification,
-    signer: <Ristretto as Ciphersuite>::G,
-    data: &Vec<u8>,
-  ) -> Accumulation {
-    let genesis = spec.genesis();
-    if Self::get(txn, genesis, data_spec, &signer.to_bytes()).is_some() {
-      panic!("accumulating data for a participant multiple times");
+    order: &[u8],
+    nonce: u32,
+  ) -> Option<Transaction> {
+    let res = SignedTransactionDb::get(txn, order, nonce)
+      .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap());
+    if res.is_some() {
+      Self::del(txn, order, nonce);
     }
-    let signer_shares = {
-      let signer_i =
-        spec.i(signer).expect("transaction signed by a non-validator for this tributary");
-      u16::from(signer_i.end) - u16::from(signer_i.start)
-    };
-
-    let prior_received = DataReceived::get(txn, genesis, data_spec).unwrap_or_default();
-    let now_received = prior_received + signer_shares;
-    DataReceived::set(txn, genesis, data_spec, &now_received);
-    DataDb::set(txn, genesis, data_spec, &signer.to_bytes(), data);
-
-    // If we have all the needed commitments/preprocesses/shares, tell the processor
-    let needed = if data_spec.topic == Topic::Dkg { spec.n() } else { spec.t() };
-    if (prior_received < needed) && (now_received >= needed) {
-      return Accumulation::Ready({
-        let mut data = HashMap::new();
-        for validator in spec.validators().iter().map(|validator| validator.0) {
-          data.insert(
-            spec.i(validator).unwrap().start,
-            if let Some(data) = Self::get(txn, genesis, data_spec, &validator.to_bytes()) {
-              data
-            } else {
-              continue;
-            },
-          );
-        }
-
-        assert_eq!(data.len(), usize::from(needed));
-
-        // Remove our own piece of data, if we were involved
-        if data
-          .remove(
-            &spec
-              .i(Ristretto::generator() * our_key.deref())
-              .expect("handling a message for a Tributary we aren't part of")
-              .start,
-          )
-          .is_some()
-        {
-          DataSet::Participating(data)
-        } else {
-          DataSet::NotParticipating
-        }
-      });
-    }
-    Accumulation::NotReady
-  }
-}
-
-impl EventDb {
-  pub fn handle_event(txn: &mut impl DbTxn, id: [u8; 32], index: u32) {
-    assert!(Self::get(txn, id, index).is_none());
-    Self::set(txn, id, index, &());
+    res
   }
 }

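`take_signed_transaction` implements a take (read-and-delete) so a queued signed transaction is yielded at most once per `(order, nonce)`. A hedged usage sketch, where `publish` is a hypothetical stand-in for the actual publication path:

```rust
// Drain queued signed transactions in nonce order (illustrative)
fn publish_queued(txn: &mut impl DbTxn, order: &[u8], mut next_nonce: u32) {
  fn publish(_tx: Transaction) { /* hypothetical publication path */ }
  // take_signed_transaction deletes on read, so each transaction is yielded exactly once
  while let Some(tx) = SignedTransactionDb::take_signed_transaction(&mut *txn, order, next_nonce) {
    publish(tx);
    next_nonce += 1;
  }
}
```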
@@ -1,207 +0,0 @@
-use std::collections::HashMap;
-
-use zeroize::Zeroizing;
-
-use rand_core::SeedableRng;
-use rand_chacha::ChaCha20Rng;
-
-use transcript::{Transcript, RecommendedTranscript};
-use ciphersuite::{Ciphersuite, Ristretto};
-use frost::{
-  FrostError,
-  dkg::{Participant, musig::musig},
-  sign::*,
-};
-use frost_schnorrkel::Schnorrkel;
-
-use serai_client::validator_sets::primitives::{KeyPair, musig_context, set_keys_message};
-
-use crate::tributary::TributarySpec;
-
-/*
-  The following confirms the results of the DKG performed by the Processors onto Substrate.
-
-  This is done by a signature over the generated key pair by the validators' MuSig-aggregated
-  public key. The MuSig-aggregation achieves on-chain efficiency and prevents on-chain censorship
-  of individual validator's DKG results by the Serai validator set.
-
-  Since we're using the validators public keys, as needed for their being the root of trust, the
-  coordinator must perform the signing. This is distinct from all other group-signing operations
-  which are generally done by the processor.
-
-  Instead of maintaining state, the following rebuilds the full state on every call. This is deemed
-  acceptable re: performance as:
-
-  1) The DKG confirmation is only done upon the start of the Tributary.
-  2) This is an O(n) algorithm.
-  3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET.
-
-  Accordingly, this should be infrequently ran and of tolerable algorithmic complexity.
-
-  As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This is in
-  contradiction with our rebuilding which is dependent on deterministic nonces. Safety is derived
-  from the deterministic nonces being context-bound under a BFT protocol. The flow is as follows:
-
-  1) Derive a deterministic nonce by hashing the private key, Tributary parameters, and attempt.
-  2) Publish the nonces' commitments, receiving everyone elses *and the DKG shares determining the
-     message to be signed*.
-  3) Sign and publish the signature share.
-
-  In order for nonce re-use to occur, the received nonce commitments, or the received DKG shares,
-  would have to be distinct and sign would have to be called again.
-
-  Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The
-  only way to operate on distinct received messages would be if:
-
-  1) A logical flaw exists, letting new messages over write prior messages
-  2) A reorganization occured from chain A to chain B, and with it, different messages
-
-  Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While
-  a significant amount of processes may be byzantine, leading to BFT being broken, that still will
-  not trigger a reorganization. The only way to move to a distinct chain, with distinct messages,
-  would be by rebuilding the local process entirely (this time following chain B).
-
-  Accordingly, safety follows if:
-
-  1) The local view of received messages is static
-  2) The local process doesn't rebuild after a byzantine fault produces multiple blockchains
-
-  We assume the former. We can prevent the latter (TODO) by:
-
-  1) Defining a per-build entropy, used so long as a DB is used.
-  2) Checking the initially used commitments for the DKG align with the per-build entropy.
-
-  If a rebuild occurs, which is the only way we could follow a distinct blockchain, our entropy
-  will change (preventing nonce reuse).
-
-  This will allow a validator to still participate in DKGs within a single build, even if they have
-  spontaneous reboots, and on collapse triggering a rebuild, they don't lose safety.
-
-  TODO: We also need to review how we're handling Processor preprocesses and likely implement the
-  same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.
-*/
-pub(crate) struct DkgConfirmer;
-impl DkgConfirmer {
-  // Convert the passed in HashMap, which uses the validators' start index for their `s` threshold
-  // shares, to the indexes needed for MuSig
-  fn from_threshold_i_to_musig_i(
-    spec: &TributarySpec,
-    mut old_map: HashMap<Participant, Vec<u8>>,
-  ) -> HashMap<Participant, Vec<u8>> {
-    let mut new_map = HashMap::new();
-    for (new_i, validator) in spec.validators().into_iter().enumerate() {
-      let threshold_i = spec.i(validator.0).unwrap();
-      if let Some(value) = old_map.remove(&threshold_i.start) {
-        new_map.insert(Participant::new(u16::try_from(new_i + 1).unwrap()).unwrap(), value);
-      }
-    }
-    new_map
-  }
-
-  fn preprocess_internal(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-  ) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
-    let validators = spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
-
-    let context = musig_context(spec.set());
-    let mut chacha = ChaCha20Rng::from_seed({
-      let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy");
-      entropy_transcript.append_message(b"spec", spec.serialize());
-      entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes()));
-      entropy_transcript.append_message(b"attempt", attempt.to_le_bytes());
-      Zeroizing::new(entropy_transcript).rng_seed(b"preprocess")
-    });
-    let (machine, preprocess) = AlgorithmMachine::new(
-      Schnorrkel::new(b"substrate"),
-      musig(&context, key, &validators)
-        .expect("confirming the DKG for a set we aren't in/validator present multiple times")
-        .into(),
-    )
-    .preprocess(&mut chacha);
-
-    (machine, preprocess.serialize().try_into().unwrap())
-  }
-  // Get the preprocess for this confirmation.
-  pub(crate) fn preprocess(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-  ) -> [u8; 64] {
-    Self::preprocess_internal(spec, key, attempt).1
-  }
-
-  fn share_internal(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-    preprocesses: HashMap<Participant, Vec<u8>>,
-    key_pair: &KeyPair,
-  ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
-    let machine = Self::preprocess_internal(spec, key, attempt).0;
-    let preprocesses = Self::from_threshold_i_to_musig_i(spec, preprocesses)
-      .into_iter()
-      .map(|(p, preprocess)| {
-        machine
-          .read_preprocess(&mut preprocess.as_slice())
-          .map(|preprocess| (p, preprocess))
-          .map_err(|_| p)
-      })
-      .collect::<Result<HashMap<_, _>, _>>()?;
-    let (machine, share) = machine
-      .sign(preprocesses, &set_keys_message(&spec.set(), key_pair))
-      .map_err(|e| match e {
-        FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
-        FrostError::InvalidParticipant(_, _) |
-        FrostError::InvalidSigningSet(_) |
-        FrostError::InvalidParticipantQuantity(_, _) |
-        FrostError::DuplicatedParticipant(_) |
-        FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
-        FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
-      })?;
-
-    Ok((machine, share.serialize().try_into().unwrap()))
-  }
-  // Get the share for this confirmation, if the preprocesses are valid.
-  pub(crate) fn share(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-    preprocesses: HashMap<Participant, Vec<u8>>,
-    key_pair: &KeyPair,
-  ) -> Result<[u8; 32], Participant> {
-    Self::share_internal(spec, key, attempt, preprocesses, key_pair).map(|(_, share)| share)
-  }
-
-  pub(crate) fn complete(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-    preprocesses: HashMap<Participant, Vec<u8>>,
-    key_pair: &KeyPair,
-    shares: HashMap<Participant, Vec<u8>>,
-  ) -> Result<[u8; 64], Participant> {
-    let machine = Self::share_internal(spec, key, attempt, preprocesses, key_pair)
-      .expect("trying to complete a machine which failed to preprocess")
-      .0;
-
-    let shares = Self::from_threshold_i_to_musig_i(spec, shares)
-      .into_iter()
-      .map(|(p, share)| {
-        machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)
-      })
-      .collect::<Result<HashMap<_, _>, _>>()?;
-    let signature = machine.complete(shares).map_err(|e| match e {
-      FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
-      FrostError::InvalidParticipant(_, _) |
-      FrostError::InvalidSigningSet(_) |
-      FrostError::InvalidParticipantQuantity(_, _) |
-      FrostError::DuplicatedParticipant(_) |
-      FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
-      FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
-    })?;
-
-    Ok(signature.to_bytes())
-  }
-}
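The commentary removed above hinges on one fact: reusing a nonce across two distinct messages reveals the private key. Spelling that algebra out (standard Schnorr, not project-specific), for two signatures s1 and s2 sharing the nonce r under distinct challenges c1 and c2:

    s1 = r + c1 * x
    s2 = r + c2 * x
    s1 - s2 = (c1 - c2) * x  =>  x = (s1 - s2) / (c1 - c2)

Anyone holding both signatures thus recovers the private key x. That is why the removed code derives its nonce from a transcript binding the spec, key, and attempt, and why its commentary leans on BFT finality to fix the received messages for a given attempt.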
@@ -1,241 +0,0 @@
-use core::ops::Deref;
-use std::collections::HashMap;
-
-use zeroize::Zeroizing;
-
-use rand_core::SeedableRng;
-use rand_chacha::ChaCha20Rng;
-
-use transcript::{Transcript, RecommendedTranscript};
-use ciphersuite::{
-  group::{Group, GroupEncoding},
-  Ciphersuite, Ristretto,
-};
-use frost::{
-  FrostError,
-  dkg::{Participant, musig::musig},
-  sign::*,
-};
-use frost_schnorrkel::Schnorrkel;
-
-use serai_client::{
-  Public, SeraiAddress,
-  validator_sets::primitives::{musig_context, remove_participant_message},
-};
-
-use crate::tributary::TributarySpec;
-
-/*
-  The following is a clone of DkgConfirmer modified for DKG removals.
-
-  The notable difference is this uses a MuSig key of the first `t` participants to respond with
-  preprocesses, not all `n` participants.
-
-  TODO: Exact same commentary on seeded RNGs. The following can drop its seeded RNG if cached
-  preprocesses are used to carry the preprocess between machines
-*/
-pub(crate) struct DkgRemoval;
-impl DkgRemoval {
-  // Convert the passed in HashMap, which uses the validators' start index for their `s` threshold
-  // shares, to the indexes needed for MuSig
-  fn from_threshold_i_to_musig_i(
-    mut old_map: HashMap<[u8; 32], Vec<u8>>,
-  ) -> HashMap<Participant, Vec<u8>> {
-    let mut new_map = HashMap::new();
-    let mut participating = old_map.keys().cloned().collect::<Vec<_>>();
-    participating.sort();
-    for (i, participating) in participating.into_iter().enumerate() {
-      new_map.insert(
-        Participant::new(u16::try_from(i + 1).unwrap()).unwrap(),
-        old_map.remove(&participating).unwrap(),
-      );
-    }
-    new_map
-  }
-
-  fn preprocess_rng(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-  ) -> ChaCha20Rng {
-    ChaCha20Rng::from_seed({
-      let mut entropy_transcript = RecommendedTranscript::new(b"DkgRemoval Entropy");
-      entropy_transcript.append_message(b"spec", spec.serialize());
-      entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes()));
-      entropy_transcript.append_message(b"attempt", attempt.to_le_bytes());
-      Zeroizing::new(entropy_transcript).rng_seed(b"preprocess")
-    })
-  }
-
-  fn preprocess_internal(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-    participants: Option<&[<Ristretto as Ciphersuite>::G]>,
-  ) -> (Option<AlgorithmSignMachine<Ristretto, Schnorrkel>>, [u8; 64]) {
-    // TODO: Diversify this among DkgConfirmer/DkgRemoval?
-    let context = musig_context(spec.set());
-
-    let (_, preprocess) = AlgorithmMachine::new(
-      Schnorrkel::new(b"substrate"),
-      // Preprocess with our key alone as we don't know the signing set yet
-      musig(&context, key, &[<Ristretto as Ciphersuite>::G::generator() * key.deref()])
-        .expect("couldn't get the MuSig key of our key alone")
-        .into(),
-    )
-    .preprocess(&mut Self::preprocess_rng(spec, key, attempt));
-
-    let machine = if let Some(participants) = participants {
-      let (machine, actual_preprocess) = AlgorithmMachine::new(
-        Schnorrkel::new(b"substrate"),
-        // Preprocess with our key alone as we don't know the signing set yet
-        musig(&context, key, participants)
-          .expect(
-            "couldn't create a MuSig key for the DKG removal we're supposedly participating in",
-          )
-          .into(),
-      )
-      .preprocess(&mut Self::preprocess_rng(spec, key, attempt));
-      // Doesn't use assert_eq due to lack of Debug
-      assert!(preprocess == actual_preprocess);
-      Some(machine)
-    } else {
-      None
-    };
-
-    (machine, preprocess.serialize().try_into().unwrap())
-  }
-  // Get the preprocess for this confirmation.
-  pub(crate) fn preprocess(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-  ) -> [u8; 64] {
-    Self::preprocess_internal(spec, key, attempt, None).1
-  }
-
-  fn share_internal(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-    mut preprocesses: HashMap<Participant, Vec<u8>>,
-    removed: [u8; 32],
-  ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
-    // TODO: Remove this ugly blob
-    let preprocesses = {
-      let mut preprocesses_participants = preprocesses.keys().cloned().collect::<Vec<_>>();
-      preprocesses_participants.sort();
-      let mut actual_keys = vec![];
-      let spec_validators = spec.validators();
-      for participant in &preprocesses_participants {
-        for (validator, _) in &spec_validators {
-          if participant == &spec.i(*validator).unwrap().start {
-            actual_keys.push(*validator);
-          }
-        }
-      }
-
-      let mut new_preprocesses = HashMap::new();
-      for (participant, actual_key) in
-        preprocesses_participants.into_iter().zip(actual_keys.into_iter())
-      {
-        new_preprocesses.insert(actual_key, preprocesses.remove(&participant).unwrap());
-      }
-      new_preprocesses
-    };
-
-    let participants = preprocesses.keys().cloned().collect::<Vec<_>>();
-    let preprocesses = Self::from_threshold_i_to_musig_i(
-      preprocesses.into_iter().map(|(key, preprocess)| (key.to_bytes(), preprocess)).collect(),
-    );
-    let machine = Self::preprocess_internal(spec, key, attempt, Some(&participants)).0.unwrap();
-    let preprocesses = preprocesses
-      .into_iter()
-      .map(|(p, preprocess)| {
-        machine
-          .read_preprocess(&mut preprocess.as_slice())
-          .map(|preprocess| (p, preprocess))
-          .map_err(|_| p)
-      })
-      .collect::<Result<HashMap<_, _>, _>>()?;
-    let (machine, share) = machine
-      .sign(preprocesses, &remove_participant_message(&spec.set(), Public(removed)))
-      .map_err(|e| match e {
-        FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
-        FrostError::InvalidParticipant(_, _) |
-        FrostError::InvalidSigningSet(_) |
-        FrostError::InvalidParticipantQuantity(_, _) |
-        FrostError::DuplicatedParticipant(_) |
-        FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
-        FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
-      })?;
-
-    Ok((machine, share.serialize().try_into().unwrap()))
-  }
-  // Get the share for this confirmation, if the preprocesses are valid.
-  pub(crate) fn share(
-    spec: &TributarySpec,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    attempt: u32,
-    preprocesses: HashMap<Participant, Vec<u8>>,
removed: [u8; 32],
|
|
||||||
) -> Result<[u8; 32], Participant> {
|
|
||||||
Self::share_internal(spec, key, attempt, preprocesses, removed).map(|(_, share)| share)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn complete(
|
|
||||||
spec: &TributarySpec,
|
|
||||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
|
||||||
attempt: u32,
|
|
||||||
preprocesses: HashMap<Participant, Vec<u8>>,
|
|
||||||
removed: [u8; 32],
|
|
||||||
mut shares: HashMap<Participant, Vec<u8>>,
|
|
||||||
) -> Result<(Vec<SeraiAddress>, [u8; 64]), Participant> {
|
|
||||||
// TODO: Remove this ugly blob
|
|
||||||
let shares = {
|
|
||||||
let mut shares_participants = shares.keys().cloned().collect::<Vec<_>>();
|
|
||||||
shares_participants.sort();
|
|
||||||
let mut actual_keys = vec![];
|
|
||||||
let spec_validators = spec.validators();
|
|
||||||
for participant in &shares_participants {
|
|
||||||
for (validator, _) in &spec_validators {
|
|
||||||
if participant == &spec.i(*validator).unwrap().start {
|
|
||||||
actual_keys.push(*validator);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut new_shares = HashMap::new();
|
|
||||||
for (participant, actual_key) in shares_participants.into_iter().zip(actual_keys.into_iter())
|
|
||||||
{
|
|
||||||
new_shares.insert(actual_key.to_bytes(), shares.remove(&participant).unwrap());
|
|
||||||
}
|
|
||||||
new_shares
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut signers = shares.keys().cloned().map(SeraiAddress).collect::<Vec<_>>();
|
|
||||||
signers.sort();
|
|
||||||
|
|
||||||
let machine = Self::share_internal(spec, key, attempt, preprocesses, removed)
|
|
||||||
.expect("trying to complete a machine which failed to preprocess")
|
|
||||||
.0;
|
|
||||||
|
|
||||||
let shares = Self::from_threshold_i_to_musig_i(shares)
|
|
||||||
.into_iter()
|
|
||||||
.map(|(p, share)| {
|
|
||||||
machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)
|
|
||||||
})
|
|
||||||
.collect::<Result<HashMap<_, _>, _>>()?;
|
|
||||||
let signature = machine.complete(shares).map_err(|e| match e {
|
|
||||||
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
|
|
||||||
FrostError::InvalidParticipant(_, _) |
|
|
||||||
FrostError::InvalidSigningSet(_) |
|
|
||||||
FrostError::InvalidParticipantQuantity(_, _) |
|
|
||||||
FrostError::DuplicatedParticipant(_) |
|
|
||||||
FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
|
|
||||||
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok((signers, signature.to_bytes()))
|
|
||||||
}
|
|
||||||
}
|
|
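The re-indexing performed by `from_threshold_i_to_musig_i` above is the subtle part of this flow: the Tributary identifies a validator by the start index of their key-share range, while MuSig expects participants numbered 1 ..= n in the order of their sorted keys. The following is a minimal, self-contained sketch of that re-indexing, using plain byte keys and `u16` indices in place of the `frost` types (all names here are illustrative):

use std::collections::HashMap;

// Re-key a map from 32-byte public keys to contiguous, 1-based MuSig indices,
// assigned by the keys' sorted order.
fn to_musig_indices(mut by_key: HashMap<[u8; 32], Vec<u8>>) -> HashMap<u16, Vec<u8>> {
  let mut keys = by_key.keys().copied().collect::<Vec<_>>();
  // MuSig participants are canonically ordered by their serialized keys
  keys.sort();

  let mut by_index = HashMap::new();
  for (i, key) in keys.into_iter().enumerate() {
    by_index.insert(u16::try_from(i + 1).unwrap(), by_key.remove(&key).unwrap());
  }
  by_index
}

fn main() {
  let mut map = HashMap::new();
  map.insert([2; 32], b"from key 2".to_vec());
  map.insert([1; 32], b"from key 1".to_vec());
  let indexed = to_musig_indices(map);
  // The lexicographically smallest key receives index 1
  assert_eq!(indexed[&1], b"from key 1".to_vec());
  assert_eq!(indexed[&2], b"from key 2".to_vec());
}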
File diff suppressed because it is too large.
@@ -1,852 +1,63 @@
-use core::{
-  ops::{Deref, Range},
-  fmt::Debug,
-};
-use std::io::{self, Read, Write};

-use zeroize::Zeroizing;
-use rand_core::{RngCore, CryptoRng};

-use blake2::{Digest, Blake2s256};
-use transcript::{Transcript, RecommendedTranscript};

-use ciphersuite::{
-  group::{ff::Field, GroupEncoding},
-  Ciphersuite, Ristretto,
-};
-use schnorr::SchnorrSignature;
-use frost::Participant;

-use scale::{Encode, Decode};
-use processor_messages::coordinator::SubstrateSignableId;

-use serai_client::{
-  primitives::{NetworkId, PublicKey},
-  validator_sets::primitives::{Session, ValidatorSet},
-};

-#[rustfmt::skip]
 use tributary::{
   ReadWrite,
-  transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait},
-  TRANSACTION_SIZE_LIMIT,
+  transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait},
+  Tributary,
 };

 mod db;
 pub use db::*;

-mod dkg_confirmer;
-mod dkg_removal;
+mod spec;
+pub use spec::TributarySpec;

+mod transaction;
+pub use transaction::{Label, SignData, Transaction};

+mod signing_protocol;

 mod handle;
 pub use handle::*;

 pub mod scanner;

-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct TributarySpec {
-  serai_block: [u8; 32],
-  start_time: u64,
-  set: ValidatorSet,
-  validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
-}
-
-impl TributarySpec {
-  pub fn new(
-    serai_block: [u8; 32],
-    start_time: u64,
-    set: ValidatorSet,
-    set_participants: Vec<(PublicKey, u16)>,
-  ) -> TributarySpec {
-    let mut validators = vec![];
-    for (participant, shares) in set_participants {
-      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
-        .expect("invalid key registered as participant");
-      validators.push((participant, shares));
-    }
-
-    Self { serai_block, start_time, set, validators }
-  }
-
-  pub fn set(&self) -> ValidatorSet {
-    self.set
-  }
-
-  pub fn genesis(&self) -> [u8; 32] {
-    // Calculate the genesis for this Tributary
-    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
-    // This locks it to a specific Serai chain
-    genesis.append_message(b"serai_block", self.serai_block);
-    genesis.append_message(b"session", self.set.session.0.to_le_bytes());
-    genesis.append_message(b"network", self.set.network.encode());
-    let genesis = genesis.challenge(b"genesis");
-    let genesis_ref: &[u8] = genesis.as_ref();
-    genesis_ref[.. 32].try_into().unwrap()
-  }
-
-  pub fn start_time(&self) -> u64 {
-    self.start_time
-  }
-
-  pub fn n(&self) -> u16 {
-    self.validators.iter().map(|(_, weight)| weight).sum()
-  }
-
-  pub fn t(&self) -> u16 {
-    ((2 * self.n()) / 3) + 1
-  }
-
-  pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
-    let mut i = 1;
-    for (validator, weight) in &self.validators {
-      if validator == &key {
-        return Some(Range {
-          start: Participant::new(i).unwrap(),
-          end: Participant::new(i + weight).unwrap(),
-        });
-      }
-      i += weight;
-    }
-    None
-  }
-
-  pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
-    self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
-  }
-
-  pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
-    writer.write_all(&self.serai_block)?;
-    writer.write_all(&self.start_time.to_le_bytes())?;
-    writer.write_all(&self.set.session.0.to_le_bytes())?;
-    let network_encoded = self.set.network.encode();
-    assert_eq!(network_encoded.len(), 1);
-    writer.write_all(&network_encoded)?;
-    writer.write_all(&u32::try_from(self.validators.len()).unwrap().to_le_bytes())?;
-    for validator in &self.validators {
-      writer.write_all(&validator.0.to_bytes())?;
-      writer.write_all(&validator.1.to_le_bytes())?;
-    }
-    Ok(())
-  }
-
-  pub fn serialize(&self) -> Vec<u8> {
-    let mut res = vec![];
-    self.write(&mut res).unwrap();
-    res
-  }
-
-  pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
-    let mut serai_block = [0; 32];
-    reader.read_exact(&mut serai_block)?;
-
-    let mut start_time = [0; 8];
-    reader.read_exact(&mut start_time)?;
-    let start_time = u64::from_le_bytes(start_time);
-
-    let mut session = [0; 4];
-    reader.read_exact(&mut session)?;
-    let session = Session(u32::from_le_bytes(session));
-
-    let mut network = [0; 1];
-    reader.read_exact(&mut network)?;
-    let network =
-      NetworkId::decode(&mut &network[..]).map_err(|_| io::Error::other("invalid network"))?;
-
-    let mut validators_len = [0; 4];
-    reader.read_exact(&mut validators_len)?;
-    let validators_len = usize::try_from(u32::from_le_bytes(validators_len)).unwrap();
-
-    let mut validators = Vec::with_capacity(validators_len);
-    for _ in 0 .. validators_len {
-      let key = Ristretto::read_G(reader)?;
-      let mut weight = [0; 2];
-      reader.read_exact(&mut weight)?;
-      validators.push((key, u16::from_le_bytes(weight)));
-    }
-
-    Ok(Self { serai_block, start_time, set: ValidatorSet { session, network }, validators })
-  }
-}
-
-#[derive(Clone, PartialEq, Eq)]
-pub struct SignData<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> {
-  pub plan: Id,
-  pub attempt: u32,
-
-  pub data: Vec<Vec<u8>>,
-
-  pub signed: Signed,
-}
-
-impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> Debug for SignData<Id> {
-  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
-    fmt
-      .debug_struct("SignData")
-      .field("id", &hex::encode(self.plan.encode()))
-      .field("attempt", &self.attempt)
-      .field("signer", &hex::encode(self.signed.signer.to_bytes()))
-      .finish_non_exhaustive()
-  }
-}
-
-impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
-  pub(crate) fn read<R: io::Read>(reader: &mut R, nonce: u32) -> io::Result<Self> {
-    let plan = Id::decode(&mut scale::IoReader(&mut *reader))
-      .map_err(|_| io::Error::other("invalid plan in SignData"))?;
-
-    let mut attempt = [0; 4];
-    reader.read_exact(&mut attempt)?;
-    let attempt = u32::from_le_bytes(attempt);
-
-    let data = {
-      let mut data_pieces = [0];
-      reader.read_exact(&mut data_pieces)?;
-      if data_pieces[0] == 0 {
-        Err(io::Error::other("zero pieces of data in SignData"))?;
-      }
-      let mut all_data = vec![];
-      for _ in 0 .. data_pieces[0] {
-        let mut data_len = [0; 2];
-        reader.read_exact(&mut data_len)?;
-        let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
-        reader.read_exact(&mut data)?;
-        all_data.push(data);
-      }
-      all_data
-    };
-
-    let signed = Signed::read_without_nonce(reader, nonce)?;
-
-    Ok(SignData { plan, attempt, data, signed })
-  }
-
-  pub(crate) fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    writer.write_all(&self.plan.encode())?;
-    writer.write_all(&self.attempt.to_le_bytes())?;
-
-    writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
-    for data in &self.data {
-      if data.len() > u16::MAX.into() {
-        // Currently, the largest individual preprocess is a Monero transaction
-        // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
-        // key image and proof (96 bytes)
-        // Even with all of that, we could support 227 inputs in a single TX
-        // Monero is limited to ~120 inputs per TX
-        //
-        // Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess
-        Err(io::Error::other("signing data exceeded 65535 bytes"))?;
-      }
-      writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
-      writer.write_all(data)?;
-    }
-
-    self.signed.write_without_nonce(writer)
-  }
-}
-
-#[derive(Clone, PartialEq, Eq)]
-pub enum Transaction {
-  RemoveParticipant(Participant),
-
-  // Once this completes successfully, no more instances should be created.
-  DkgCommitments(u32, Vec<Vec<u8>>, Signed),
-  DkgShares {
-    attempt: u32,
-    // Sending Participant, Receiving Participant, Share
-    shares: Vec<Vec<Vec<u8>>>,
-    confirmation_nonces: [u8; 64],
-    signed: Signed,
-  },
-  InvalidDkgShare {
-    attempt: u32,
-    accuser: Participant,
-    faulty: Participant,
-    blame: Option<Vec<u8>>,
-    signed: Signed,
-  },
-  DkgConfirmed(u32, [u8; 32], Signed),
-
-  DkgRemovalPreprocess(SignData<[u8; 32]>),
-  DkgRemovalShare(SignData<[u8; 32]>),
-
-  // Co-sign a Substrate block.
-  CosignSubstrateBlock([u8; 32]),
-
-  // When we have synchrony on a batch, we can allow signing it
-  // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction,
-  // which would be binding over the block hash and automatically achieve synchrony on all
-  // relevant batches. ExternalBlock was removed for this due to complexity around the pipeline
-  // with the current processor, yet it would still be an improvement.
-  Batch([u8; 32], [u8; 5]),
-  // When a Serai block is finalized, with the contained batches, we can allow the associated plan
-  // IDs
-  SubstrateBlock(u64),
-
-  SubstratePreprocess(SignData<SubstrateSignableId>),
-  SubstrateShare(SignData<SubstrateSignableId>),
-
-  SignPreprocess(SignData<[u8; 32]>),
-  SignShare(SignData<[u8; 32]>),
-  // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst
-  // reporters (who should all report the same thing)
-  // We do still track the signer in order to prevent a single signer from publishing arbitrarily
-  // many TXs without penalty
-  // Here, they're denoted as the first_signer, as only the signer of the first TX to be included
-  // with this pairing will be remembered on-chain
-  SignCompleted {
-    plan: [u8; 32],
-    tx_hash: Vec<u8>,
-    first_signer: <Ristretto as Ciphersuite>::G,
-    signature: SchnorrSignature<Ristretto>,
-  },
-}
-
-impl Debug for Transaction {
-  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
-    match self {
-      Transaction::RemoveParticipant(participant) => fmt
-        .debug_struct("Transaction::RemoveParticipant")
-        .field("participant", participant)
-        .finish(),
-      Transaction::DkgCommitments(attempt, _, signed) => fmt
-        .debug_struct("Transaction::DkgCommitments")
-        .field("attempt", attempt)
-        .field("signer", &hex::encode(signed.signer.to_bytes()))
-        .finish_non_exhaustive(),
-      Transaction::DkgShares { attempt, signed, .. } => fmt
-        .debug_struct("Transaction::DkgShares")
-        .field("attempt", attempt)
-        .field("signer", &hex::encode(signed.signer.to_bytes()))
-        .finish_non_exhaustive(),
-      Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
-        .debug_struct("Transaction::InvalidDkgShare")
-        .field("attempt", attempt)
-        .field("accuser", accuser)
-        .field("faulty", faulty)
-        .finish_non_exhaustive(),
-      Transaction::DkgConfirmed(attempt, _, signed) => fmt
-        .debug_struct("Transaction::DkgConfirmed")
-        .field("attempt", attempt)
-        .field("signer", &hex::encode(signed.signer.to_bytes()))
-        .finish_non_exhaustive(),
-      Transaction::DkgRemovalPreprocess(sign_data) => {
-        fmt.debug_struct("Transaction::DkgRemovalPreprocess").field("sign_data", sign_data).finish()
-      }
-      Transaction::DkgRemovalShare(sign_data) => {
-        fmt.debug_struct("Transaction::DkgRemovalShare").field("sign_data", sign_data).finish()
-      }
-      Transaction::CosignSubstrateBlock(block) => fmt
-        .debug_struct("Transaction::CosignSubstrateBlock")
-        .field("block", &hex::encode(block))
-        .finish(),
-      Transaction::Batch(block, batch) => fmt
-        .debug_struct("Transaction::Batch")
-        .field("block", &hex::encode(block))
-        .field("batch", &hex::encode(batch))
-        .finish(),
-      Transaction::SubstrateBlock(block) => {
-        fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish()
-      }
-      Transaction::SubstratePreprocess(sign_data) => {
-        fmt.debug_struct("Transaction::SubstratePreprocess").field("sign_data", sign_data).finish()
-      }
-      Transaction::SubstrateShare(sign_data) => {
-        fmt.debug_struct("Transaction::SubstrateShare").field("sign_data", sign_data).finish()
-      }
-      Transaction::SignPreprocess(sign_data) => {
-        fmt.debug_struct("Transaction::SignPreprocess").field("sign_data", sign_data).finish()
-      }
-      Transaction::SignShare(sign_data) => {
-        fmt.debug_struct("Transaction::SignShare").field("sign_data", sign_data).finish()
-      }
-      Transaction::SignCompleted { plan, tx_hash, .. } => fmt
-        .debug_struct("Transaction::SignCompleted")
-        .field("plan", &hex::encode(plan))
-        .field("tx_hash", &hex::encode(tx_hash))
-        .finish_non_exhaustive(),
-    }
-  }
-}
-
-impl ReadWrite for Transaction {
-  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-    let mut kind = [0];
-    reader.read_exact(&mut kind)?;
-
-    match kind[0] {
-      0 => Ok(Transaction::RemoveParticipant({
-        let mut participant = [0; 2];
-        reader.read_exact(&mut participant)?;
-        Participant::new(u16::from_le_bytes(participant))
-          .ok_or_else(|| io::Error::other("invalid participant in RemoveParticipant"))?
-      })),
-
-      1 => {
-        let mut attempt = [0; 4];
-        reader.read_exact(&mut attempt)?;
-        let attempt = u32::from_le_bytes(attempt);
-
-        let commitments = {
-          let mut commitments_len = [0; 1];
-          reader.read_exact(&mut commitments_len)?;
-          let commitments_len = usize::from(commitments_len[0]);
-          if commitments_len == 0 {
-            Err(io::Error::other("zero commitments in DkgCommitments"))?;
-          }
-
-          let mut each_commitments_len = [0; 2];
-          reader.read_exact(&mut each_commitments_len)?;
-          let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
-          if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
-            Err(io::Error::other(
-              "commitments present in transaction exceeded transaction size limit",
-            ))?;
-          }
-          let mut commitments = vec![vec![]; commitments_len];
-          for commitments in &mut commitments {
-            *commitments = vec![0; each_commitments_len];
-            reader.read_exact(commitments)?;
-          }
-          commitments
-        };
-
-        let signed = Signed::read_without_nonce(reader, 0)?;
-
-        Ok(Transaction::DkgCommitments(attempt, commitments, signed))
-      }
-
-      2 => {
-        let mut attempt = [0; 4];
-        reader.read_exact(&mut attempt)?;
-        let attempt = u32::from_le_bytes(attempt);
-
-        let shares = {
-          let mut share_quantity = [0; 1];
-          reader.read_exact(&mut share_quantity)?;
-
-          let mut key_share_quantity = [0; 1];
-          reader.read_exact(&mut key_share_quantity)?;
-
-          let mut share_len = [0; 2];
-          reader.read_exact(&mut share_len)?;
-          let share_len = usize::from(u16::from_le_bytes(share_len));
-
-          let mut all_shares = vec![];
-          for _ in 0 .. share_quantity[0] {
-            let mut shares = vec![];
-            for _ in 0 .. key_share_quantity[0] {
-              let mut share = vec![0; share_len];
-              reader.read_exact(&mut share)?;
-              shares.push(share);
-            }
-            all_shares.push(shares);
-          }
-          all_shares
-        };
-
-        let mut confirmation_nonces = [0; 64];
-        reader.read_exact(&mut confirmation_nonces)?;
-
-        let signed = Signed::read_without_nonce(reader, 1)?;
-
-        Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
-      }
-
-      3 => {
-        let mut attempt = [0; 4];
-        reader.read_exact(&mut attempt)?;
-        let attempt = u32::from_le_bytes(attempt);
-
-        let mut accuser = [0; 2];
-        reader.read_exact(&mut accuser)?;
-        let accuser = Participant::new(u16::from_le_bytes(accuser))
-          .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
-
-        let mut faulty = [0; 2];
-        reader.read_exact(&mut faulty)?;
-        let faulty = Participant::new(u16::from_le_bytes(faulty))
-          .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
-
-        let mut blame_len = [0; 2];
-        reader.read_exact(&mut blame_len)?;
-        let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
-        reader.read_exact(&mut blame)?;
-
-        // This shares a nonce with DkgConfirmed as only one is expected
-        let signed = Signed::read_without_nonce(reader, 2)?;
-
-        Ok(Transaction::InvalidDkgShare {
-          attempt,
-          accuser,
-          faulty,
-          blame: Some(blame).filter(|blame| !blame.is_empty()),
-          signed,
-        })
-      }
-
-      4 => {
-        let mut attempt = [0; 4];
-        reader.read_exact(&mut attempt)?;
-        let attempt = u32::from_le_bytes(attempt);
-
-        let mut confirmation_share = [0; 32];
-        reader.read_exact(&mut confirmation_share)?;
-
-        let signed = Signed::read_without_nonce(reader, 2)?;
-
-        Ok(Transaction::DkgConfirmed(attempt, confirmation_share, signed))
-      }
-
-      5 => SignData::read(reader, 0).map(Transaction::DkgRemovalPreprocess),
-      6 => SignData::read(reader, 1).map(Transaction::DkgRemovalShare),
-
-      7 => {
-        let mut block = [0; 32];
-        reader.read_exact(&mut block)?;
-        Ok(Transaction::CosignSubstrateBlock(block))
-      }
-
-      8 => {
-        let mut block = [0; 32];
-        reader.read_exact(&mut block)?;
-        let mut batch = [0; 5];
-        reader.read_exact(&mut batch)?;
-        Ok(Transaction::Batch(block, batch))
-      }
-
-      9 => {
-        let mut block = [0; 8];
-        reader.read_exact(&mut block)?;
-        Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
-      }
-
-      10 => SignData::read(reader, 0).map(Transaction::SubstratePreprocess),
-      11 => SignData::read(reader, 1).map(Transaction::SubstrateShare),
-
-      12 => SignData::read(reader, 0).map(Transaction::SignPreprocess),
-      13 => SignData::read(reader, 1).map(Transaction::SignShare),
-
-      14 => {
-        let mut plan = [0; 32];
-        reader.read_exact(&mut plan)?;
-
-        let mut tx_hash_len = [0];
-        reader.read_exact(&mut tx_hash_len)?;
-        let mut tx_hash = vec![0; usize::from(tx_hash_len[0])];
-        reader.read_exact(&mut tx_hash)?;
-
-        let first_signer = Ristretto::read_G(reader)?;
-        let signature = SchnorrSignature::<Ristretto>::read(reader)?;
-
-        Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
-      }
-
-      _ => Err(io::Error::other("invalid transaction type")),
-    }
-  }
-
-  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    match self {
-      Transaction::RemoveParticipant(i) => {
-        writer.write_all(&[0])?;
-        writer.write_all(&u16::from(*i).to_le_bytes())
-      }
-
-      Transaction::DkgCommitments(attempt, commitments, signed) => {
-        writer.write_all(&[1])?;
-        writer.write_all(&attempt.to_le_bytes())?;
-        if commitments.is_empty() {
-          Err(io::Error::other("zero commitments in DkgCommitments"))?
-        }
-        writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
-        for commitments_i in commitments {
-          if commitments_i.len() != commitments[0].len() {
-            Err(io::Error::other("commitments of differing sizes in DkgCommitments"))?
-          }
-        }
-        writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
-        for commitments in commitments {
-          writer.write_all(commitments)?;
-        }
-        signed.write_without_nonce(writer)
-      }
-
-      Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
-        writer.write_all(&[2])?;
-        writer.write_all(&attempt.to_le_bytes())?;
-
-        // `shares` is a Vec which is supposed to map to a HashMap<Pariticpant, Vec<u8>>. Since we
-        // bound participants to 150, this conversion is safe if a valid in-memory transaction.
-        writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
-        // This assumes at least one share is being sent to another party
-        writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
-        let share_len = shares[0][0].len();
-        // For BLS12-381 G2, this would be:
-        // - A 32-byte share
-        // - A 96-byte ephemeral key
-        // - A 128-byte signature
-        // Hence why this has to be u16
-        writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;
-
-        for these_shares in shares {
-          assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
-          for share in these_shares {
-            assert_eq!(share.len(), share_len, "sent shares were of variable length");
-            writer.write_all(share)?;
-          }
-        }
-
-        writer.write_all(confirmation_nonces)?;
-        signed.write_without_nonce(writer)
-      }
-
-      Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
-        writer.write_all(&[3])?;
-        writer.write_all(&attempt.to_le_bytes())?;
-        writer.write_all(&u16::from(*accuser).to_le_bytes())?;
-        writer.write_all(&u16::from(*faulty).to_le_bytes())?;
-
-        // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
-        assert!(blame.as_ref().map(|blame| blame.len()).unwrap_or(1) != 0);
-        let blame_len =
-          u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
-        writer.write_all(&blame_len.to_le_bytes())?;
-        writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
-
-        signed.write_without_nonce(writer)
-      }
-
-      Transaction::DkgConfirmed(attempt, share, signed) => {
-        writer.write_all(&[4])?;
-        writer.write_all(&attempt.to_le_bytes())?;
-        writer.write_all(share)?;
-        signed.write_without_nonce(writer)
-      }
-
-      Transaction::DkgRemovalPreprocess(data) => {
-        writer.write_all(&[5])?;
-        data.write(writer)
-      }
-      Transaction::DkgRemovalShare(data) => {
-        writer.write_all(&[6])?;
-        data.write(writer)
-      }
-
-      Transaction::CosignSubstrateBlock(block) => {
-        writer.write_all(&[7])?;
-        writer.write_all(block)
-      }
-
-      Transaction::Batch(block, batch) => {
-        writer.write_all(&[8])?;
-        writer.write_all(block)?;
-        writer.write_all(batch)
-      }
-
-      Transaction::SubstrateBlock(block) => {
-        writer.write_all(&[9])?;
-        writer.write_all(&block.to_le_bytes())
-      }
-
-      Transaction::SubstratePreprocess(data) => {
-        writer.write_all(&[10])?;
-        data.write(writer)
-      }
-      Transaction::SubstrateShare(data) => {
-        writer.write_all(&[11])?;
-        data.write(writer)
-      }
-
-      Transaction::SignPreprocess(data) => {
-        writer.write_all(&[12])?;
-        data.write(writer)
-      }
-      Transaction::SignShare(data) => {
-        writer.write_all(&[13])?;
-        data.write(writer)
-      }
-      Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
-        writer.write_all(&[14])?;
-        writer.write_all(plan)?;
-        writer
-          .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
-        writer.write_all(tx_hash)?;
-        writer.write_all(&first_signer.to_bytes())?;
-        signature.write(writer)
-      }
-    }
-  }
-}
-
-impl TransactionTrait for Transaction {
-  fn kind(&self) -> TransactionKind<'_> {
-    match self {
-      Transaction::RemoveParticipant(_) => TransactionKind::Provided("remove"),
-
-      Transaction::DkgCommitments(attempt, _, signed) => {
-        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
-      }
-      Transaction::DkgShares { attempt, signed, .. } => {
-        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
-      }
-      Transaction::InvalidDkgShare { attempt, signed, .. } => {
-        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
-      }
-      Transaction::DkgConfirmed(attempt, _, signed) => {
-        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
-      }
-
-      Transaction::DkgRemovalPreprocess(data) => {
-        TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed)
-      }
-      Transaction::DkgRemovalShare(data) => {
-        TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed)
-      }
-
-      Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
-
-      Transaction::Batch(_, _) => TransactionKind::Provided("batch"),
-      Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"),
-
-      Transaction::SubstratePreprocess(data) => {
-        TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed)
-      }
-      Transaction::SubstrateShare(data) => {
-        TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed)
-      }
-
-      Transaction::SignPreprocess(data) => {
-        TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed)
-      }
-      Transaction::SignShare(data) => {
-        TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed)
-      }
-      Transaction::SignCompleted { .. } => TransactionKind::Unsigned,
-    }
-  }
-
-  fn hash(&self) -> [u8; 32] {
-    let mut tx = self.serialize();
-    if let TransactionKind::Signed(_, signed) = self.kind() {
-      // Make sure the part we're cutting off is the signature
-      assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
-    }
-    Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into()
-  }
-
-  fn verify(&self) -> Result<(), TransactionError> {
-    if let Transaction::SubstrateShare(data) = self {
-      for data in &data.data {
-        if data.len() != 32 {
-          Err(TransactionError::InvalidContent)?;
-        }
-      }
-    }
-
-    if let Transaction::SignCompleted { first_signer, signature, .. } = self {
-      if !signature.verify(*first_signer, self.sign_completed_challenge()) {
-        Err(TransactionError::InvalidContent)?;
-      }
-    }
-
-    Ok(())
-  }
-}
-
-impl Transaction {
-  // Used to initially construct transactions so we can then get sig hashes and perform signing
-  pub fn empty_signed() -> Signed {
-    Signed {
-      signer: Ristretto::generator(),
-      nonce: 0,
-      signature: SchnorrSignature::<Ristretto> {
-        R: Ristretto::generator(),
-        s: <Ristretto as Ciphersuite>::F::ZERO,
-      },
-    }
-  }
-
-  // Sign a transaction
-  pub fn sign<R: RngCore + CryptoRng>(
-    &mut self,
-    rng: &mut R,
-    genesis: [u8; 32],
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  ) {
-    fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
-      let nonce = match tx {
-        Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
-
-        Transaction::DkgCommitments(_, _, _) => 0,
-        Transaction::DkgShares { .. } => 1,
-        Transaction::InvalidDkgShare { .. } => 2,
-        Transaction::DkgConfirmed(_, _, _) => 2,
-
-        Transaction::DkgRemovalPreprocess(_) => 0,
-        Transaction::DkgRemovalShare(_) => 1,
-
-        Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
-
-        Transaction::Batch(_, _) => panic!("signing Batch"),
-        Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
-
-        Transaction::SubstratePreprocess(_) => 0,
-        Transaction::SubstrateShare(_) => 1,
-
-        Transaction::SignPreprocess(_) => 0,
-        Transaction::SignShare(_) => 1,
-        Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
-      };
-
-      (
-        nonce,
-        match tx {
-          Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
-
-          Transaction::DkgCommitments(_, _, ref mut signed) => signed,
-          Transaction::DkgShares { ref mut signed, .. } => signed,
-          Transaction::InvalidDkgShare { ref mut signed, .. } => signed,
-          Transaction::DkgConfirmed(_, _, ref mut signed) => signed,
-
-          Transaction::DkgRemovalPreprocess(ref mut data) => &mut data.signed,
-          Transaction::DkgRemovalShare(ref mut data) => &mut data.signed,
-
-          Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
-
-          Transaction::Batch(_, _) => panic!("signing Batch"),
-          Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
-
-          Transaction::SubstratePreprocess(ref mut data) => &mut data.signed,
-          Transaction::SubstrateShare(ref mut data) => &mut data.signed,
-
-          Transaction::SignPreprocess(ref mut data) => &mut data.signed,
-          Transaction::SignShare(ref mut data) => &mut data.signed,
-          Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
-        },
-      )
-    }
-
-    let (nonce, signed_ref) = signed(self);
-    signed_ref.signer = Ristretto::generator() * key.deref();
-    signed_ref.nonce = nonce;
-
-    let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
-    signed(self).1.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
-    let sig_hash = self.sig_hash(genesis);
-    signed(self).1.signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
-  }
-
-  pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F {
-    if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self {
-      let mut transcript =
-        RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted");
-      transcript.append_message(b"plan", plan);
-      transcript.append_message(b"tx_hash", tx_hash);
-      transcript.append_message(b"signer", first_signer.to_bytes());
-      transcript.append_message(b"nonce", signature.R.to_bytes());
-      Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge"))
-    } else {
-      panic!("sign_completed_challenge called on transaction which wasn't SignCompleted")
-    }
-  }
-}
+pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
+  txn: &mut D::Transaction<'_>,
+  tributary: &Tributary<D, Transaction, P>,
+  tx: Transaction,
+) {
+  log::debug!("publishing transaction {}", hex::encode(tx.hash()));
+
+  let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() {
+    let signer = signed.signer;
+    // Safe as we should deterministically create transactions, meaning if this is already on-disk,
+    // it's what we're saving now
+    SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize());
+    (order, signer)
+  } else {
+    panic!("non-signed transaction passed to publish_signed_transaction");
+  };
+
+  // If we're trying to publish 5, when the last transaction published was 3, this will delay
+  // publication until the point in time we publish 4
+  while let Some(tx) = SignedTransactionDb::take_signed_transaction(
+    txn,
+    &order,
+    tributary
+      .next_nonce(&signer, &order)
+      .await
+      .expect("we don't have a nonce, meaning we aren't a participant on this tributary"),
+  ) {
+    // We need to return a proper error here to enable that, due to a race condition around
+    // multiple publications
+    match tributary.add_transaction(tx.clone()).await {
+      Ok(_) => {}
+      // Some asynchonicity if InvalidNonce, assumed safe to deterministic nonces
+      Err(TransactionError::InvalidNonce) => {
+        log::warn!("publishing TX {tx:?} returned InvalidNonce. was it already added?")
+      }
+      Err(e) => panic!("created an invalid transaction: {e:?}"),
+    }
+  }
+}
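The replacement `publish_signed_transaction` above persists each signed transaction under its nonce and only publishes whichever transaction matches the chain's next expected nonce, so attempting to publish nonce 5 while 4 is still pending simply defers it. A minimal sketch of that gating behavior, with an in-memory map standing in for `SignedTransactionDb` and a callback standing in for the Tributary (illustrative names, not the coordinator's actual API):

use std::collections::HashMap;

// Queue of signed transactions, keyed by nonce, drained strictly in nonce order.
struct PublicationQueue {
  pending: HashMap<u32, Vec<u8>>,
}

impl PublicationQueue {
  // Corresponds to saving the freshly signed TX to the DB
  fn save(&mut self, nonce: u32, tx: Vec<u8>) {
    self.pending.insert(nonce, tx);
  }

  // Corresponds to the `while let Some(tx) = take_signed_transaction(next_nonce)` loop:
  // only the transaction matching the chain's next expected nonce is released,
  // which in turn may unblock its successors
  fn publish_ready(&mut self, mut next_nonce: u32, publish: impl Fn(&[u8])) {
    while let Some(tx) = self.pending.remove(&next_nonce) {
      publish(&tx);
      next_nonce += 1;
    }
  }
}

fn main() {
  let mut queue = PublicationQueue { pending: HashMap::new() };
  queue.save(5, b"tx-5".to_vec());
  // Nothing happens: nonce 4 hasn't been queued yet, so 5 stays pending
  queue.publish_ready(4, |tx| println!("published {:?}", tx));
  queue.save(4, b"tx-4".to_vec());
  // Now 4 is released, which unblocks 5
  queue.publish_ready(4, |tx| println!("published {:?}", tx));
}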
@@ -1,9 +1,12 @@
-use core::{future::Future, time::Duration};
+use core::{marker::PhantomData, future::Future, time::Duration};
 use std::sync::Arc;

+use rand_core::OsRng;

 use zeroize::Zeroizing;

-use ciphersuite::{Ciphersuite, Ristretto};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
+use frost::Participant;

 use tokio::sync::broadcast;

@@ -22,9 +25,11 @@ use tributary::{

 use crate::{
   Db,
-  tributary::handle::{fatal_slash, handle_application_tx},
   processors::Processors,
-  tributary::{TributarySpec, Transaction, LastBlock, EventDb},
+  tributary::{
+    TributarySpec, Label, SignData, Transaction, Topic, AttemptDb, LastHandledBlock,
+    FatallySlashed, DkgCompleted, signing_protocol::DkgRemoval,
+  },
   P2p,
 };

@@ -34,13 +39,31 @@ pub enum RecognizedIdType {
   Plan,
 }

-pub(crate) trait RIDTrait<FRid>:
-  Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid
-{
-}
-impl<FRid, F: Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid> RIDTrait<FRid>
-  for F
-{
-}
+#[async_trait::async_trait]
+pub trait RIDTrait {
+  async fn recognized_id(
+    &self,
+    set: ValidatorSet,
+    genesis: [u8; 32],
+    kind: RecognizedIdType,
+    id: Vec<u8>,
+  );
+}
+#[async_trait::async_trait]
+impl<
+    FRid: Send + Future<Output = ()>,
+    F: Sync + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
+  > RIDTrait for F
+{
+  async fn recognized_id(
+    &self,
+    set: ValidatorSet,
+    genesis: [u8; 32],
+    kind: RecognizedIdType,
+    id: Vec<u8>,
+  ) {
+    (self)(set, genesis, kind, id).await
+  }
+}

 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -49,42 +72,123 @@ pub enum PstTxType {
   RemoveParticipant([u8; 32]),
 }

-// Handle a specific Tributary block
-#[allow(clippy::too_many_arguments)]
-async fn handle_block<
-  D: Db,
-  Pro: Processors,
-  FPst: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst,
-  FPtt: Future<Output = ()>,
-  PTT: Clone + Fn(Transaction) -> FPtt,
-  FRid: Future<Output = ()>,
-  RID: RIDTrait<FRid>,
-  P: P2p,
->(
-  db: &mut D,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  recognized_id: RID,
-  processors: &Pro,
-  publish_serai_tx: PST,
-  publish_tributary_tx: &PTT,
-  spec: &TributarySpec,
-  block: Block<Transaction>,
-) {
-  log::info!("found block for Tributary {:?}", spec.set());
-
-  let hash = block.hash();
-
-  let mut event_id = 0;
-  #[allow(clippy::explicit_counter_loop)] // event_id isn't TX index. It just currently lines up
-  for tx in block.transactions {
-    if EventDb::get(db, hash, event_id).is_some() {
-      event_id += 1;
-      continue;
-    }
-
-    let mut txn = db.txn();
+#[async_trait::async_trait]
+pub trait PSTTrait {
+  async fn publish_serai_tx(
+    &self,
+    set: ValidatorSet,
+    kind: PstTxType,
+    tx: serai_client::Transaction,
+  );
+}
+#[async_trait::async_trait]
+impl<
+    FPst: Send + Future<Output = ()>,
+    F: Sync + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst,
+  > PSTTrait for F
+{
+  async fn publish_serai_tx(
+    &self,
+    set: ValidatorSet,
+    kind: PstTxType,
+    tx: serai_client::Transaction,
+  ) {
+    (self)(set, kind, tx).await
+  }
+}

+#[async_trait::async_trait]
+pub trait PTTTrait {
+  async fn publish_tributary_tx(&self, tx: Transaction);
+}
+#[async_trait::async_trait]
+impl<FPtt: Send + Future<Output = ()>, F: Sync + Fn(Transaction) -> FPtt> PTTTrait for F {
+  async fn publish_tributary_tx(&self, tx: Transaction) {
+    (self)(tx).await
+  }
+}

+pub struct TributaryBlockHandler<
+  'a,
+  T: DbTxn,
+  Pro: Processors,
+  PST: PSTTrait,
+  PTT: PTTTrait,
+  RID: RIDTrait,
+  P: P2p,
+> {
+  pub txn: &'a mut T,
+  pub our_key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
+  pub recognized_id: &'a RID,
+  pub processors: &'a Pro,
+  pub publish_serai_tx: &'a PST,
+  pub publish_tributary_tx: &'a PTT,
+  pub spec: &'a TributarySpec,
+  block: Block<Transaction>,
+  _p2p: PhantomData<P>,
+}

+impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
+  TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P>
+{
+  pub async fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
+    let genesis = self.spec.genesis();
+
+    log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason);
+    FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing);
+    // TODO: disconnect the node from network/ban from further participation in all Tributaries
+
+    // TODO: If during DKG, trigger a re-attempt
+    // Despite triggering a re-attempt, this DKG may still complete and may become in-use
+
+    // If during a DKG, remove the participant
+    if DkgCompleted::get(self.txn, genesis).is_none() {
+      AttemptDb::recognize_topic(self.txn, genesis, Topic::DkgRemoval(slashing));
+      let preprocess = (DkgRemoval {
+        spec: self.spec,
+        key: self.our_key,
+        txn: self.txn,
+        removing: slashing,
+        attempt: 0,
+      })
+      .preprocess();
+      let mut tx = Transaction::DkgRemoval(SignData {
+        plan: slashing,
+        attempt: 0,
+        label: Label::Preprocess,
+        data: vec![preprocess.to_vec()],
+        signed: Transaction::empty_signed(),
+      });
+      tx.sign(&mut OsRng, genesis, self.our_key);
+      self.publish_tributary_tx.publish_tributary_tx(tx).await;
+    }
+  }

+  // TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second
+  // Tributary post-DKG
+  // https://github.com/serai-dex/serai/issues/426

+  pub async fn fatal_slash_with_participant_index(&mut self, i: Participant, reason: &str) {
+    // Resolve from Participant to <Ristretto as Ciphersuite>::G
+    let i = u16::from(i);
+    let mut validator = None;
+    for (potential, _) in self.spec.validators() {
+      let v_i = self.spec.i(potential).unwrap();
+      if (u16::from(v_i.start) <= i) && (i < u16::from(v_i.end)) {
+        validator = Some(potential);
+        break;
+      }
+    }
+    let validator = validator.unwrap();
+
+    self.fatal_slash(validator.to_bytes(), reason).await;
+  }

+  async fn handle<D: Db>(mut self) {
+    log::info!("found block for Tributary {:?}", self.spec.set());
+
+    let transactions = self.block.transactions.clone();
+    for tx in transactions {
       match tx {
         TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
           // Since the evidence is on the chain, it should already have been validated
@@ -107,65 +211,42 @@ async fn handle_block<
             },
           );

-          // Since anything with evidence is fundamentally faulty behavior, not just temporal errors,
-          // mark the node as fatally slashed
-          fatal_slash::<D, _, _>(
-            &mut txn,
-            spec,
-            publish_tributary_tx,
-            key,
-            msgs.0.msg.sender,
-            &format!("invalid tendermint messages: {:?}", msgs),
-          )
+          // Since anything with evidence is fundamentally faulty behavior, not just temporal
+          // errors, mark the node as fatally slashed
+          self
+            .fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {:?}", msgs))
             .await;
         }
         TributaryTransaction::Application(tx) => {
-          handle_application_tx::<D, _, _, _, _, _, _, _>(
-            tx,
-            spec,
-            processors,
-            publish_serai_tx.clone(),
-            publish_tributary_tx,
-            key,
-            recognized_id.clone(),
-            &mut txn,
-          )
-          .await;
+          self.handle_application_tx(tx).await;
         }
       }

-    EventDb::handle_event(&mut txn, hash, event_id);
-    txn.commit();
-
-    event_id += 1;
     }

     // TODO: Trigger any necessary re-attempts
   }
+}

 #[allow(clippy::too_many_arguments)]
 pub(crate) async fn handle_new_blocks<
   D: Db,
   Pro: Processors,
-  FPst: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst,
-  FPtt: Future<Output = ()>,
-  PTT: Clone + Fn(Transaction) -> FPtt,
-  FRid: Future<Output = ()>,
-  RID: RIDTrait<FRid>,
+  PST: PSTTrait,
+  PTT: PTTTrait,
+  RID: RIDTrait,
   P: P2p,
 >(
   db: &mut D,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  recognized_id: RID,
+  recognized_id: &RID,
   processors: &Pro,
-  publish_serai_tx: PST,
+  publish_serai_tx: &PST,
   publish_tributary_tx: &PTT,
   spec: &TributarySpec,
   tributary: &TributaryReader<D, Transaction>,
 ) {
   let genesis = tributary.genesis();
-  let mut last_block = LastBlock::get(db, genesis).unwrap_or(genesis);
+  let mut last_block = LastHandledBlock::get(db, genesis).unwrap_or(genesis);
   while let Some(next) = tributary.block_after(&last_block) {
     let block = tributary.block(&next).unwrap();

@@ -182,20 +263,22 @@ pub(crate) async fn handle_new_blocks<
       }
     }

-    handle_block::<_, _, _, _, _, _, _, _, P>(
-      db,
-      key,
-      recognized_id.clone(),
-      processors,
-      publish_serai_tx.clone(),
-      publish_tributary_tx,
+    let mut txn = db.txn();
+    (TributaryBlockHandler {
+      txn: &mut txn,
       spec,
+      our_key: key,
+      recognized_id,
+      processors,
+      publish_serai_tx,
+      publish_tributary_tx,
       block,
-    )
+      _p2p: PhantomData::<P>,
+    })
+    .handle::<D>()
     .await;
     last_block = next;
-    let mut txn = db.txn();
-    LastBlock::set(&mut txn, genesis, &next);
+    LastHandledBlock::set(&mut txn, genesis, &next);
     txn.commit();
   }
 }

@@ -204,8 +287,7 @@ pub(crate) async fn scan_tributaries_task<
   D: Db,
   Pro: Processors,
   P: P2p,
-  FRid: Send + Future<Output = ()>,
-  RID: 'static + Send + Sync + RIDTrait<FRid>,
+  RID: 'static + Send + Sync + Clone + RIDTrait,
 >(
   raw_db: D,
   key: Zeroizing<<Ristretto as Ciphersuite>::F>,

@@ -240,12 +322,12 @@ pub(crate) async fn scan_tributaries_task<
       // the next block occurs
       let next_block_notification = tributary.next_block_notification().await;

-      handle_new_blocks::<_, _, _, _, _, _, _, _, P>(
+      handle_new_blocks::<_, _, _, _, _, P>(
         &mut tributary_db,
         &key,
-        recognized_id.clone(),
+        &recognized_id,
         &processors,
-        |set, tx_type, tx| {
+        &|set, tx_type, tx| {
           let serai = serai.clone();
           async move {
             loop {
@@ -314,7 +396,7 @@ pub(crate) async fn scan_tributaries_task<
             }
           }
         },
-        &|tx| {
+        &|tx: Transaction| {
           let tributary = tributary.clone();
           async move {
             match tributary.add_transaction(tx.clone()).await {
395
coordinator/src/tributary/signing_protocol.rs
Normal file
|
@@ -0,0 +1,395 @@
|
||||||
|
/*
|
||||||
|
A MuSig-based signing protocol executed with the validators' keys.
|
||||||
|
|
||||||
|
This is used for confirming the results of a DKG on-chain, an operation requiring all validators,
|
||||||
|
and for removing another validator before the DKG completes, an operation requiring a
|
||||||
|
supermajority of validators.
|
||||||
|
|
||||||
|
Since we're using the validators' keys, as needed for them to be the root of trust, the
|
||||||
|
coordinator must perform the signing. This is distinct from all other group-signing operations,
|
||||||
|
as they're all done by the processor.
|
||||||
|
|
||||||
|
The MuSig-aggregation achieves on-chain efficiency and enables a more secure design pattern.
|
||||||
|
While we could individually track votes, that'd require logic to prevent voting multiple times and
|
||||||
|
tracking the accumulated votes. MuSig-aggregation simply requires checking the list is sorted and
|
||||||
|
the list's weight exceeds the threshold.
|
||||||
|
|
||||||
|
Instead of maintaining state in memory, a combination of the DB and re-execution is used. This
|
||||||
|
is deemed acceptable re: performance as:
|
||||||
|
|
||||||
|
1) This is only done prior to a DKG being confirmed on Substrate and is assumed infrequent.
|
||||||
|
2) This is an O(n) algorithm.
|
||||||
|
3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET.
|
||||||
|
|
||||||
|
Accordingly, this should be tolerable.
|
||||||
|
|
||||||
|
As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This raises
|
||||||
|
concerns regarding our re-execution, which is dependent on fixed nonces. Safety is derived from
|
||||||
|
the nonces being context-bound under a BFT protocol. The flow is as follows:
|
||||||
|
|
||||||
|
1) Decide the nonce.
|
||||||
|
2) Publish the nonces' commitments, receiving everyone else's *and potentially the message to be
|
||||||
|
signed*.
|
||||||
|
3) Sign and publish the signature share.
|
||||||
|
|
||||||
|
In order for nonce re-use to occur, the received nonce commitments (or the message to be signed)
|
||||||
|
would have to be distinct and sign would have to be called again.
|
||||||
|
|
||||||
|
Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The
|
||||||
|
only way to operate on distinct received messages would be if:
|
||||||
|
|
||||||
|
1) A logical flaw exists, letting new messages overwrite prior messages
|
||||||
|
2) A reorganization occurred from chain A to chain B, and with it, different messages
|
||||||
|
|
||||||
|
Reorganizations are not supported, as finality is assumed due to the use of a BFT algorithm. While
|
||||||
|
a significant number of processes may be byzantine, leading to BFT being broken, that still will
|
||||||
|
not trigger a reorganization. The only way to move to a distinct chain, with distinct messages,
|
||||||
|
would be by rebuilding the local process (this time following chain B). Upon any complete
|
||||||
|
rebuild, we'd re-decide nonces, achieving safety. This does set a bound preventing partial
|
||||||
|
rebuilds, which is accepted.
|
||||||
|
|
||||||
|
Additionally, to ensure a rebuilt service isn't flagged as malicious, we have to check the
|
||||||
|
commitments generated from the decided nonces are in fact its commitments on-chain (TODO).
|
||||||
|
|
||||||
|
TODO: We also need to review how we're handling Processor preprocesses and likely implement the
|
||||||
|
same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.
|
||||||
|
*/
|
||||||
|
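// A minimal usage sketch of the flow described above, with hypothetical values, via the
// DkgConfirmer wrapper defined below:
//
//   let mut confirmer = DkgConfirmer { key, spec, txn, attempt: 0 };
//   let preprocess = confirmer.preprocess();
//   // ... once the preprocesses have been ordered on-chain ...
//   let share = confirmer.share(preprocesses, &key_pair)?;
//   // ... once the shares have been ordered on-chain ...
//   let signature = confirmer.complete(preprocesses, &key_pair, shares)?;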
|
||||||
|
use core::ops::Deref;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use zeroize::{Zeroize, Zeroizing};
|
||||||
|
|
||||||
|
use rand_core::OsRng;
|
||||||
|
|
||||||
|
use blake2::{Digest, Blake2s256};
|
||||||
|
|
||||||
|
use ciphersuite::{
|
||||||
|
group::{ff::PrimeField, Group, GroupEncoding},
|
||||||
|
Ciphersuite, Ristretto,
|
||||||
|
};
|
||||||
|
use frost::{
|
||||||
|
FrostError,
|
||||||
|
dkg::{Participant, musig::musig},
|
||||||
|
ThresholdKeys,
|
||||||
|
sign::*,
|
||||||
|
};
|
||||||
|
use frost_schnorrkel::Schnorrkel;
|
||||||
|
|
||||||
|
use scale::Encode;
|
||||||
|
|
||||||
|
use serai_client::{
|
||||||
|
Public, SeraiAddress,
|
||||||
|
validator_sets::primitives::{
|
||||||
|
KeyPair, musig_context, set_keys_message, remove_participant_message,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
use serai_db::*;
|
||||||
|
|
||||||
|
use crate::tributary::TributarySpec;
|
||||||
|
|
||||||
|
create_db!(
|
||||||
|
SigningProtocolDb {
|
||||||
|
CachedPreprocesses: (context: &impl Encode) -> [u8; 32]
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
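// The core shared by DkgConfirmer and DkgRemoval: a context-keyed MuSig signing session over the
// validators' keys, whose preprocess is cached (encrypted) in the DB so re-execution is safe.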
struct SigningProtocol<'a, T: DbTxn, C: Encode> {
|
||||||
|
pub(crate) key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
|
pub(crate) spec: &'a TributarySpec,
|
||||||
|
pub(crate) txn: &'a mut T,
|
||||||
|
pub(crate) context: C,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
|
||||||
|
fn preprocess_internal(
|
||||||
|
&mut self,
|
||||||
|
participants: &[<Ristretto as Ciphersuite>::G],
|
||||||
|
) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
|
||||||
|
// Encrypt the cached preprocess as recovery of it will enable recovering the private key
|
||||||
|
// While the DB isn't expected to be arbitrarily readable, it isn't a proper secret store and
|
||||||
|
// shouldn't be trusted as one
|
||||||
|
let mut encryption_key = {
|
||||||
|
let mut encryption_key_preimage =
|
||||||
|
Zeroizing::new(b"Cached Preprocess Encryption Key".to_vec());
|
||||||
|
encryption_key_preimage.extend(self.context.encode());
|
||||||
|
let repr = Zeroizing::new(self.key.to_repr());
|
||||||
|
encryption_key_preimage.extend(repr.deref());
|
||||||
|
Blake2s256::digest(&encryption_key_preimage)
|
||||||
|
};
|
||||||
|
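// The digest acts as a one-time pad over the 32-byte cache: each (validator key, context) pair
// encrypts exactly one cached preprocess, so the pad is never reused.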
let encryption_key_slice: &mut [u8] = encryption_key.as_mut();
|
||||||
|
|
||||||
|
let algorithm = Schnorrkel::new(b"substrate");
|
||||||
|
let keys: ThresholdKeys<Ristretto> =
|
||||||
|
musig(&musig_context(self.spec.set()), self.key, participants)
|
||||||
|
.expect("signing for a set we aren't in/validator present multiple times")
|
||||||
|
.into();
|
||||||
|
|
||||||
|
if CachedPreprocesses::get(self.txn, &self.context).is_none() {
|
||||||
|
let (machine, _) =
|
||||||
|
AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng);
|
||||||
|
|
||||||
|
let mut cache = machine.cache();
|
||||||
|
assert_eq!(cache.0.len(), 32);
|
||||||
|
#[allow(clippy::needless_range_loop)]
|
||||||
|
for b in 0 .. 32 {
|
||||||
|
cache.0[b] ^= encryption_key_slice[b];
|
||||||
|
}
|
||||||
|
|
||||||
|
CachedPreprocesses::set(self.txn, &self.context, &cache.0);
|
||||||
|
}
|
||||||
|
|
||||||
|
let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap();
|
||||||
|
let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached);
|
||||||
|
#[allow(clippy::needless_range_loop)]
|
||||||
|
for b in 0 .. 32 {
|
||||||
|
cached[b] ^= encryption_key_slice[b];
|
||||||
|
}
|
||||||
|
encryption_key_slice.zeroize();
|
||||||
|
let (machine, preprocess) =
|
||||||
|
AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached));
|
||||||
|
|
||||||
|
(machine, preprocess.serialize().try_into().unwrap())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn share_internal(
|
||||||
|
&mut self,
|
||||||
|
participants: &[<Ristretto as Ciphersuite>::G],
|
||||||
|
mut serialized_preprocesses: HashMap<Participant, Vec<u8>>,
|
||||||
|
msg: &[u8],
|
||||||
|
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
|
||||||
|
let machine = self.preprocess_internal(participants).0;
|
||||||
|
|
||||||
|
let mut participants = serialized_preprocesses.keys().cloned().collect::<Vec<_>>();
|
||||||
|
participants.sort();
|
||||||
|
let mut preprocesses = HashMap::new();
|
||||||
|
for participant in participants {
|
||||||
|
preprocesses.insert(
|
||||||
|
participant,
|
||||||
|
machine
|
||||||
|
.read_preprocess(&mut serialized_preprocesses.remove(&participant).unwrap().as_slice())
|
||||||
|
.map_err(|_| participant)?,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e {
|
||||||
|
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
|
||||||
|
FrostError::InvalidParticipant(_, _) |
|
||||||
|
FrostError::InvalidSigningSet(_) |
|
||||||
|
FrostError::InvalidParticipantQuantity(_, _) |
|
||||||
|
FrostError::DuplicatedParticipant(_) |
|
||||||
|
FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
|
||||||
|
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok((machine, share.serialize().try_into().unwrap()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn complete_internal(
|
||||||
|
&mut self,
|
||||||
|
machine: AlgorithmSignatureMachine<Ristretto, Schnorrkel>,
|
||||||
|
shares: HashMap<Participant, Vec<u8>>,
|
||||||
|
) -> Result<[u8; 64], Participant> {
|
||||||
|
let shares = shares
|
||||||
|
.into_iter()
|
||||||
|
.map(|(p, share)| {
|
||||||
|
machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)
|
||||||
|
})
|
||||||
|
.collect::<Result<HashMap<_, _>, _>>()?;
|
||||||
|
let signature = machine.complete(shares).map_err(|e| match e {
|
||||||
|
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
|
||||||
|
FrostError::InvalidParticipant(_, _) |
|
||||||
|
FrostError::InvalidSigningSet(_) |
|
||||||
|
FrostError::InvalidParticipantQuantity(_, _) |
|
||||||
|
FrostError::DuplicatedParticipant(_) |
|
||||||
|
FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
|
||||||
|
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
|
||||||
|
})?;
|
||||||
|
Ok(signature.to_bytes())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the keys of the participants, denoted by their threshold i's, and return a new map indexed by
|
||||||
|
// the MuSig i's.
|
||||||
|
//
|
||||||
|
// If sort_by_keys = true, the MuSig i's will index the keys once sorted. Else, the MuSig i's will
|
||||||
|
// index the validators in the order they've been defined.
|
||||||
|
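// A hypothetical example: validators A (1 key share), B (2), and C (1) have threshold i's of 1,
// 2, and 4. Sorting those i's and re-indexing from 1 yields MuSig i's of 1, 2, and 3.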
fn threshold_i_map_to_keys_and_musig_i_map(
|
||||||
|
spec: &TributarySpec,
|
||||||
|
our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
|
mut map: HashMap<Participant, Vec<u8>>,
|
||||||
|
sort_by_keys: bool,
|
||||||
|
) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
|
||||||
|
// Insert our own index so calculations aren't offset
|
||||||
|
let our_threshold_i =
|
||||||
|
spec.i(<Ristretto as Ciphersuite>::generator() * our_key.deref()).unwrap().start;
|
||||||
|
assert!(map.insert(our_threshold_i, vec![]).is_none());
|
||||||
|
|
||||||
|
let spec_validators = spec.validators();
|
||||||
|
let key_from_threshold_i = |threshold_i| {
|
||||||
|
for (key, _) in &spec_validators {
|
||||||
|
if threshold_i == spec.i(*key).unwrap().start {
|
||||||
|
return *key;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic!("requested info for threshold i which doesn't exist")
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut sorted = vec![];
|
||||||
|
let mut threshold_is = map.keys().cloned().collect::<Vec<_>>();
|
||||||
|
threshold_is.sort();
|
||||||
|
for threshold_i in threshold_is {
|
||||||
|
sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap()));
|
||||||
|
}
|
||||||
|
if sort_by_keys {
|
||||||
|
// Substrate expects these signers to be sorted by key
|
||||||
|
sorted.sort_by(|(key1, _), (key2, _)| key1.to_bytes().cmp(&key2.to_bytes()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now that signers are sorted, with their shares, create a map with the i's needed for MuSig
|
||||||
|
let mut participants = vec![];
|
||||||
|
let mut map = HashMap::new();
|
||||||
|
for (raw_i, (key, share)) in sorted.into_iter().enumerate() {
|
||||||
|
let musig_i = u16::try_from(raw_i).unwrap() + 1;
|
||||||
|
participants.push(key);
|
||||||
|
map.insert(Participant::new(musig_i).unwrap(), share);
|
||||||
|
}
|
||||||
|
|
||||||
|
map.remove(&our_threshold_i).unwrap();
|
||||||
|
|
||||||
|
(participants, map)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
|
||||||
|
pub(crate) key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
|
pub(crate) spec: &'a TributarySpec,
|
||||||
|
pub(crate) txn: &'a mut T,
|
||||||
|
pub(crate) attempt: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: DbTxn> DkgConfirmer<'_, T> {
|
||||||
|
fn signing_protocol(&mut self) -> SigningProtocol<'_, T, (&'static [u8; 12], u32)> {
|
||||||
|
let context = (b"DkgConfirmer", self.attempt);
|
||||||
|
SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
|
||||||
|
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
|
||||||
|
self.signing_protocol().preprocess_internal(&participants)
|
||||||
|
}
|
||||||
|
// Get the preprocess for this confirmation.
|
||||||
|
pub(crate) fn preprocess(&mut self) -> [u8; 64] {
|
||||||
|
self.preprocess_internal().1
|
||||||
|
}
|
||||||
|
|
||||||
|
fn share_internal(
|
||||||
|
&mut self,
|
||||||
|
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||||
|
key_pair: &KeyPair,
|
||||||
|
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
|
||||||
|
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
|
||||||
|
let preprocesses =
|
||||||
|
threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses, false).1;
|
||||||
|
let msg = set_keys_message(&self.spec.set(), key_pair);
|
||||||
|
self.signing_protocol().share_internal(&participants, preprocesses, &msg)
|
||||||
|
}
|
||||||
|
// Get the share for this confirmation, if the preprocesses are valid.
|
||||||
|
pub(crate) fn share(
|
||||||
|
&mut self,
|
||||||
|
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||||
|
key_pair: &KeyPair,
|
||||||
|
) -> Result<[u8; 32], Participant> {
|
||||||
|
self.share_internal(preprocesses, key_pair).map(|(_, share)| share)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn complete(
|
||||||
|
&mut self,
|
||||||
|
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||||
|
key_pair: &KeyPair,
|
||||||
|
shares: HashMap<Participant, Vec<u8>>,
|
||||||
|
) -> Result<[u8; 64], Participant> {
|
||||||
|
let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares, false).1;
|
||||||
|
|
||||||
|
let machine = self
|
||||||
|
.share_internal(preprocesses, key_pair)
|
||||||
|
.expect("trying to complete a machine which failed to preprocess")
|
||||||
|
.0;
|
||||||
|
|
||||||
|
self.signing_protocol().complete_internal(machine, shares)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct DkgRemoval<'a, T: DbTxn> {
|
||||||
|
pub(crate) key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
|
pub(crate) spec: &'a TributarySpec,
|
||||||
|
pub(crate) txn: &'a mut T,
|
||||||
|
pub(crate) removing: [u8; 32],
|
||||||
|
pub(crate) attempt: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: DbTxn> DkgRemoval<'_, T> {
|
||||||
|
fn signing_protocol(&mut self) -> SigningProtocol<'_, T, (&'static [u8; 10], [u8; 32], u32)> {
|
||||||
|
let context = (b"DkgRemoval", self.removing, self.attempt);
|
||||||
|
SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn preprocess_internal(
|
||||||
|
&mut self,
|
||||||
|
participants: Option<&[<Ristretto as Ciphersuite>::G]>,
|
||||||
|
) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
|
||||||
|
// We won't know the participants when we first preprocess
|
||||||
|
// If we don't, we use our key alone as the participant
|
||||||
|
let just_us = [<Ristretto as Ciphersuite>::G::generator() * self.key.deref()];
|
||||||
|
let to_musig = if let Some(participants) = participants { participants } else { &just_us };
|
||||||
|
|
||||||
|
let (machine, preprocess) = self.signing_protocol().preprocess_internal(to_musig);
|
||||||
|
|
||||||
|
// If we're now specifying participants, confirm the commitments were the same
|
||||||
|
if participants.is_some() {
|
||||||
|
let (_, theoretical_preprocess) = self.signing_protocol().preprocess_internal(&just_us);
|
||||||
|
assert_eq!(theoretical_preprocess, preprocess);
|
||||||
|
}
|
||||||
|
|
||||||
|
(machine, preprocess)
|
||||||
|
}
|
||||||
|
// Get the preprocess for this confirmation.
|
||||||
|
pub(crate) fn preprocess(&mut self) -> [u8; 64] {
|
||||||
|
self.preprocess_internal(None).1
|
||||||
|
}
|
||||||
|
|
||||||
|
fn share_internal(
|
||||||
|
&mut self,
|
||||||
|
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||||
|
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
|
||||||
|
let (participants, preprocesses) =
|
||||||
|
threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses, true);
|
||||||
|
let msg = remove_participant_message(&self.spec.set(), Public(self.removing));
|
||||||
|
self.signing_protocol().share_internal(&participants, preprocesses, &msg)
|
||||||
|
}
|
||||||
|
// Get the share for this confirmation, if the preprocesses are valid.
|
||||||
|
pub(crate) fn share(
|
||||||
|
&mut self,
|
||||||
|
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||||
|
) -> Result<[u8; 32], Participant> {
|
||||||
|
self.share_internal(preprocesses).map(|(_, share)| share)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn complete(
|
||||||
|
&mut self,
|
||||||
|
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||||
|
shares: HashMap<Participant, Vec<u8>>,
|
||||||
|
) -> Result<(Vec<SeraiAddress>, [u8; 64]), Participant> {
|
||||||
|
let (participants, shares) =
|
||||||
|
threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares, true);
|
||||||
|
let signers = participants.iter().map(|key| SeraiAddress(key.to_bytes())).collect::<Vec<_>>();
|
||||||
|
|
||||||
|
let machine = self
|
||||||
|
.share_internal(preprocesses)
|
||||||
|
.expect("trying to complete a machine which failed to preprocess")
|
||||||
|
.0;
|
||||||
|
|
||||||
|
let signature = self.signing_protocol().complete_internal(machine, shares)?;
|
||||||
|
Ok((signers, signature))
|
||||||
|
}
|
||||||
|
}
|
116
coordinator/src/tributary/spec.rs
Normal file
|
@@ -0,0 +1,116 @@
|
||||||
|
use core::{ops::Range, fmt::Debug};
|
||||||
|
use std::io;
|
||||||
|
|
||||||
|
use transcript::{Transcript, RecommendedTranscript};
|
||||||
|
|
||||||
|
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||||
|
use frost::Participant;
|
||||||
|
|
||||||
|
use scale::Encode;
|
||||||
|
use borsh::{BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
|
use serai_client::{primitives::PublicKey, validator_sets::primitives::ValidatorSet};
|
||||||
|
|
||||||
|
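// Manual borsh helpers for the validators field: Ristretto points aren't natively borsh, so
// they're written as their 32-byte compressed encodings, and deserialization rejects any invalid
// point.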
fn borsh_serialize_validators<W: io::Write>(
|
||||||
|
validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
|
||||||
|
writer: &mut W,
|
||||||
|
) -> Result<(), io::Error> {
|
||||||
|
let len = u16::try_from(validators.len()).unwrap();
|
||||||
|
BorshSerialize::serialize(&len, writer)?;
|
||||||
|
for validator in validators {
|
||||||
|
BorshSerialize::serialize(&validator.0.to_bytes(), writer)?;
|
||||||
|
BorshSerialize::serialize(&validator.1, writer)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn borsh_deserialize_validators<R: io::Read>(
|
||||||
|
reader: &mut R,
|
||||||
|
) -> Result<Vec<(<Ristretto as Ciphersuite>::G, u16)>, io::Error> {
|
||||||
|
let len: u16 = BorshDeserialize::deserialize_reader(reader)?;
|
||||||
|
let mut res = vec![];
|
||||||
|
for _ in 0 .. len {
|
||||||
|
let compressed: [u8; 32] = BorshDeserialize::deserialize_reader(reader)?;
|
||||||
|
let point = Option::from(<Ristretto as Ciphersuite>::G::from_bytes(&compressed))
|
||||||
|
.ok_or_else(|| io::Error::other("invalid point for validator"))?;
|
||||||
|
let weight: u16 = BorshDeserialize::deserialize_reader(reader)?;
|
||||||
|
res.push((point, weight));
|
||||||
|
}
|
||||||
|
Ok(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
|
pub struct TributarySpec {
|
||||||
|
serai_block: [u8; 32],
|
||||||
|
start_time: u64,
|
||||||
|
set: ValidatorSet,
|
||||||
|
#[borsh(
|
||||||
|
serialize_with = "borsh_serialize_validators",
|
||||||
|
deserialize_with = "borsh_deserialize_validators"
|
||||||
|
)]
|
||||||
|
validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TributarySpec {
|
||||||
|
pub fn new(
|
||||||
|
serai_block: [u8; 32],
|
||||||
|
start_time: u64,
|
||||||
|
set: ValidatorSet,
|
||||||
|
set_participants: Vec<(PublicKey, u16)>,
|
||||||
|
) -> TributarySpec {
|
||||||
|
let mut validators = vec![];
|
||||||
|
for (participant, shares) in set_participants {
|
||||||
|
let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
|
||||||
|
.expect("invalid key registered as participant");
|
||||||
|
validators.push((participant, shares));
|
||||||
|
}
|
||||||
|
|
||||||
|
Self { serai_block, start_time, set, validators }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set(&self) -> ValidatorSet {
|
||||||
|
self.set
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn genesis(&self) -> [u8; 32] {
|
||||||
|
// Calculate the genesis for this Tributary
|
||||||
|
let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
|
||||||
|
// This locks it to a specific Serai chain
|
||||||
|
genesis.append_message(b"serai_block", self.serai_block);
|
||||||
|
genesis.append_message(b"session", self.set.session.0.to_le_bytes());
|
||||||
|
genesis.append_message(b"network", self.set.network.encode());
|
||||||
|
let genesis = genesis.challenge(b"genesis");
|
||||||
|
let genesis_ref: &[u8] = genesis.as_ref();
|
||||||
|
genesis_ref[.. 32].try_into().unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn start_time(&self) -> u64 {
|
||||||
|
self.start_time
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn n(&self) -> u16 {
|
||||||
|
self.validators.iter().map(|(_, weight)| weight).sum()
|
||||||
|
}
|
||||||
|
|
||||||
|
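// The BFT threshold, the smallest t such that t > (2/3) n. For example, n = 4 yields t = 3.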
pub fn t(&self) -> u16 {
|
||||||
|
((2 * self.n()) / 3) + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
|
||||||
|
let mut i = 1;
|
||||||
|
for (validator, weight) in &self.validators {
|
||||||
|
if validator == &key {
|
||||||
|
return Some(Range {
|
||||||
|
start: Participant::new(i).unwrap(),
|
||||||
|
end: Participant::new(i + weight).unwrap(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
i += weight;
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
|
||||||
|
self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
|
||||||
|
}
|
||||||
|
}
|
693
coordinator/src/tributary/transaction.rs
Normal file
|
@@ -0,0 +1,693 @@
|
||||||
|
use core::{ops::Deref, fmt::Debug};
|
||||||
|
use std::io;
|
||||||
|
|
||||||
|
use zeroize::Zeroizing;
|
||||||
|
use rand_core::{RngCore, CryptoRng};
|
||||||
|
|
||||||
|
use blake2::{Digest, Blake2s256};
|
||||||
|
use transcript::{Transcript, RecommendedTranscript};
|
||||||
|
|
||||||
|
use ciphersuite::{
|
||||||
|
group::{ff::Field, GroupEncoding},
|
||||||
|
Ciphersuite, Ristretto,
|
||||||
|
};
|
||||||
|
use schnorr::SchnorrSignature;
|
||||||
|
use frost::Participant;
|
||||||
|
|
||||||
|
use scale::{Encode, Decode};
|
||||||
|
use processor_messages::coordinator::SubstrateSignableId;
|
||||||
|
|
||||||
|
use tributary::{
|
||||||
|
TRANSACTION_SIZE_LIMIT, ReadWrite,
|
||||||
|
transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
|
||||||
|
pub enum Label {
|
||||||
|
Preprocess,
|
||||||
|
Share,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Label {
|
||||||
|
// TODO: Should nonces be u8 thanks to our use of topics?
|
||||||
|
pub fn nonce(&self) -> u32 {
|
||||||
|
match self {
|
||||||
|
Label::Preprocess => 0,
|
||||||
|
Label::Share => 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
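// Transactions wrapping SignData use two sequential nonces per (topic, attempt): nonce 0 for the
// Preprocess and nonce 1 for the Share, per Label::nonce above.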
#[derive(Clone, PartialEq, Eq)]
|
||||||
|
pub struct SignData<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> {
|
||||||
|
pub plan: Id,
|
||||||
|
pub attempt: u32,
|
||||||
|
pub label: Label,
|
||||||
|
|
||||||
|
pub data: Vec<Vec<u8>>,
|
||||||
|
|
||||||
|
pub signed: Signed,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> Debug for SignData<Id> {
|
||||||
|
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
||||||
|
fmt
|
||||||
|
.debug_struct("SignData")
|
||||||
|
.field("id", &hex::encode(self.plan.encode()))
|
||||||
|
.field("attempt", &self.attempt)
|
||||||
|
.field("label", &self.label)
|
||||||
|
.field("signer", &hex::encode(self.signed.signer.to_bytes()))
|
||||||
|
.finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
|
||||||
|
pub(crate) fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
|
let plan = Id::decode(&mut scale::IoReader(&mut *reader))
|
||||||
|
.map_err(|_| io::Error::other("invalid plan in SignData"))?;
|
||||||
|
|
||||||
|
let mut attempt = [0; 4];
|
||||||
|
reader.read_exact(&mut attempt)?;
|
||||||
|
let attempt = u32::from_le_bytes(attempt);
|
||||||
|
|
||||||
|
let mut label = [0; 1];
|
||||||
|
reader.read_exact(&mut label)?;
|
||||||
|
let label = match label[0] {
|
||||||
|
0 => Label::Preprocess,
|
||||||
|
1 => Label::Share,
|
||||||
|
_ => Err(io::Error::other("invalid label in SignData"))?,
|
||||||
|
};
|
||||||
|
|
||||||
|
let data = {
|
||||||
|
let mut data_pieces = [0];
|
||||||
|
reader.read_exact(&mut data_pieces)?;
|
||||||
|
if data_pieces[0] == 0 {
|
||||||
|
Err(io::Error::other("zero pieces of data in SignData"))?;
|
||||||
|
}
|
||||||
|
let mut all_data = vec![];
|
||||||
|
for _ in 0 .. data_pieces[0] {
|
||||||
|
let mut data_len = [0; 2];
|
||||||
|
reader.read_exact(&mut data_len)?;
|
||||||
|
let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
|
||||||
|
reader.read_exact(&mut data)?;
|
||||||
|
all_data.push(data);
|
||||||
|
}
|
||||||
|
all_data
|
||||||
|
};
|
||||||
|
|
||||||
|
let signed = Signed::read_without_nonce(reader, label.nonce())?;
|
||||||
|
|
||||||
|
Ok(SignData { plan, attempt, label, data, signed })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
|
writer.write_all(&self.plan.encode())?;
|
||||||
|
writer.write_all(&self.attempt.to_le_bytes())?;
|
||||||
|
writer.write_all(&[match self.label {
|
||||||
|
Label::Preprocess => 0,
|
||||||
|
Label::Share => 1,
|
||||||
|
}])?;
|
||||||
|
|
||||||
|
writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
|
||||||
|
for data in &self.data {
|
||||||
|
if data.len() > u16::MAX.into() {
|
||||||
|
// Currently, the largest individual preprocess is a Monero transaction
|
||||||
|
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
|
||||||
|
// key image and proof (96 bytes)
|
||||||
|
// Even with all of that, we could support 227 inputs in a single TX
|
||||||
|
// Monero is limited to ~120 inputs per TX
|
||||||
|
//
|
||||||
|
// Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess
|
||||||
|
Err(io::Error::other("signing data exceeded 65535 bytes"))?;
|
||||||
|
}
|
||||||
|
writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
|
||||||
|
writer.write_all(data)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
self.signed.write_without_nonce(writer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, PartialEq, Eq)]
|
||||||
|
pub enum Transaction {
|
||||||
|
RemoveParticipant(Participant),
|
||||||
|
|
||||||
|
// Once this completes successfully, no more instances should be created.
|
||||||
|
DkgCommitments {
|
||||||
|
attempt: u32,
|
||||||
|
commitments: Vec<Vec<u8>>,
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
DkgShares {
|
||||||
|
attempt: u32,
|
||||||
|
// Sending Participant, Receiving Participant, Share
|
||||||
|
shares: Vec<Vec<Vec<u8>>>,
|
||||||
|
confirmation_nonces: [u8; 64],
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
InvalidDkgShare {
|
||||||
|
attempt: u32,
|
||||||
|
accuser: Participant,
|
||||||
|
faulty: Participant,
|
||||||
|
blame: Option<Vec<u8>>,
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
DkgConfirmed {
|
||||||
|
attempt: u32,
|
||||||
|
confirmation_share: [u8; 32],
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
|
||||||
|
DkgRemoval(SignData<[u8; 32]>),
|
||||||
|
|
||||||
|
// Co-sign a Substrate block.
|
||||||
|
CosignSubstrateBlock([u8; 32]),
|
||||||
|
|
||||||
|
// When we have synchrony on a batch, we can allow signing it
|
||||||
|
// TODO (never?): This is less efficient than an ExternalBlock-provided transaction,
|
||||||
|
// which would be binding over the block hash and automatically achieve synchrony on all
|
||||||
|
// relevant batches. ExternalBlock was removed for this due to complexity around the pipeline
|
||||||
|
// with the current processor, yet it would still be an improvement.
|
||||||
|
Batch {
|
||||||
|
block: [u8; 32],
|
||||||
|
batch: [u8; 5],
|
||||||
|
},
|
||||||
|
// When a Serai block is finalized, with the contained batches, we can allow signing the
|
||||||
|
// associated plan IDs
|
||||||
|
SubstrateBlock(u64),
|
||||||
|
|
||||||
|
SubstrateSign(SignData<SubstrateSignableId>),
|
||||||
|
Sign(SignData<[u8; 32]>),
|
||||||
|
// This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst
|
||||||
|
// reporters (who should all report the same thing)
|
||||||
|
// We do still track the signer in order to prevent a single signer from publishing arbitrarily
|
||||||
|
// many TXs without penalty
|
||||||
|
// Here, they're denoted as the first_signer, as only the signer of the first TX to be included
|
||||||
|
// with this pairing will be remembered on-chain
|
||||||
|
SignCompleted {
|
||||||
|
plan: [u8; 32],
|
||||||
|
tx_hash: Vec<u8>,
|
||||||
|
first_signer: <Ristretto as Ciphersuite>::G,
|
||||||
|
signature: SchnorrSignature<Ristretto>,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Debug for Transaction {
|
||||||
|
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
||||||
|
match self {
|
||||||
|
Transaction::RemoveParticipant(participant) => fmt
|
||||||
|
.debug_struct("Transaction::RemoveParticipant")
|
||||||
|
.field("participant", participant)
|
||||||
|
.finish(),
|
||||||
|
Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
|
||||||
|
.debug_struct("Transaction::DkgCommitments")
|
||||||
|
.field("attempt", attempt)
|
||||||
|
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||||
|
.finish_non_exhaustive(),
|
||||||
|
Transaction::DkgShares { attempt, signed, .. } => fmt
|
||||||
|
.debug_struct("Transaction::DkgShares")
|
||||||
|
.field("attempt", attempt)
|
||||||
|
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||||
|
.finish_non_exhaustive(),
|
||||||
|
Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
|
||||||
|
.debug_struct("Transaction::InvalidDkgShare")
|
||||||
|
.field("attempt", attempt)
|
||||||
|
.field("accuser", accuser)
|
||||||
|
.field("faulty", faulty)
|
||||||
|
.finish_non_exhaustive(),
|
||||||
|
Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt
|
||||||
|
.debug_struct("Transaction::DkgConfirmed")
|
||||||
|
.field("attempt", attempt)
|
||||||
|
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||||
|
.finish_non_exhaustive(),
|
||||||
|
Transaction::DkgRemoval(sign_data) => {
|
||||||
|
fmt.debug_struct("Transaction::DkgRemoval").field("sign_data", sign_data).finish()
|
||||||
|
}
|
||||||
|
Transaction::CosignSubstrateBlock(block) => fmt
|
||||||
|
.debug_struct("Transaction::CosignSubstrateBlock")
|
||||||
|
.field("block", &hex::encode(block))
|
||||||
|
.finish(),
|
||||||
|
Transaction::Batch { block, batch } => fmt
|
||||||
|
.debug_struct("Transaction::Batch")
|
||||||
|
.field("block", &hex::encode(block))
|
||||||
|
.field("batch", &hex::encode(batch))
|
||||||
|
.finish(),
|
||||||
|
Transaction::SubstrateBlock(block) => {
|
||||||
|
fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish()
|
||||||
|
}
|
||||||
|
Transaction::SubstrateSign(sign_data) => {
|
||||||
|
fmt.debug_struct("Transaction::Substrate").field("sign_data", sign_data).finish()
|
||||||
|
}
|
||||||
|
Transaction::Sign(sign_data) => {
|
||||||
|
fmt.debug_struct("Transaction::Sign").field("sign_data", sign_data).finish()
|
||||||
|
}
|
||||||
|
Transaction::SignCompleted { plan, tx_hash, .. } => fmt
|
||||||
|
.debug_struct("Transaction::SignCompleted")
|
||||||
|
.field("plan", &hex::encode(plan))
|
||||||
|
.field("tx_hash", &hex::encode(tx_hash))
|
||||||
|
.finish_non_exhaustive(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReadWrite for Transaction {
|
||||||
|
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
|
let mut kind = [0];
|
||||||
|
reader.read_exact(&mut kind)?;
|
||||||
|
|
||||||
|
match kind[0] {
|
||||||
|
0 => Ok(Transaction::RemoveParticipant({
|
||||||
|
let mut participant = [0; 2];
|
||||||
|
reader.read_exact(&mut participant)?;
|
||||||
|
Participant::new(u16::from_le_bytes(participant))
|
||||||
|
.ok_or_else(|| io::Error::other("invalid participant in RemoveParticipant"))?
|
||||||
|
})),
|
||||||
|
|
||||||
|
1 => {
|
||||||
|
let mut attempt = [0; 4];
|
||||||
|
reader.read_exact(&mut attempt)?;
|
||||||
|
let attempt = u32::from_le_bytes(attempt);
|
||||||
|
|
||||||
|
let commitments = {
|
||||||
|
let mut commitments_len = [0; 1];
|
||||||
|
reader.read_exact(&mut commitments_len)?;
|
||||||
|
let commitments_len = usize::from(commitments_len[0]);
|
||||||
|
if commitments_len == 0 {
|
||||||
|
Err(io::Error::other("zero commitments in DkgCommitments"))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut each_commitments_len = [0; 2];
|
||||||
|
reader.read_exact(&mut each_commitments_len)?;
|
||||||
|
let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
|
||||||
|
if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
|
||||||
|
Err(io::Error::other(
|
||||||
|
"commitments present in transaction exceeded transaction size limit",
|
||||||
|
))?;
|
||||||
|
}
|
||||||
|
let mut commitments = vec![vec![]; commitments_len];
|
||||||
|
for commitments in &mut commitments {
|
||||||
|
*commitments = vec![0; each_commitments_len];
|
||||||
|
reader.read_exact(commitments)?;
|
||||||
|
}
|
||||||
|
commitments
|
||||||
|
};
|
||||||
|
|
||||||
|
let signed = Signed::read_without_nonce(reader, 0)?;
|
||||||
|
|
||||||
|
Ok(Transaction::DkgCommitments { attempt, commitments, signed })
|
||||||
|
}
|
||||||
|
|
||||||
|
2 => {
|
||||||
|
let mut attempt = [0; 4];
|
||||||
|
reader.read_exact(&mut attempt)?;
|
||||||
|
let attempt = u32::from_le_bytes(attempt);
|
||||||
|
|
||||||
|
let shares = {
|
||||||
|
let mut share_quantity = [0; 1];
|
||||||
|
reader.read_exact(&mut share_quantity)?;
|
||||||
|
|
||||||
|
let mut key_share_quantity = [0; 1];
|
||||||
|
reader.read_exact(&mut key_share_quantity)?;
|
||||||
|
|
||||||
|
let mut share_len = [0; 2];
|
||||||
|
reader.read_exact(&mut share_len)?;
|
||||||
|
let share_len = usize::from(u16::from_le_bytes(share_len));
|
||||||
|
|
||||||
|
let mut all_shares = vec![];
|
||||||
|
for _ in 0 .. share_quantity[0] {
|
||||||
|
let mut shares = vec![];
|
||||||
|
for _ in 0 .. key_share_quantity[0] {
|
||||||
|
let mut share = vec![0; share_len];
|
||||||
|
reader.read_exact(&mut share)?;
|
||||||
|
shares.push(share);
|
||||||
|
}
|
||||||
|
all_shares.push(shares);
|
||||||
|
}
|
||||||
|
all_shares
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut confirmation_nonces = [0; 64];
|
||||||
|
reader.read_exact(&mut confirmation_nonces)?;
|
||||||
|
|
||||||
|
let signed = Signed::read_without_nonce(reader, 1)?;
|
||||||
|
|
||||||
|
Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
|
||||||
|
}
|
||||||
|
|
||||||
|
3 => {
|
||||||
|
let mut attempt = [0; 4];
|
||||||
|
reader.read_exact(&mut attempt)?;
|
||||||
|
let attempt = u32::from_le_bytes(attempt);
|
||||||
|
|
||||||
|
let mut accuser = [0; 2];
|
||||||
|
reader.read_exact(&mut accuser)?;
|
||||||
|
let accuser = Participant::new(u16::from_le_bytes(accuser))
|
||||||
|
.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
|
||||||
|
|
||||||
|
let mut faulty = [0; 2];
|
||||||
|
reader.read_exact(&mut faulty)?;
|
||||||
|
let faulty = Participant::new(u16::from_le_bytes(faulty))
|
||||||
|
.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
|
||||||
|
|
||||||
|
let mut blame_len = [0; 2];
|
||||||
|
reader.read_exact(&mut blame_len)?;
|
||||||
|
let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
|
||||||
|
reader.read_exact(&mut blame)?;
|
||||||
|
|
||||||
|
// This shares a nonce with DkgConfirmed as only one is expected
|
||||||
|
let signed = Signed::read_without_nonce(reader, 2)?;
|
||||||
|
|
||||||
|
Ok(Transaction::InvalidDkgShare {
|
||||||
|
attempt,
|
||||||
|
accuser,
|
||||||
|
faulty,
|
||||||
|
blame: Some(blame).filter(|blame| !blame.is_empty()),
|
||||||
|
signed,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
4 => {
|
||||||
|
let mut attempt = [0; 4];
|
||||||
|
reader.read_exact(&mut attempt)?;
|
||||||
|
let attempt = u32::from_le_bytes(attempt);
|
||||||
|
|
||||||
|
let mut confirmation_share = [0; 32];
|
||||||
|
reader.read_exact(&mut confirmation_share)?;
|
||||||
|
|
||||||
|
let signed = Signed::read_without_nonce(reader, 2)?;
|
||||||
|
|
||||||
|
Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed })
|
||||||
|
}
|
||||||
|
|
||||||
|
5 => SignData::read(reader).map(Transaction::DkgRemoval),
|
||||||
|
|
||||||
|
6 => {
|
||||||
|
let mut block = [0; 32];
|
||||||
|
reader.read_exact(&mut block)?;
|
||||||
|
Ok(Transaction::CosignSubstrateBlock(block))
|
||||||
|
}
|
||||||
|
|
||||||
|
7 => {
|
||||||
|
let mut block = [0; 32];
|
||||||
|
reader.read_exact(&mut block)?;
|
||||||
|
let mut batch = [0; 5];
|
||||||
|
reader.read_exact(&mut batch)?;
|
||||||
|
Ok(Transaction::Batch { block, batch })
|
||||||
|
}
|
||||||
|
|
||||||
|
8 => {
|
||||||
|
let mut block = [0; 8];
|
||||||
|
reader.read_exact(&mut block)?;
|
||||||
|
Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
|
||||||
|
}
|
||||||
|
|
||||||
|
9 => SignData::read(reader).map(Transaction::SubstrateSign),
|
||||||
|
10 => SignData::read(reader).map(Transaction::Sign),
|
||||||
|
|
||||||
|
11 => {
|
||||||
|
let mut plan = [0; 32];
|
||||||
|
reader.read_exact(&mut plan)?;
|
||||||
|
|
||||||
|
let mut tx_hash_len = [0];
|
||||||
|
reader.read_exact(&mut tx_hash_len)?;
|
||||||
|
let mut tx_hash = vec![0; usize::from(tx_hash_len[0])];
|
||||||
|
reader.read_exact(&mut tx_hash)?;
|
||||||
|
|
||||||
|
let first_signer = Ristretto::read_G(reader)?;
|
||||||
|
let signature = SchnorrSignature::<Ristretto>::read(reader)?;
|
||||||
|
|
||||||
|
Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
|
||||||
|
}
|
||||||
|
|
||||||
|
_ => Err(io::Error::other("invalid transaction type")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
|
match self {
|
||||||
|
Transaction::RemoveParticipant(i) => {
|
||||||
|
writer.write_all(&[0])?;
|
||||||
|
writer.write_all(&u16::from(*i).to_le_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::DkgCommitments { attempt, commitments, signed } => {
|
||||||
|
writer.write_all(&[1])?;
|
||||||
|
writer.write_all(&attempt.to_le_bytes())?;
|
||||||
|
if commitments.is_empty() {
|
||||||
|
Err(io::Error::other("zero commitments in DkgCommitments"))?
|
||||||
|
}
|
||||||
|
writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
|
||||||
|
for commitments_i in commitments {
|
||||||
|
if commitments_i.len() != commitments[0].len() {
|
||||||
|
Err(io::Error::other("commitments of differing sizes in DkgCommitments"))?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
|
||||||
|
for commitments in commitments {
|
||||||
|
writer.write_all(commitments)?;
|
||||||
|
}
|
||||||
|
signed.write_without_nonce(writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
|
||||||
|
writer.write_all(&[2])?;
|
||||||
|
writer.write_all(&attempt.to_le_bytes())?;
|
||||||
|
|
||||||
|
// `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
|
||||||
|
// bound participants to 150, this conversion is safe for any valid in-memory transaction.
|
||||||
|
writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
|
||||||
|
// This assumes at least one share is being sent to another party
|
||||||
|
writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
|
||||||
|
let share_len = shares[0][0].len();
|
||||||
|
// For BLS12-381 G2, this would be:
|
||||||
|
// - A 32-byte share
|
||||||
|
// - A 96-byte ephemeral key
|
||||||
|
// - A 128-byte signature
|
||||||
|
// Hence why this has to be u16
|
||||||
|
writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;
|
||||||
|
|
||||||
|
for these_shares in shares {
|
||||||
|
assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
|
||||||
|
for share in these_shares {
|
||||||
|
assert_eq!(share.len(), share_len, "sent shares were of variable length");
|
||||||
|
writer.write_all(share)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
writer.write_all(confirmation_nonces)?;
|
||||||
|
signed.write_without_nonce(writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
|
||||||
|
writer.write_all(&[3])?;
|
||||||
|
writer.write_all(&attempt.to_le_bytes())?;
|
||||||
|
writer.write_all(&u16::from(*accuser).to_le_bytes())?;
|
||||||
|
writer.write_all(&u16::from(*faulty).to_le_bytes())?;
|
||||||
|
|
||||||
|
// Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
|
||||||
|
assert!(blame.as_ref().map(|blame| blame.len()).unwrap_or(1) != 0);
|
||||||
|
let blame_len =
|
||||||
|
u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
|
||||||
|
writer.write_all(&blame_len.to_le_bytes())?;
|
||||||
|
writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
|
||||||
|
|
||||||
|
signed.write_without_nonce(writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
|
||||||
|
writer.write_all(&[4])?;
|
||||||
|
writer.write_all(&attempt.to_le_bytes())?;
|
||||||
|
writer.write_all(confirmation_share)?;
|
||||||
|
signed.write_without_nonce(writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::DkgRemoval(data) => {
|
||||||
|
writer.write_all(&[5])?;
|
||||||
|
data.write(writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::CosignSubstrateBlock(block) => {
|
||||||
|
writer.write_all(&[6])?;
|
||||||
|
writer.write_all(block)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::Batch { block, batch } => {
|
||||||
|
writer.write_all(&[7])?;
|
||||||
|
writer.write_all(block)?;
|
||||||
|
writer.write_all(batch)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::SubstrateBlock(block) => {
|
||||||
|
writer.write_all(&[8])?;
|
||||||
|
writer.write_all(&block.to_le_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::SubstrateSign(data) => {
|
||||||
|
writer.write_all(&[9])?;
|
||||||
|
data.write(writer)
|
||||||
|
}
|
||||||
|
Transaction::Sign(data) => {
|
||||||
|
writer.write_all(&[10])?;
|
||||||
|
data.write(writer)
|
||||||
|
}
|
||||||
|
Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
|
||||||
|
writer.write_all(&[11])?;
|
||||||
|
writer.write_all(plan)?;
|
||||||
|
writer
|
||||||
|
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceeded 255 bytes")])?;
|
||||||
|
writer.write_all(tx_hash)?;
|
||||||
|
writer.write_all(&first_signer.to_bytes())?;
|
||||||
|
signature.write(writer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
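// A minimal round-trip sketch (a hypothetical test) for the ReadWrite implementation above:
//
//   let tx = Transaction::CosignSubstrateBlock([0; 32]);
//   let mut buf = vec![];
//   tx.write(&mut buf).unwrap();
//   assert_eq!(Transaction::read(&mut buf.as_slice()).unwrap(), tx);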
|
|
||||||
|
impl TransactionTrait for Transaction {
|
||||||
|
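// Note: all four DKG transactions share the (b"dkg", attempt) topic, ordering them within a
// single nonce sequence (0 for commitments, 1 for shares, 2 for a fault or confirmation).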
fn kind(&self) -> TransactionKind<'_> {
|
||||||
|
match self {
|
||||||
|
Transaction::RemoveParticipant(_) => TransactionKind::Provided("remove"),
|
||||||
|
|
||||||
|
Transaction::DkgCommitments { attempt, commitments: _, signed } => {
|
||||||
|
TransactionKind::Signed((b"dkg", attempt).encode(), signed)
|
||||||
|
}
|
||||||
|
Transaction::DkgShares { attempt, signed, .. } => {
|
||||||
|
TransactionKind::Signed((b"dkg", attempt).encode(), signed)
|
||||||
|
}
|
||||||
|
Transaction::InvalidDkgShare { attempt, signed, .. } => {
|
||||||
|
TransactionKind::Signed((b"dkg", attempt).encode(), signed)
|
||||||
|
}
|
||||||
|
Transaction::DkgConfirmed { attempt, signed, .. } => {
|
||||||
|
TransactionKind::Signed((b"dkg", attempt).encode(), signed)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::DkgRemoval(data) => {
|
||||||
|
TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed)
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
|
||||||
|
|
||||||
|
Transaction::Batch { .. } => TransactionKind::Provided("batch"),
|
||||||
|
Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"),
|
||||||
|
|
||||||
|
Transaction::SubstrateSign(data) => {
|
||||||
|
TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed)
|
||||||
|
}
|
||||||
|
Transaction::Sign(data) => {
|
||||||
|
TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed)
|
||||||
|
}
|
||||||
|
Transaction::SignCompleted { .. } => TransactionKind::Unsigned,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hash(&self) -> [u8; 32] {
|
||||||
|
let mut tx = self.serialize();
|
||||||
|
if let TransactionKind::Signed(_, signed) = self.kind() {
|
||||||
|
// Make sure the part we're cutting off is the signature
|
||||||
|
assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
|
||||||
|
}
|
||||||
|
Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn verify(&self) -> Result<(), TransactionError> {
|
||||||
|
// TODO: Check DkgRemoval and SubstrateSign's lengths here
|
||||||
|
|
||||||
|
if let Transaction::SignCompleted { first_signer, signature, .. } = self {
|
||||||
|
if !signature.verify(*first_signer, self.sign_completed_challenge()) {
|
||||||
|
Err(TransactionError::InvalidContent)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Transaction {
|
||||||
|
// Used to initially construct transactions so we can then get sig hashes and perform signing
|
||||||
|
pub fn empty_signed() -> Signed {
|
||||||
|
Signed {
|
||||||
|
signer: Ristretto::generator(),
|
||||||
|
nonce: 0,
|
||||||
|
signature: SchnorrSignature::<Ristretto> {
|
||||||
|
R: Ristretto::generator(),
|
||||||
|
s: <Ristretto as Ciphersuite>::F::ZERO,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign a transaction
|
||||||
|
pub fn sign<R: RngCore + CryptoRng>(
|
||||||
|
&mut self,
|
||||||
|
rng: &mut R,
|
||||||
|
genesis: [u8; 32],
|
||||||
|
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
|
) {
|
||||||
|
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
|
||||||
|
let nonce = match tx {
|
||||||
|
Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
|
||||||
|
|
||||||
|
Transaction::DkgCommitments { .. } => 0,
|
||||||
|
Transaction::DkgShares { .. } => 1,
|
||||||
|
Transaction::InvalidDkgShare { .. } => 2,
|
||||||
|
Transaction::DkgConfirmed { .. } => 2,
|
||||||
|
|
||||||
|
Transaction::DkgRemoval(data) => data.label.nonce(),
|
||||||
|
|
||||||
|
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
||||||
|
|
||||||
|
Transaction::Batch { .. } => panic!("signing Batch"),
|
||||||
|
Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
|
||||||
|
|
||||||
|
Transaction::SubstrateSign(data) => data.label.nonce(),
|
||||||
|
Transaction::Sign(data) => data.label.nonce(),
|
||||||
|
Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
|
||||||
|
};
|
||||||
|
|
||||||
|
(
|
||||||
|
nonce,
|
||||||
|
match tx {
|
||||||
|
Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
|
||||||
|
|
||||||
|
Transaction::DkgCommitments { ref mut signed, .. } => signed,
|
||||||
|
Transaction::DkgShares { ref mut signed, .. } => signed,
|
||||||
|
Transaction::InvalidDkgShare { ref mut signed, .. } => signed,
|
||||||
|
Transaction::DkgConfirmed { ref mut signed, .. } => signed,
|
||||||
|
|
||||||
|
Transaction::DkgRemoval(ref mut data) => &mut data.signed,
|
||||||
|
|
||||||
|
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
||||||
|
|
||||||
|
Transaction::Batch { .. } => panic!("signing Batch"),
|
||||||
|
Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
|
||||||
|
|
||||||
|
Transaction::SubstrateSign(ref mut data) => &mut data.signed,
|
||||||
|
Transaction::Sign(ref mut data) => &mut data.signed,
|
||||||
|
Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
let (nonce, signed_ref) = signed(self);
|
||||||
|
signed_ref.signer = Ristretto::generator() * key.deref();
|
||||||
|
signed_ref.nonce = nonce;
|
||||||
|
|
||||||
|
let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
|
||||||
|
signed(self).1.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
|
||||||
|
let sig_hash = self.sig_hash(genesis);
|
||||||
|
signed(self).1.signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F {
|
||||||
|
if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self {
|
||||||
|
let mut transcript =
|
||||||
|
RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted");
|
||||||
|
transcript.append_message(b"plan", plan);
|
||||||
|
transcript.append_message(b"tx_hash", tx_hash);
|
||||||
|
transcript.append_message(b"signer", first_signer.to_bytes());
|
||||||
|
transcript.append_message(b"nonce", signature.R.to_bytes());
|
||||||
|
Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge"))
|
||||||
|
} else {
|
||||||
|
panic!("sign_completed_challenge called on transaction which wasn't SignCompleted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@@ -23,7 +23,6 @@ zeroize = { version = "^1.5", default-features = false, features = ["zeroize_der
|
||||||
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
|
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
|
||||||
|
|
||||||
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
|
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
|
||||||
serde = { version = "1", default-features = false, features = ["derive"], optional = true }
|
|
||||||
|
|
||||||
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
|
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
|
||||||
chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] }
|
chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] }
|
||||||
|
@@ -47,7 +46,6 @@ std = [
|
||||||
"std-shims/std",
|
"std-shims/std",
|
||||||
|
|
||||||
"borsh?/std",
|
"borsh?/std",
|
||||||
"serde?/std",
|
|
||||||
|
|
||||||
"transcript/std",
|
"transcript/std",
|
||||||
"chacha20/std",
|
"chacha20/std",
|
||||||
|
@@ -61,6 +59,5 @@ std = [
|
||||||
"dleq/serialize"
|
"dleq/serialize"
|
||||||
]
|
]
|
||||||
borsh = ["dep:borsh"]
|
borsh = ["dep:borsh"]
|
||||||
serde = ["dep:serde"]
|
|
||||||
tests = ["rand_core/getrandom"]
|
tests = ["rand_core/getrandom"]
|
||||||
default = ["std"]
|
default = ["std"]
|
||||||
|
|
|
@@ -31,8 +31,7 @@ pub mod tests;
|
||||||
|
|
||||||
/// The ID of a participant, defined as a non-zero u16.
|
/// The ID of a participant, defined as a non-zero u16.
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)]
|
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)]
|
||||||
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]
|
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
|
||||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
|
||||||
pub struct Participant(pub(crate) u16);
|
pub struct Participant(pub(crate) u16);
|
||||||
impl Participant {
|
impl Participant {
|
||||||
/// Create a new Participant identifier from a u16.
|
/// Create a new Participant identifier from a u16.
|
||||||
|
@@ -118,6 +117,14 @@ mod lib {
|
||||||
Ciphersuite,
|
Ciphersuite,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
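// A manual impl, rather than a derive, so a zero (invalid) Participant can never be deserialized.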
#[cfg(feature = "borsh")]
|
||||||
|
impl borsh::BorshDeserialize for Participant {
|
||||||
|
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
|
Participant::new(u16::deserialize_reader(reader)?)
|
||||||
|
.ok_or_else(|| io::Error::other("invalid participant"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Validate a map of values to have the expected included participants
|
// Validate a map of values to have the expected included participants
|
||||||
pub(crate) fn validate_map<T, B: Clone + PartialEq + Eq + Debug>(
|
pub(crate) fn validate_map<T, B: Clone + PartialEq + Eq + Debug>(
|
||||||
map: &HashMap<Participant, T>,
|
map: &HashMap<Participant, T>,
|
||||||
|
@@ -147,8 +154,7 @@ mod lib {
|
||||||
/// Parameters for a multisig.
|
/// Parameters for a multisig.
|
||||||
// These fields should not be made public as they should be static
|
// These fields should not be made public as they should be static
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
||||||
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]
|
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
|
||||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
|
||||||
pub struct ThresholdParams {
|
pub struct ThresholdParams {
|
||||||
/// Participants needed to sign on behalf of the group.
|
/// Participants needed to sign on behalf of the group.
|
||||||
pub(crate) t: u16,
|
pub(crate) t: u16,
|
||||||
|
@@ -189,6 +195,16 @@ mod lib {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "borsh")]
|
||||||
|
impl borsh::BorshDeserialize for ThresholdParams {
|
||||||
|
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
|
let t = u16::deserialize_reader(reader)?;
|
||||||
|
let n = u16::deserialize_reader(reader)?;
|
||||||
|
let i = Participant::deserialize_reader(reader)?;
|
||||||
|
ThresholdParams::new(t, n, i).map_err(|e| io::Error::other(format!("{e:?}")))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Calculate the lagrange coefficient for a signing set.
|
/// Calculate the lagrange coefficient for a signing set.
|
||||||
pub fn lagrange<F: PrimeField>(i: Participant, included: &[Participant]) -> F {
|
pub fn lagrange<F: PrimeField>(i: Participant, included: &[Participant]) -> F {
|
||||||
let i_f = F::from(u64::from(u16::from(i)));
|
let i_f = F::from(u64::from(u16::from(i)));
|
||||||
|
|
|
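ThresholdParams gets the same treatment: since decoding funnels through ThresholdParams::new, a byte string encoding internally inconsistent parameters fails to decode instead of yielding a value the rest of the crate assumes is impossible. A hedged sketch, again assuming borsh's u16 layout and field-declaration-order encoding (t, then n, then i, as the impl above reads them):

```rust
use borsh::BorshDeserialize;
use dkg::ThresholdParams;

// Encode (t, n, i) the way the derived BorshSerialize would: in field order.
fn encode(t: u16, n: u16, i: u16) -> Vec<u8> {
  [t, n, i].iter().flat_map(|v| v.to_le_bytes()).collect()
}

fn main() {
  // A consistent 3-of-5 with participant index 1 decodes fine.
  assert!(ThresholdParams::try_from_slice(&encode(3, 5, 1)).is_ok());

  // A threshold above the participant count fails ThresholdParams::new,
  // so deserialization errors rather than returning a bogus value.
  assert!(ThresholdParams::try_from_slice(&encode(6, 5, 1)).is_err());
}
```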
@@ -224,13 +224,15 @@ pub trait SignMachine<S>: Send + Sync + Sized {
   /// security as your private key share.
   fn cache(self) -> CachedPreprocess;

-  /// Create a sign machine from a cached preprocess. After this, the preprocess must be deleted so
-  /// it's never reused. Any reuse would cause the signer to leak their secret share.
+  /// Create a sign machine from a cached preprocess.
+  ///
+  /// After this, the preprocess must be deleted so it's never reused. Any reuse will presumably
+  /// cause the signer to leak their secret share.
   fn from_cache(
     params: Self::Params,
     keys: Self::Keys,
     cache: CachedPreprocess,
-  ) -> Result<Self, FrostError>;
+  ) -> (Self, Self::Preprocess);

   /// Read a Preprocess message. Despite taking self, this does not save the preprocess.
   /// It must be externally cached and passed into sign.
@@ -277,9 +279,8 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
     algorithm: A,
     keys: ThresholdKeys<C>,
     cache: CachedPreprocess,
-  ) -> Result<Self, FrostError> {
-    let (machine, _) = AlgorithmMachine::new(algorithm, keys).seeded_preprocess(cache);
-    Ok(machine)
+  ) -> (Self, Self::Preprocess) {
+    AlgorithmMachine::new(algorithm, keys).seeded_preprocess(cache)
   }

   fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
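The signature change makes from_cache infallible and hands the regenerated preprocess back alongside the machine, so a restarted signer can rebroadcast its commitments without a separate read path. A rough sketch of the call-site shape this enables, generic over any SignMachine; the frost::sign import path and the Writable::serialize helper are assumed from the surrounding code, and `resume` is a hypothetical helper, not part of the crate:

```rust
use frost::sign::{Writable, CachedPreprocess, SignMachine};

// Hypothetical helper: resume a signing session from a cached preprocess,
// returning the machine plus the serialized preprocess for rebroadcast.
// `cache` is assumed to come from an earlier `machine.cache()` call.
fn resume<S, M: SignMachine<S>>(
  params: M::Params,
  keys: M::Keys,
  cache: CachedPreprocess,
) -> (M, Vec<u8>) {
  // Infallible under the new signature; the preprocess comes back with the
  // machine instead of requiring the caller to unwrap a Result.
  let (machine, preprocess) = M::from_cache(params, keys, cache);
  let preprocess_bytes = preprocess.serialize();
  (machine, preprocess_bytes)
}
```

Callers which only want the machine, like the test below, can simply take the first element of the tuple.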
@@ -183,7 +183,7 @@ pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>(
       let cache = machines.remove(&i).unwrap().cache();
       machines.insert(
         i,
-        M::SignMachine::from_cache(params.clone(), keys.remove(&i).unwrap(), cache).unwrap(),
+        M::SignMachine::from_cache(params.clone(), keys.remove(&i).unwrap(), cache).0,
       );
     }
   }
@@ -16,7 +16,6 @@ use frost_schnorrkel::Schnorrkel;

 use log::{info, warn};

-use scale::Encode;
 use serai_client::validator_sets::primitives::Session;

 use messages::coordinator::*;
@@ -16,7 +16,6 @@ use frost::{

 use log::info;

-use scale::Encode;
 use serai_client::validator_sets::primitives::{Session, KeyPair};
 use messages::key_gen::*;

@@ -10,7 +10,6 @@ use frost::{

 use log::{info, debug, warn, error};

-use scale::Encode;
 use serai_client::validator_sets::primitives::Session;
 use messages::sign::*;

@@ -34,5 +34,19 @@ serai-signals-primitives = { path = "../signals/primitives", version = "0.1" }
 frame-support = { git = "https://github.com/serai-dex/substrate" }

 [features]
-borsh = ["dep:borsh"]
-serde = ["dep:serde"]
+borsh = [
+  "dep:borsh",
+  "serai-primitives/borsh",
+  "serai-coins-primitives/borsh",
+  "serai-validator-sets-primitives/borsh",
+  "serai-in-instructions-primitives/borsh",
+  "serai-signals-primitives/borsh",
+]
+serde = [
+  "dep:serde",
+  "serai-primitives/serde",
+  "serai-coins-primitives/serde",
+  "serai-validator-sets-primitives/serde",
+  "serai-in-instructions-primitives/serde",
+  "serai-signals-primitives/serde",
+]
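With this fan-out, a downstream crate opting into borsh (or serde) on the ABI gets the matching codec enabled across every primitives crate in one flag, rather than keeping five feature lists in sync by hand. A hypothetical consumer manifest (the path is illustrative):

```toml
# Hypothetical downstream Cargo.toml: the single flag now propagates to
# serai-primitives, serai-coins-primitives, serai-validator-sets-primitives,
# serai-in-instructions-primitives, and serai-signals-primitives.
[dependencies]
serai-abi = { path = "../substrate/abi", features = ["borsh"] }
```

The serai-client hunk below applies the same pattern one level up, forwarding its own borsh feature to serai-abi.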
@@ -54,6 +54,7 @@ serai-docker-tests = { path = "../../tests/docker" }

 [features]
 serai = ["thiserror", "serde", "serde_json", "sp-core", "sp-runtime", "frame-system", "simple-request"]
+borsh = ["serai-abi/borsh"]

 networks = []
 bitcoin = ["networks", "dep:bitcoin"]