Perform MuSig signing of generated keys

Luke Parker 2023-08-14 06:08:55 -04:00
parent 6b41c91dc2
commit 5e02f936e4
12 changed files with 653 additions and 175 deletions

Cargo.lock generated
View file

@ -8001,6 +8001,7 @@ dependencies = [
"ciphersuite",
"env_logger",
"flexible-transcript",
"frost-schnorrkel",
"futures",
"hex",
"lazy_static",
@ -8008,6 +8009,7 @@ dependencies = [
"log",
"modular-frost",
"parity-scale-codec",
"rand_chacha 0.3.1",
"rand_core 0.6.4",
"schnorr-signatures",
"serai-client",
@ -8017,6 +8019,7 @@ dependencies = [
"serai-processor-messages",
"serde_json",
"sp-application-crypto",
"sp-runtime",
"tokio",
"tributary-chain",
"zeroize",

View file

@ -19,6 +19,7 @@ lazy_static = "1"
zeroize = "^1.5"
rand_core = "0.6"
rand_chacha = "0.3"
blake2 = "0.10"
@ -26,6 +27,7 @@ transcript = { package = "flexible-transcript", path = "../crypto/transcript", f
ciphersuite = { path = "../crypto/ciphersuite" }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr" }
frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" }
scale = { package = "parity-scale-codec", version = "3", features = ["derive"] }
@ -53,3 +55,4 @@ libp2p = { version = "0.52", features = ["tokio", "tcp", "noise", "yamux", "goss
[dev-dependencies]
futures = "0.3"
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false }

View file

@ -17,7 +17,7 @@ use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};
use serai_db::{DbTxn, Db};
use serai_env as env;
use serai_client::{Public, Signature, Serai};
use serai_client::{Public, Serai};
use message_queue::{Service, client::MessageQueue};
@ -102,7 +102,7 @@ pub async fn scan_substrate<D: Db, Pro: Processors>(
db: D,
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
processors: Pro,
serai: Serai,
serai: Arc<Serai>,
) {
log::info!("scanning substrate");
@ -150,6 +150,7 @@ pub async fn scan_tributaries<D: Db, Pro: Processors, P: P2p>(
recognized_id_send: UnboundedSender<([u8; 32], RecognizedIdType, [u8; 32])>,
p2p: P,
processors: Pro,
serai: Arc<Serai>,
tributaries: Arc<RwLock<Tributaries<D, P>>>,
) {
log::info!("scanning tributaries");
@ -184,11 +185,30 @@ pub async fn scan_tributaries<D: Db, Pro: Processors, P: P2p>(
}
for (spec, reader) in &tributary_readers {
tributary::scanner::handle_new_blocks::<_, _>(
tributary::scanner::handle_new_blocks(
&mut tributary_db,
&key,
&recognized_id_send,
&processors,
|set, tx| {
let serai = serai.clone();
async move {
loop {
match serai.publish(&tx).await {
Ok(hash) => {
log::info!("set key pair for {:?} in TX {}", set, hex::encode(hash))
}
// This is assumed to be an ephemeral error due to the assumed fault-free creation
// TODO: Differentiate connection errors from already published to an invariant
Err(e) => {
log::error!("couldn't connect to Serai node to publish vote TX: {:?}", e);
tokio::time::sleep(Duration::from_secs(10)).await;
}
}
}
}
},
spec,
reader,
)
@ -395,7 +415,7 @@ pub async fn publish_transaction<D: Db, P: P2p>(
pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
mut db: D,
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: Serai,
serai: Arc<Serai>,
mut processors: Pro,
tributaries: Arc<RwLock<Tributaries<D, P>>>,
) {
@ -406,32 +426,41 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
// TODO2: This is slow, and only works as long as a network only has a single Tributary
// (which means there's a lack of multisig rotation)
let (genesis, my_i) = {
let mut genesis = None;
let mut my_i = None;
let spec = {
let mut spec = None;
for tributary in tributaries.read().await.values() {
if tributary.spec.set().network == msg.network {
genesis = Some(tributary.spec.genesis());
// TODO: We probably want to NOP here, not panic?
my_i = Some(
tributary
.spec
.i(pub_key)
.expect("processor message for network we aren't a validator in"),
);
spec = Some(tributary.spec.clone());
break;
}
}
(genesis.unwrap(), my_i.unwrap())
spec.unwrap()
};
let genesis = spec.genesis();
// TODO: We probably want to NOP here, not panic?
let my_i = spec.i(pub_key).expect("processor message for network we aren't a validator in");
let tx = match msg.msg.clone() {
ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
key_gen::ProcessorMessage::Commitments { id, commitments } => {
Some(Transaction::DkgCommitments(id.attempt, commitments, Transaction::empty_signed()))
}
key_gen::ProcessorMessage::Shares { id, shares } => {
Some(Transaction::DkgShares(id.attempt, my_i, shares, Transaction::empty_signed()))
// Create a MuSig-based machine to inform Substrate of this key generation
// DkgConfirmer has a TODO noting it's only secure for a single usage; this check ensures
// that TODO is resolved before any unsafe usage
if id.attempt != 0 {
panic!("attempt wasn't 0");
}
let nonces = crate::tributary::scanner::dkg_confirmation_nonces(&key, &spec);
Some(Transaction::DkgShares {
attempt: id.attempt,
sender_i: my_i,
shares,
confirmation_nonces: nonces,
signed: Transaction::empty_signed(),
})
}
key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
assert_eq!(
@ -440,34 +469,22 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
);
// TODO: Also check the other KeyGenId fields
// TODO: Sign a MuSig signature here
let tx = Serai::set_validator_set_keys(
id.set.network,
(
Public(substrate_key),
network_key
.try_into()
.expect("external key from processor exceeded max external key length"),
),
Signature([0; 64]), // TODO
// Tell the Tributary the key pair, get back the share for the MuSig signature
let mut txn = db.txn();
let share = crate::tributary::scanner::generated_key_pair::<D>(
&mut txn,
&key,
&spec,
&(Public(substrate_key), network_key.try_into().unwrap()),
);
txn.commit();
loop {
match serai.publish(&tx).await {
Ok(hash) => {
log::info!("voted on key pair for {:?} in TX {}", id.set, hex::encode(hash))
}
// This is assumed to be some ephemeral error due to the assumed fault-free creation
// TODO: Differentiate connection errors from already published to an invariant
Err(e) => {
log::error!("couldn't connect to Serai node to publish vote TX: {:?}", e);
tokio::time::sleep(Duration::from_secs(10)).await;
}
match share {
Ok(share) => {
Some(Transaction::DkgConfirmed(id.attempt, share, Transaction::empty_signed()))
}
Err(p) => todo!("participant {p:?} sent invalid DKG confirmation preprocesses"),
}
None
}
},
ProcessorMessage::Sign(msg) => match msg {
@ -625,6 +642,8 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
processors: Pro,
serai: Serai,
) {
let serai = Arc::new(serai);
// Handle new Substrate blocks
tokio::spawn(scan_substrate(raw_db.clone(), key.clone(), processors.clone(), serai.clone()));
@ -647,6 +666,8 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
}
// Handle new blocks for each Tributary
// TODO: This channel is unsafe. The Tributary may send an event which is then marked handled
// before it actually is. This must be a blocking function.
let (recognized_id_send, mut recognized_id_recv) = mpsc::unbounded_channel();
{
let raw_db = raw_db.clone();
@ -656,6 +677,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
recognized_id_send,
p2p.clone(),
processors.clone(),
serai.clone(),
tributaries.clone(),
));
}
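
Taken together, the hunks above wire a multi-step confirmation flow through the coordinator. A comment-form map of that flow (a reading aid reconstructed from this commit, not code from the repository):

```rust
// MuSig confirmation flow, as wired through handle_processors above:
//
// 1. key_gen::ProcessorMessage::Shares
//      -> Transaction::DkgShares now carries confirmation_nonces, the
//         64-byte preprocess from dkg_confirmation_nonces().
// 2. key_gen::ProcessorMessage::GeneratedKeyPair
//      -> generated_key_pair() saves the reported key pair and returns this
//         validator's 32-byte signature share (or the faulty Participant).
// 3. Transaction::DkgConfirmed
//      -> publishes that share on the Tributary.
// 4. Once all DkgConfirmed transactions are in, the Tributary scanner
//      completes the 64-byte Schnorrkel signature and publishes
//      Serai::set_validator_set_keys via the publish_serai_tx closure,
//      retrying every 10 seconds on connection errors.
```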

View file

@ -4,12 +4,16 @@ use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};
use ciphersuite::{Ciphersuite, Ristretto};
use scale::Decode;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;
use sp_runtime::traits::Verify;
use tokio::{time::sleep, sync::mpsc};
use serai_db::MemDb;
use serai_db::{DbTxn, Db, MemDb};
use processor_messages::{
key_gen::{self, KeyGenId},
@ -85,6 +89,7 @@ async fn dkg_test() {
key,
&mpsc::unbounded_channel().0,
&processors,
|_, _| async { panic!("test tried to publish a new Serai TX in new_processors") },
spec,
&tributary.reader(),
)
@ -108,6 +113,7 @@ async fn dkg_test() {
&keys[0],
&mpsc::unbounded_channel().0,
&processors,
|_, _| async { panic!("test tried to publish a new Serai TX after Commitments") },
&spec,
&tributaries[0].1.reader(),
)
@ -160,12 +166,13 @@ async fn dkg_test() {
}
}
let mut tx = Transaction::DkgShares(
let mut tx = Transaction::DkgShares {
attempt,
Participant::new((k + 1).try_into().unwrap()).unwrap(),
sender_i: Participant::new((k + 1).try_into().unwrap()).unwrap(),
shares,
Transaction::empty_signed(),
);
confirmation_nonces: crate::tributary::scanner::dkg_confirmation_nonces(key, &spec),
signed: Transaction::empty_signed(),
};
tx.sign(&mut OsRng, spec.genesis(), key, 1);
txs.push(tx);
}
@ -184,6 +191,7 @@ async fn dkg_test() {
&keys[0],
&mpsc::unbounded_channel().0,
&processors,
|_, _| async { panic!("test tried to publish a new Serai TX after some shares") },
&spec,
&tributaries[0].1.reader(),
)
@ -205,7 +213,7 @@ async fn dkg_test() {
.iter()
.enumerate()
.filter_map(|(l, tx)| {
if let Transaction::DkgShares(_, _, shares, _) = tx {
if let Transaction::DkgShares { shares, .. } = tx {
shares
.get(&Participant::new((i + 1).try_into().unwrap()).unwrap())
.cloned()
@ -224,6 +232,7 @@ async fn dkg_test() {
&keys[0],
&mpsc::unbounded_channel().0,
&processors,
|_, _| async { panic!("test tried to publish a new Serai TX") },
&spec,
&tributaries[0].1.reader(),
)
@ -254,4 +263,99 @@ async fn dkg_test() {
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
assert!(msgs.is_empty());
}
// Send DkgConfirmed
let mut substrate_key = [0; 32];
OsRng.fill_bytes(&mut substrate_key);
let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
OsRng.fill_bytes(&mut network_key);
let key_pair = (serai_client::Public(substrate_key), network_key.try_into().unwrap());
let mut txs = vec![];
for (k, key) in keys.iter().enumerate() {
let attempt = 0;
// Re-using the one DB across all validators is fine here, due to exactly how this specific
// call is coded, albeit poor practice
let mut txn = scanner_db.0.txn();
let share =
crate::tributary::scanner::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair)
.unwrap();
txn.commit();
let mut tx = Transaction::DkgConfirmed(attempt, share, Transaction::empty_signed());
tx.sign(&mut OsRng, spec.genesis(), key, 2);
txs.push(tx);
}
let block_before_tx = tributaries[0].1.tip().await;
for (i, tx) in txs.iter().enumerate() {
assert!(tributaries[i].1.add_transaction(tx.clone()).await);
}
for tx in txs.iter() {
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
}
// The scanner should successfully try to publish a transaction with a valid signature
handle_new_blocks(
&mut scanner_db,
&keys[0],
&mpsc::unbounded_channel().0,
&processors,
|set, tx| {
let spec = spec.clone();
let key_pair = key_pair.clone();
async move {
let tx = tx.0;
// Version, Pallet, Call, Network, Key Pair, Signature
let expected_len = 1 + 1 + 1 + 1 + 32 + 1 + key_pair.1.len() + 64;
// It's length prefixed
assert_eq!(tx.len(), 2 + expected_len);
let expected_len = u16::try_from(expected_len).unwrap();
// Check the encoded length
let bottom_six = expected_len & 0b111111;
let upper_eight = expected_len >> 6;
assert_eq!(u8::try_from((bottom_six << 2) | 1).unwrap(), tx[0]);
assert_eq!(u8::try_from(upper_eight).unwrap(), tx[1]);
// Version
assert_eq!(tx[2], 4);
// Pallet
// TODO
// Call
type Call = serai_client::runtime::validator_sets::Call<serai_client::runtime::Runtime>;
let tx = Call::decode(&mut &tx[4 ..]).unwrap();
match tx {
Call::set_keys { network, key_pair: set_key_pair, signature } => {
assert_eq!(set, spec.set());
assert_eq!(set.network, network);
assert_eq!(key_pair, set_key_pair);
assert!(signature.verify(
&*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
&serai_client::Public(
frost::dkg::musig::musig_key::<Ristretto>(
&serai_client::validator_sets::primitives::musig_context(set),
&spec
.validators()
.into_iter()
.map(|(validator, _)| validator)
.collect::<Vec<_>>()
)
.unwrap()
.to_bytes()
),
));
}
_ => panic!("Serai TX wasn't to set_keys"),
}
}
},
&spec,
&tributaries[0].1.reader(),
)
.await;
{
assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
}
}
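
The two length bytes checked in the test above follow SCALE's compact encoding: for values in 64..16384, the value is shifted left two bits and the low two bits of the first byte are set to 0b01 (the two-byte mode), stored little-endian. A standalone sketch confirming the byte layout, assuming this workspace's `scale` alias for `parity-scale-codec`:

```rust
use scale::{Compact, Encode};

fn main() {
  // A length in the two-byte range (64..16384) of SCALE's compact encoding.
  let expected_len: u16 = 300;

  // Compact encoding shifts the value left by two bits and sets the low two
  // bits of the first byte to 0b01 (the two-byte mode).
  let encoded = Compact(expected_len).encode();
  assert_eq!(encoded.len(), 2);

  // Identical to the manual check in the test above.
  let bottom_six = expected_len & 0b111111;
  let upper_eight = expected_len >> 6;
  assert_eq!(encoded[0], u8::try_from((bottom_six << 2) | 1).unwrap());
  assert_eq!(encoded[1], u8::try_from(upper_eight).unwrap());
}
```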

View file

@ -79,14 +79,29 @@ fn serialize_transaction() {
shares.insert(Participant::new(u16::try_from(i + 1).unwrap()).unwrap(), share);
}
test_read_write(Transaction::DkgShares(
random_u32(&mut OsRng),
Participant::new(random_u16(&mut OsRng).saturating_add(1)).unwrap(),
test_read_write(Transaction::DkgShares {
attempt: random_u32(&mut OsRng),
sender_i: Participant::new(random_u16(&mut OsRng).saturating_add(1)).unwrap(),
shares,
random_signed(&mut OsRng),
));
confirmation_nonces: {
let mut nonces = [0; 64];
OsRng.fill_bytes(&mut nonces);
nonces
},
signed: random_signed(&mut OsRng),
});
}
test_read_write(Transaction::DkgConfirmed(
random_u32(&mut OsRng),
{
let mut share = [0; 32];
OsRng.fill_bytes(&mut share);
share
},
random_signed(&mut OsRng),
));
{
let mut ext_block = [0; 32];
OsRng.fill_bytes(&mut ext_block);

View file

@ -1,7 +1,11 @@
use std::io::Read;
use scale::{Encode, Decode};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::validator_sets::primitives::KeyPair;
pub use serai_db::*;
#[derive(Debug)]
@ -51,6 +55,22 @@ impl<D: Db> TributaryDb<D> {
})
}
fn currently_completing_key_pair_key(genesis: [u8; 32]) -> Vec<u8> {
Self::tributary_key(b"currently_completing_key_pair", genesis)
}
pub fn save_currently_completing_key_pair(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],
key_pair: &KeyPair,
) {
txn.put(Self::currently_completing_key_pair_key(genesis), key_pair.encode())
}
pub fn currently_completing_key_pair<G: Get>(getter: &G, genesis: [u8; 32]) -> Option<KeyPair> {
getter
.get(Self::currently_completing_key_pair_key(genesis))
.map(|bytes| KeyPair::decode(&mut bytes.as_slice()).unwrap())
}
fn recognized_id_key(label: &'static str, genesis: [u8; 32], id: [u8; 32]) -> Vec<u8> {
Self::tributary_key(b"recognized", [label.as_bytes(), genesis.as_ref(), id.as_ref()].concat())
}

View file

@ -218,7 +218,14 @@ impl ReadWrite for SignData {
pub enum Transaction {
// Once this completes successfully, no more instances should be created.
DkgCommitments(u32, Vec<u8>, Signed),
DkgShares(u32, Participant, HashMap<Participant, Vec<u8>>, Signed),
DkgShares {
attempt: u32,
sender_i: Participant,
shares: HashMap<Participant, Vec<u8>>,
confirmation_nonces: [u8; 64],
signed: Signed,
},
DkgConfirmed(u32, [u8; 32], Signed),
// When an external block is finalized, we can allow the associated batch IDs
// Commits to the full block so eclipsed nodes don't continue on their eclipsed state
@ -289,36 +296,53 @@ impl ReadWrite for Transaction {
shares
};
let mut confirmation_nonces = [0; 64];
reader.read_exact(&mut confirmation_nonces)?;
let signed = Signed::read(reader)?;
Ok(Transaction::DkgShares(
Ok(Transaction::DkgShares {
attempt,
Participant::new(sender_i)
sender_i: Participant::new(sender_i)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid sender participant"))?,
shares,
confirmation_nonces,
signed,
))
})
}
2 => {
let mut attempt = [0; 4];
reader.read_exact(&mut attempt)?;
let attempt = u32::from_le_bytes(attempt);
let mut confirmation_share = [0; 32];
reader.read_exact(&mut confirmation_share)?;
let signed = Signed::read(reader)?;
Ok(Transaction::DkgConfirmed(attempt, confirmation_share, signed))
}
3 => {
let mut block = [0; 32];
reader.read_exact(&mut block)?;
Ok(Transaction::ExternalBlock(block))
}
3 => {
4 => {
let mut block = [0; 8];
reader.read_exact(&mut block)?;
Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
}
4 => SignData::read(reader).map(Transaction::BatchPreprocess),
5 => SignData::read(reader).map(Transaction::BatchShare),
5 => SignData::read(reader).map(Transaction::BatchPreprocess),
6 => SignData::read(reader).map(Transaction::BatchShare),
6 => SignData::read(reader).map(Transaction::SignPreprocess),
7 => SignData::read(reader).map(Transaction::SignShare),
7 => SignData::read(reader).map(Transaction::SignPreprocess),
8 => SignData::read(reader).map(Transaction::SignShare),
8 => {
9 => {
let mut plan = [0; 32];
reader.read_exact(&mut plan)?;
@ -349,7 +373,7 @@ impl ReadWrite for Transaction {
signed.write(writer)
}
Transaction::DkgShares(attempt, sender_i, shares, signed) => {
Transaction::DkgShares { attempt, sender_i, shares, confirmation_nonces, signed } => {
writer.write_all(&[1])?;
writer.write_all(&attempt.to_le_bytes())?;
@ -389,38 +413,46 @@ impl ReadWrite for Transaction {
writer.write_all(share)?;
}
writer.write_all(confirmation_nonces)?;
signed.write(writer)
}
Transaction::DkgConfirmed(attempt, share, signed) => {
writer.write_all(&[2])?;
writer.write_all(&attempt.to_le_bytes())?;
writer.write_all(share)?;
signed.write(writer)
}
Transaction::ExternalBlock(block) => {
writer.write_all(&[2])?;
writer.write_all(&[3])?;
writer.write_all(block)
}
Transaction::SubstrateBlock(block) => {
writer.write_all(&[3])?;
writer.write_all(&[4])?;
writer.write_all(&block.to_le_bytes())
}
Transaction::BatchPreprocess(data) => {
writer.write_all(&[4])?;
writer.write_all(&[5])?;
data.write(writer)
}
Transaction::BatchShare(data) => {
writer.write_all(&[5])?;
writer.write_all(&[6])?;
data.write(writer)
}
Transaction::SignPreprocess(data) => {
writer.write_all(&[6])?;
data.write(writer)
}
Transaction::SignShare(data) => {
writer.write_all(&[7])?;
data.write(writer)
}
Transaction::SignCompleted(plan, tx, signed) => {
Transaction::SignShare(data) => {
writer.write_all(&[8])?;
data.write(writer)
}
Transaction::SignCompleted(plan, tx, signed) => {
writer.write_all(&[9])?;
writer.write_all(plan)?;
writer.write_all(&[u8::try_from(tx.len()).expect("tx hash length exceed 255 bytes")])?;
writer.write_all(tx)?;
@ -434,7 +466,8 @@ impl TransactionTrait for Transaction {
fn kind(&self) -> TransactionKind<'_> {
match self {
Transaction::DkgCommitments(_, _, signed) => TransactionKind::Signed(signed),
Transaction::DkgShares(_, _, _, signed) => TransactionKind::Signed(signed),
Transaction::DkgShares { signed, .. } => TransactionKind::Signed(signed),
Transaction::DkgConfirmed(_, _, signed) => TransactionKind::Signed(signed),
Transaction::ExternalBlock(_) => TransactionKind::Provided("external"),
Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"),
@ -492,7 +525,8 @@ impl Transaction {
fn signed(tx: &mut Transaction) -> &mut Signed {
match tx {
Transaction::DkgCommitments(_, _, ref mut signed) => signed,
Transaction::DkgShares(_, _, _, ref mut signed) => signed,
Transaction::DkgShares { ref mut signed, .. } => signed,
Transaction::DkgConfirmed(_, _, ref mut signed) => signed,
Transaction::ExternalBlock(_) => panic!("signing ExternalBlock"),
Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
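
Since `DkgConfirmed` takes serialization tag 2, every later variant's tag shifts up by one — a consensus-relevant renumbering worth spelling out. The resulting layout, reconstructed from the read/write paths above:

```rust
// Transaction serialization tags after this commit:
//   0 => DkgCommitments
//   1 => DkgShares
//   2 => DkgConfirmed    (new)
//   3 => ExternalBlock   (was 2)
//   4 => SubstrateBlock  (was 3)
//   5 => BatchPreprocess (was 4)
//   6 => BatchShare      (was 5)
//   7 => SignPreprocess  (was 6)
//   8 => SignShare       (was 7)
//   9 => SignCompleted   (was 8)
```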

View file

@ -1,9 +1,26 @@
use core::ops::Deref;
use core::{ops::Deref, future::Future};
use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::SeedableRng;
use rand_chacha::ChaCha20Rng;
use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::{Ciphersuite, Ristretto};
use frost::{
FrostError,
dkg::{Participant, musig::musig},
sign::*,
};
use frost_schnorrkel::Schnorrkel;
use serai_client::{
Signature,
validator_sets::primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message},
subxt::utils::Encoded,
Serai,
};
use tokio::sync::mpsc::UnboundedSender;
@ -15,7 +32,7 @@ use processor_messages::{
coordinator, CoordinatorMessage,
};
use serai_db::DbTxn;
use serai_db::{Get, DbTxn};
use crate::{
Db,
@ -23,6 +40,188 @@ use crate::{
tributary::{TributaryDb, TributarySpec, Transaction},
};
const DKG_CONFIRMATION_NONCES: &[u8] = b"dkg_confirmation_nonces";
const DKG_CONFIRMATION_SHARES: &[u8] = b"dkg_confirmation_shares";
// Instead of maintaining state, this simply re-creates the machine(s) in full on every call.
// This simplifies data flow and avoids requiring multiple paths.
// While more expensive, this only runs an O(n) algorithm, which is tolerable to run multiple
// times.
struct DkgConfirmer;
impl DkgConfirmer {
fn preprocess_internal(
spec: &TributarySpec,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
// TODO: Does Substrate already have a validator-uniqueness check?
let validators = spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
let context = musig_context(spec.set());
let mut chacha = ChaCha20Rng::from_seed({
let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy");
entropy_transcript.append_message(b"spec", spec.serialize());
entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes()));
// TODO: This is incredibly insecure unless message-bound (or bound via the attempt)
Zeroizing::new(entropy_transcript).rng_seed(b"preprocess")
});
let (machine, preprocess) = AlgorithmMachine::new(
Schnorrkel::new(b"substrate"),
musig(&context, key, &validators)
.expect("confirming the DKG for a set we aren't in/validator present multiple times")
.into(),
)
.preprocess(&mut chacha);
(machine, preprocess.serialize().try_into().unwrap())
}
// Get the preprocess for this confirmation.
fn preprocess(spec: &TributarySpec, key: &Zeroizing<<Ristretto as Ciphersuite>::F>) -> [u8; 64] {
Self::preprocess_internal(spec, key).1
}
fn share_internal(
spec: &TributarySpec,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
preprocesses: HashMap<Participant, Vec<u8>>,
key_pair: &KeyPair,
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
let machine = Self::preprocess_internal(spec, key).0;
let preprocesses = preprocesses
.into_iter()
.map(|(p, preprocess)| {
machine
.read_preprocess(&mut preprocess.as_slice())
.map(|preprocess| (p, preprocess))
.map_err(|_| p)
})
.collect::<Result<HashMap<_, _>, _>>()?;
let (machine, share) = machine
.sign(preprocesses, &set_keys_message(&spec.set(), key_pair))
.map_err(|e| match e {
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
})?;
Ok((machine, share.serialize().try_into().unwrap()))
}
// Get the share for this confirmation, if the preprocesses are valid.
fn share(
spec: &TributarySpec,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
preprocesses: HashMap<Participant, Vec<u8>>,
key_pair: &KeyPair,
) -> Result<[u8; 32], Participant> {
Self::share_internal(spec, key, preprocesses, key_pair).map(|(_, share)| share)
}
fn complete(
spec: &TributarySpec,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
preprocesses: HashMap<Participant, Vec<u8>>,
key_pair: &KeyPair,
shares: HashMap<Participant, Vec<u8>>,
) -> Result<[u8; 64], Participant> {
let machine = Self::share_internal(spec, key, preprocesses, key_pair)
.expect("trying to complete a machine which failed to preprocess")
.0;
let shares = shares
.into_iter()
.map(|(p, share)| {
machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)
})
.collect::<Result<HashMap<_, _>, _>>()?;
let signature = machine.complete(shares).map_err(|e| match e {
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
})?;
Ok(signature.to_bytes())
}
}
#[allow(clippy::too_many_arguments)] // TODO
fn read_known_to_exist_data<D: Db, G: Get>(
getter: &G,
spec: &TributarySpec,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
label: &'static [u8],
id: [u8; 32],
needed: u16,
attempt: u32,
bytes: Vec<u8>,
signed: Option<&Signed>,
) -> HashMap<Participant, Vec<u8>> {
let mut data = HashMap::new();
for validator in spec.validators().iter().map(|validator| validator.0) {
data.insert(
spec.i(validator).unwrap(),
if Some(&validator) == signed.map(|signed| &signed.signer) {
bytes.clone()
} else if let Some(data) =
TributaryDb::<D>::data(label, getter, spec.genesis(), id, attempt, validator)
{
data
} else {
continue;
},
);
}
assert_eq!(data.len(), usize::from(needed));
// Remove our own piece of data
assert!(data
.remove(
&spec
.i(Ristretto::generator() * key.deref())
.expect("handling a message for a Tributary we aren't part of")
)
.is_some());
data
}
pub fn dkg_confirmation_nonces(
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
spec: &TributarySpec,
) -> [u8; 64] {
DkgConfirmer::preprocess(spec, key)
}
#[allow(clippy::needless_pass_by_ref_mut)]
pub fn generated_key_pair<D: Db>(
txn: &mut D::Transaction<'_>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
spec: &TributarySpec,
key_pair: &KeyPair,
) -> Result<[u8; 32], Participant> {
TributaryDb::<D>::save_currently_completing_key_pair(txn, spec.genesis(), key_pair);
let attempt = 0; // TODO
let preprocesses = read_known_to_exist_data::<D, _>(
txn,
spec,
key,
DKG_CONFIRMATION_NONCES,
[0; 32],
spec.n(),
attempt,
vec![],
None,
);
DkgConfirmer::share(spec, key, preprocesses, key_pair)
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum RecognizedIdType {
Block,
@ -31,11 +230,17 @@ pub enum RecognizedIdType {
// Handle a specific Tributary block
#[allow(clippy::needless_pass_by_ref_mut)] // False positive?
async fn handle_block<D: Db, Pro: Processors>(
async fn handle_block<
D: Db,
Pro: Processors,
F: Future<Output = ()>,
PST: Fn(ValidatorSet, Encoded) -> F,
>(
db: &mut TributaryDb<D>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
recognized_id: &UnboundedSender<([u8; 32], RecognizedIdType, [u8; 32])>,
processors: &Pro,
publish_serai_tx: PST,
spec: &TributarySpec,
block: Block<Transaction>,
) {
@ -70,92 +275,79 @@ async fn handle_block<D: Db, Pro: Processors>(
}
}
let mut handle = |zone: Zone,
label,
needed,
id,
attempt,
mut bytes: Vec<u8>,
signed: Signed| {
if zone == Zone::Dkg {
// Since Dkg doesn't have an ID, solely attempts, this should just be [0; 32]
assert_eq!(id, [0; 32], "DKG, which shouldn't have IDs, had a non-0 ID");
} else if !TributaryDb::<D>::recognized_id(&txn, zone.label(), genesis, id) {
// TODO: Full slash
todo!();
}
// If they've already published a TX for this attempt, slash
if let Some(data) = TributaryDb::<D>::data(label, &txn, genesis, id, attempt, signed.signer)
{
if data != bytes {
let handle =
|txn: &mut _, zone: Zone, label, needed, id, attempt, bytes: Vec<u8>, signed: &Signed| {
if zone == Zone::Dkg {
// Since Dkg doesn't have an ID, solely attempts, this should just be [0; 32]
assert_eq!(id, [0; 32], "DKG, which shouldn't have IDs, had a non-0 ID");
} else if !TributaryDb::<D>::recognized_id(txn, zone.label(), genesis, id) {
// TODO: Full slash
todo!();
}
// TODO: Slash
return None;
}
// If they've already published a TX for this attempt, slash
if let Some(data) =
TributaryDb::<D>::data(label, txn, genesis, id, attempt, signed.signer)
{
if data != bytes {
// TODO: Full slash
todo!();
}
// If the attempt is lesser than the blockchain's, slash
let curr_attempt = TributaryDb::<D>::attempt(&txn, genesis, id);
if attempt < curr_attempt {
// TODO: Slash for being late
return None;
}
if attempt > curr_attempt {
// TODO: Full slash
todo!();
}
// TODO: We can also full slash if shares before all commitments, or share before the
// necessary preprocesses
// TODO: If this is shares, we need to check they are part of the selected signing set
// Store this data
let received =
TributaryDb::<D>::set_data(label, &mut txn, genesis, id, attempt, signed.signer, &bytes);
// If we have all the needed commitments/preprocesses/shares, tell the processor
// TODO: This needs to be coded by weight, not by validator count
if received == needed {
let mut data = HashMap::new();
for validator in spec.validators().iter().map(|validator| validator.0) {
data.insert(
spec.i(validator).unwrap(),
if validator == signed.signer {
bytes.split_off(0)
} else if let Some(data) =
TributaryDb::<D>::data(label, &txn, genesis, id, attempt, validator)
{
data
} else {
continue;
},
);
// TODO: Slash
return None;
}
assert_eq!(data.len(), usize::from(needed));
// Remove our own piece of data
assert!(data
.remove(
&spec
.i(Ristretto::generator() * key.deref())
.expect("handling a message for a Tributary we aren't part of")
)
.is_some());
// If the attempt is lesser than the blockchain's, slash
let curr_attempt = TributaryDb::<D>::attempt(txn, genesis, id);
if attempt < curr_attempt {
// TODO: Slash for being late
return None;
}
if attempt > curr_attempt {
// TODO: Full slash
todo!();
}
return Some(data);
}
None
};
// TODO: We can also full slash if shares before all commitments, or share before the
// necessary preprocesses
// TODO: If this is shares, we need to check they are part of the selected signing set
// Store this data
let received =
TributaryDb::<D>::set_data(label, txn, genesis, id, attempt, signed.signer, &bytes);
// If we have all the needed commitments/preprocesses/shares, tell the processor
// TODO: This needs to be coded by weight, not by validator count
if received == needed {
return Some(read_known_to_exist_data::<D, _>(
txn,
spec,
key,
label,
id,
needed,
attempt,
bytes,
Some(signed),
));
}
None
};
match tx {
Transaction::DkgCommitments(attempt, bytes, signed) => {
if let Some(commitments) =
handle(Zone::Dkg, b"dkg_commitments", spec.n(), [0; 32], attempt, bytes, signed)
{
if let Some(commitments) = handle(
&mut txn,
Zone::Dkg,
b"dkg_commitments",
spec.n(),
[0; 32],
attempt,
bytes,
&signed,
) {
log::info!("got all DkgCommitments for {}", hex::encode(genesis));
processors
.send(
@ -169,7 +361,7 @@ async fn handle_block<D: Db, Pro: Processors>(
}
}
Transaction::DkgShares(attempt, sender_i, mut shares, signed) => {
Transaction::DkgShares { attempt, sender_i, mut shares, confirmation_nonces, signed } => {
if sender_i !=
spec
.i(signed.signer)
@ -192,10 +384,21 @@ async fn handle_block<D: Db, Pro: Processors>(
// within the valid range will be the sender's i
let bytes = if sender_i == our_i { vec![] } else { shares.remove(&our_i).unwrap() };
let confirmation_nonces = handle(
&mut txn,
Zone::Dkg,
DKG_CONFIRMATION_NONCES,
spec.n(),
[0; 32],
attempt,
confirmation_nonces.to_vec(),
&signed,
);
if let Some(shares) =
handle(Zone::Dkg, b"dkg_shares", spec.n(), [0; 32], attempt, bytes, signed)
handle(&mut txn, Zone::Dkg, b"dkg_shares", spec.n(), [0; 32], attempt, bytes, &signed)
{
log::info!("got all DkgShares for {}", hex::encode(genesis));
assert!(confirmation_nonces.is_some());
processors
.send(
spec.set().network,
@ -205,6 +408,53 @@ async fn handle_block<D: Db, Pro: Processors>(
}),
)
.await;
} else {
assert!(confirmation_nonces.is_none());
}
}
Transaction::DkgConfirmed(attempt, shares, signed) => {
if let Some(shares) = handle(
&mut txn,
Zone::Dkg,
DKG_CONFIRMATION_SHARES,
spec.n(),
[0; 32],
attempt,
shares.to_vec(),
&signed,
) {
log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
let preprocesses = read_known_to_exist_data::<D, _>(
&txn,
spec,
key,
DKG_CONFIRMATION_NONCES,
[0; 32],
spec.n(),
attempt,
vec![],
None,
);
let key_pair = TributaryDb::<D>::currently_completing_key_pair(&txn, genesis)
.unwrap_or_else(|| {
panic!(
"in DkgConfirmed handling, which happens after everyone {}",
"(including us) fires DkgConfirmed, yet no confirming key pair"
)
});
let Ok(sig) = DkgConfirmer::complete(spec, key, preprocesses, &key_pair, shares) else {
// TODO: Full slash
todo!();
};
publish_serai_tx(
spec.set(),
Serai::set_validator_set_keys(spec.set().network, key_pair, Signature(sig)),
)
.await;
}
}
@ -232,13 +482,14 @@ async fn handle_block<D: Db, Pro: Processors>(
Transaction::BatchPreprocess(data) => {
if let Some(preprocesses) = handle(
&mut txn,
Zone::Batch,
b"batch_preprocess",
spec.t(),
data.plan,
data.attempt,
data.data,
data.signed,
&data.signed,
) {
processors
.send(
@ -255,13 +506,14 @@ async fn handle_block<D: Db, Pro: Processors>(
}
Transaction::BatchShare(data) => {
if let Some(shares) = handle(
&mut txn,
Zone::Batch,
b"batch_share",
spec.t(),
data.plan,
data.attempt,
data.data,
data.signed,
&data.signed,
) {
processors
.send(
@ -280,13 +532,14 @@ async fn handle_block<D: Db, Pro: Processors>(
Transaction::SignPreprocess(data) => {
if let Some(preprocesses) = handle(
&mut txn,
Zone::Sign,
b"sign_preprocess",
spec.t(),
data.plan,
data.attempt,
data.data,
data.signed,
&data.signed,
) {
processors
.send(
@ -301,13 +554,14 @@ async fn handle_block<D: Db, Pro: Processors>(
}
Transaction::SignShare(data) => {
if let Some(shares) = handle(
&mut txn,
Zone::Sign,
b"sign_share",
spec.t(),
data.plan,
data.attempt,
data.data,
data.signed,
&data.signed,
) {
processors
.send(
@ -332,11 +586,17 @@ async fn handle_block<D: Db, Pro: Processors>(
// TODO: Trigger any necessary re-attempts
}
pub async fn handle_new_blocks<D: Db, Pro: Processors>(
pub async fn handle_new_blocks<
D: Db,
Pro: Processors,
F: Future<Output = ()>,
PST: Clone + Fn(ValidatorSet, Encoded) -> F,
>(
db: &mut TributaryDb<D>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
recognized_id: &UnboundedSender<([u8; 32], RecognizedIdType, [u8; 32])>,
processors: &Pro,
publish_serai_tx: PST,
spec: &TributarySpec,
tributary: &TributaryReader<D, Transaction>,
) {
@ -344,7 +604,7 @@ pub async fn handle_new_blocks<D: Db, Pro: Processors>(
let mut last_block = db.last_block(genesis);
while let Some(next) = tributary.block_after(&last_block) {
let block = tributary.block(&next).unwrap();
handle_block(db, key, recognized_id, processors, spec, block).await;
handle_block(db, key, recognized_id, processors, publish_serai_tx.clone(), spec, block).await;
last_block = next;
db.set_last_block(genesis, next);
}
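
The nonce handling above is the core trick of `DkgConfirmer`: it never stores its preprocess, instead re-deriving it from a ChaCha20 RNG seeded by a transcript over the spec and key. A minimal, self-contained sketch of that determinism, using the same `flexible-transcript` and `rand_chacha` crates imported above (the example byte strings stand in for the real spec and key, and note the diff's own TODO that this is insecure until bound to the message or attempt):

```rust
use rand_core::{RngCore, SeedableRng};
use rand_chacha::ChaCha20Rng;
use transcript::{Transcript, RecommendedTranscript};

fn main() {
  // Seed a deterministic RNG from domain-separated inputs, as
  // DkgConfirmer::preprocess_internal does before calling .preprocess().
  let seeded_rng = |domain: &'static [u8]| {
    let mut transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy");
    // Example byte strings; the real code appends spec.serialize() and the
    // validator's key (and should also bind the attempt, per the TODO).
    transcript.append_message(b"spec", b"example spec".as_slice());
    transcript.append_message(b"key", b"example key".as_slice());
    ChaCha20Rng::from_seed(transcript.rng_seed(domain))
  };

  // Identical inputs yield identical nonce streams, so the preprocess can be
  // re-created on demand instead of persisted...
  let (mut a, mut b) = (seeded_rng(b"preprocess"), seeded_rng(b"preprocess"));
  assert_eq!(a.next_u64(), b.next_u64());

  // ...while a different domain label yields an unrelated stream.
  let mut c = seeded_rng(b"other");
  assert_ne!(seeded_rng(b"preprocess").next_u64(), c.next_u64());
}
```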

View file

@ -13,17 +13,6 @@ On `validator_sets::pallet::Event::NewSet`, the coordinator spawns a tributary
for the new set. It additionally sends the processor
`key_gen::CoordinatorMessage::GenerateKey`.
## Generated Key Pair
On `key_gen::ProcessorMessage::GeneratedKeyPair`, a
`validator_sets::pallet::vote` transaction is made to vote in the new key.
The Serai blockchain needs to know the key pair in order for it to be able to
publish `Batch`s. Additionally, having the Serai blockchain confirm the keys
provides a BFT consensus guarantee. While the tributary itself could also offer
a BFT consensus guarantee, there's no point when we'd then get BFT consensus
on the Serai blockchain anyways.
## Key Generation Event
On `validator_sets::pallet::Event::KeyGen`, the coordinator sends

View file

@ -16,9 +16,35 @@ commitments.
### Key Gen Shares
`DkgShares` is created when a processor sends the coordinator
`key_gen::ProcessorMessage::Shares`. When all validators participating in
a multisig publish `DkgShares`, the coordinator sends the processor
`key_gen::CoordinatorMessage::Shares`, excluding the processor's own shares.
`key_gen::ProcessorMessage::Shares`. The coordinator additionally includes its
own pair of MuSig nonces, used in a signing protocol to inform Substrate of the
key's successful creation.
When all validators participating in a multisig publish `DkgShares`, the
coordinator sends the processor `key_gen::CoordinatorMessage::Shares`, excluding
the processor's own shares and the MuSig nonces.
### Key Gen Confirmation
`DkgConfirmed` is created when a processor sends the coordinator
`key_gen::ProcessorMessage::GeneratedKeyPair`. The coordinator takes the MuSig
nonces it previously associated with this DKG attempt and publishes its
signature share.
When all validators participating in the multisig publish `DkgConfirmed`, an
extrinsic calling `validator_sets::pallet::set_keys` is made to confirm the
keys.
Setting the keys on the Serai blockchain lets it receive `Batch`s, provides a
BFT consensus guarantee, and makes the keys accessible to users. While the
tributary itself could offer both the BFT consensus guarantee and verifiable
accessibility, both would require users to access the tributary. Since
Substrate must already know the resulting key, there's no value in using the
tributary for this, as Substrate already offers all the desired properties.
Note that the keys are confirmed when Substrate emits a `KeyGen` event,
regardless of whether the Tributary has the expected `DkgConfirmed`
transactions.
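
To make the confirmation concrete: the `set_keys` signature verifies against the MuSig aggregation of the set's validator keys, over `set_keys_message`. A sketch of the inputs Substrate checks, using the same helpers this commit's tests use (their signatures are assumed to be as they appear elsewhere in this diff):

```rust
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::dkg::musig::musig_key;
use serai_client::validator_sets::primitives::{
  ValidatorSet, KeyPair, musig_context, set_keys_message,
};

// Returns the 32-byte MuSig public key and the message a `set_keys`
// signature must verify against. `validators` is the set's validator keys,
// in the order given by the Tributary spec.
fn set_keys_verification_inputs(
  set: ValidatorSet,
  validators: &[<Ristretto as Ciphersuite>::G],
  key_pair: &KeyPair,
) -> ([u8; 32], Vec<u8>) {
  let musig_public = musig_key::<Ristretto>(&musig_context(set), validators)
    .expect("set has no validators")
    .to_bytes();
  (musig_public, set_keys_message(&set, key_pair).to_vec())
}
```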
### External Block

View file

@ -26,6 +26,7 @@ use subxt::{
pub use serai_runtime::primitives;
pub use primitives::{Signature, SeraiAddress};
pub use serai_runtime as runtime;
use serai_runtime::{
system::Config, support::traits::PalletInfo as PalletInfoTrait, PalletInfo, Runtime,
};

View file

@ -146,6 +146,7 @@ pub mod pallet {
// Confirm a key hasn't been set for this set instance
let set = ValidatorSet { session, network };
// TODO: Is this needed? validate_unsigned should be called before this and ensure it's Ok
Self::verify_signature(set, &key_pair, &signature)?;
Keys::<T>::set(set, Some(key_pair.clone()));