diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 04df8516..bc91fdd6 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -30,8 +30,7 @@ use tokio::{ }; use ::tributary::{ - ReadWrite, ProvidedError, TransactionKind, Transaction as TransactionTrait, Block, Tributary, - TributaryReader, + ReadWrite, ProvidedError, TransactionKind, TransactionTrait, Block, Tributary, TributaryReader, }; mod tributary; @@ -185,7 +184,7 @@ pub async fn scan_tributaries( } for (spec, reader) in &tributary_readers { - tributary::scanner::handle_new_blocks( + tributary::scanner::handle_new_blocks::<_, _, _, _, P>( &mut tributary_db, &key, &recognized_id_send, @@ -460,7 +459,7 @@ pub async fn handle_processors( if id.attempt != 0 { panic!("attempt wasn't 0"); } - let nonces = crate::tributary::scanner::dkg_confirmation_nonces(&key, &spec); + let nonces = crate::tributary::dkg_confirmation_nonces(&key, &spec); Some(Transaction::DkgShares { attempt: id.attempt, sender_i: my_i, @@ -478,7 +477,7 @@ pub async fn handle_processors( // Tell the Tributary the key pair, get back the share for the MuSig signature let mut txn = db.txn(); - let share = crate::tributary::scanner::generated_key_pair::( + let share = crate::tributary::generated_key_pair::( &mut txn, &key, &spec, diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs index 4711fb01..0b27d02e 100644 --- a/coordinator/src/tests/tributary/chain.rs +++ b/coordinator/src/tests/tributary/chain.rs @@ -20,7 +20,7 @@ use tokio::time::sleep; use serai_db::MemDb; -use tributary::{Transaction as TransactionTrait, Tributary}; +use tributary::Tributary; use crate::{ P2pMessageKind, P2p, diff --git a/coordinator/src/tests/tributary/dkg.rs b/coordinator/src/tests/tributary/dkg.rs index dad6266d..03aaeda0 100644 --- a/coordinator/src/tests/tributary/dkg.rs +++ b/coordinator/src/tests/tributary/dkg.rs @@ -20,7 +20,7 @@ use processor_messages::{ CoordinatorMessage, }; -use tributary::{Transaction as TransactionTrait, Tributary}; +use tributary::{TransactionTrait, Tributary}; use crate::{ tributary::{TributaryDb, Transaction, TributarySpec, scanner::handle_new_blocks}, @@ -84,7 +84,7 @@ async fn dkg_test() { let mut scanner_db = TributaryDb(MemDb::new()); let processors = MemProcessors::new(); // Uses a brand new channel since this channel won't be used within this test - handle_new_blocks( + handle_new_blocks::<_, _, _, _, LocalP2p>( &mut scanner_db, key, &mpsc::unbounded_channel().0, @@ -108,7 +108,7 @@ async fn dkg_test() { sleep(Duration::from_secs(Tributary::::block_time().into())).await; // Verify the scanner emits a KeyGen::Commitments message - handle_new_blocks( + handle_new_blocks::<_, _, _, _, LocalP2p>( &mut scanner_db, &keys[0], &mpsc::unbounded_channel().0, @@ -170,7 +170,7 @@ async fn dkg_test() { attempt, sender_i: Participant::new((k + 1).try_into().unwrap()).unwrap(), shares, - confirmation_nonces: crate::tributary::scanner::dkg_confirmation_nonces(key, &spec), + confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec), signed: Transaction::empty_signed(), }; tx.sign(&mut OsRng, spec.genesis(), key, 1); @@ -186,7 +186,7 @@ async fn dkg_test() { } // With just 4 sets of shares, nothing should happen yet - handle_new_blocks( + handle_new_blocks::<_, _, _, _, LocalP2p>( &mut scanner_db, &keys[0], &mpsc::unbounded_channel().0, @@ -227,7 +227,7 @@ async fn dkg_test() { }; // Any scanner which has handled the prior blocks should only emit the new event - handle_new_blocks( + 
handle_new_blocks::<_, _, _, _, LocalP2p>( &mut scanner_db, &keys[0], &mpsc::unbounded_channel().0, @@ -278,8 +278,7 @@ async fn dkg_test() { // albeit poor let mut txn = scanner_db.0.txn(); let share = - crate::tributary::scanner::generated_key_pair::(&mut txn, key, &spec, &key_pair) - .unwrap(); + crate::tributary::generated_key_pair::(&mut txn, key, &spec, &key_pair).unwrap(); txn.commit(); let mut tx = Transaction::DkgConfirmed(attempt, share, Transaction::empty_signed()); @@ -295,7 +294,7 @@ async fn dkg_test() { } // The scanner should successfully try to publish a transaction with a validly signed signature - handle_new_blocks( + handle_new_blocks::<_, _, _, _, LocalP2p>( &mut scanner_db, &keys[0], &mpsc::unbounded_channel().0, diff --git a/coordinator/src/tests/tributary/tx.rs b/coordinator/src/tests/tributary/tx.rs index 98e8f327..3030834d 100644 --- a/coordinator/src/tests/tributary/tx.rs +++ b/coordinator/src/tests/tributary/tx.rs @@ -6,7 +6,9 @@ use tokio::time::sleep; use serai_db::MemDb; -use tributary::{Transaction as TransactionTrait, Tributary}; +use tributary::{ + transaction::Transaction as TransactionTrait, Transaction as TributaryTransaction, Tributary, +}; use crate::{ tributary::Transaction, @@ -49,6 +51,6 @@ async fn tx_test() { // All tributaries should have acknowledged this transaction in a block for (_, tributary) in tributaries { let block = tributary.reader().block(&included_in).unwrap(); - assert_eq!(block.transactions, vec![tx.clone()]); + assert_eq!(block.transactions, vec![TributaryTransaction::Application(tx.clone())]); } } diff --git a/coordinator/src/tributary/db.rs b/coordinator/src/tributary/db.rs index 7320bfbb..648d76e3 100644 --- a/coordinator/src/tributary/db.rs +++ b/coordinator/src/tributary/db.rs @@ -31,6 +31,32 @@ impl TributaryDb { self.0.get(Self::block_key(genesis)).map(|last| last.try_into().unwrap()).unwrap_or(genesis) } + /* TODO + pub fn slash_point_key(genesis: [u8; 32], id: [u8; 32]) -> Vec { + Self::tributary_key(b"slash_point", [genesis, id].concat()) + } + */ + + pub fn slash_vote_key(genesis: [u8; 32], id: [u8; 13], target: [u8; 32]) -> Vec { + Self::tributary_key(b"slash_vote", [genesis.as_slice(), &id, &target].concat()) + } + + fn fatal_slash_key(genesis: [u8; 32]) -> Vec { + Self::tributary_key(b"fatal_slash", genesis) + } + pub fn set_fatally_slashed(txn: &mut D::Transaction<'_>, genesis: [u8; 32], id: [u8; 32]) { + let key = Self::fatal_slash_key(genesis); + let mut existing = txn.get(&key).unwrap_or(vec![]); + + // don't append if we already have it. 
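+    // The value under fatal_slash_key is a flat concatenation of 32-byte IDs, so
+    // membership is a scan over 32-byte chunks and a repeat insertion is a no-op.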
+ if existing.chunks(32).any(|ex_id| ex_id == id) { + return; + } + + existing.extend(id); + txn.put(key, existing); + } + fn plan_ids_key(genesis: &[u8], block: u64) -> Vec { Self::tributary_key(b"plan_ids", [genesis, block.to_le_bytes().as_ref()].concat()) } diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs new file mode 100644 index 00000000..ed033f73 --- /dev/null +++ b/coordinator/src/tributary/handle.rs @@ -0,0 +1,548 @@ +use core::{ops::Deref, future::Future}; +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use rand_core::SeedableRng; +use rand_chacha::ChaCha20Rng; + +use transcript::{Transcript, RecommendedTranscript}; +use ciphersuite::{Ciphersuite, Ristretto}; +use frost::{ + FrostError, + dkg::{Participant, musig::musig}, + sign::*, +}; +use frost_schnorrkel::Schnorrkel; + +use tokio::sync::mpsc::UnboundedSender; + +use serai_client::{ + Signature, + validator_sets::primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message}, + subxt::utils::Encoded, + Serai, +}; + +use tributary::Signed; + +use processor_messages::{ + CoordinatorMessage, coordinator, + key_gen::{self, KeyGenId}, + sign::{self, SignId}, +}; + +use serai_db::{Get, Db}; + +use crate::processors::Processors; +use super::{Transaction, TributarySpec, TributaryDb, scanner::RecognizedIdType}; + +const DKG_CONFIRMATION_NONCES: &[u8] = b"dkg_confirmation_nonces"; +const DKG_CONFIRMATION_SHARES: &[u8] = b"dkg_confirmation_shares"; + +// Instead of maintaing state, this simply re-creates the machine(s) in-full on every call. +// This simplifies data flow and prevents requiring multiple paths. +// While more expensive, this only runs an O(n) algorithm, which is tolerable to run multiple +// times. +struct DkgConfirmer; +impl DkgConfirmer { + fn preprocess_internal( + spec: &TributarySpec, + key: &Zeroizing<::F>, + ) -> (AlgorithmSignMachine, [u8; 64]) { + // TODO: Does Substrate already have a validator-uniqueness check? + let validators = spec.validators().iter().map(|val| val.0).collect::>(); + + let context = musig_context(spec.set()); + let mut chacha = ChaCha20Rng::from_seed({ + let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy"); + entropy_transcript.append_message(b"spec", spec.serialize()); + entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes())); + // TODO: This is incredibly insecure unless message-bound (or bound via the attempt) + Zeroizing::new(entropy_transcript).rng_seed(b"preprocess") + }); + let (machine, preprocess) = AlgorithmMachine::new( + Schnorrkel::new(b"substrate"), + musig(&context, key, &validators) + .expect("confirming the DKG for a set we aren't in/validator present multiple times") + .into(), + ) + .preprocess(&mut chacha); + + (machine, preprocess.serialize().try_into().unwrap()) + } + // Get the preprocess for this confirmation. 
+ fn preprocess(spec: &TributarySpec, key: &Zeroizing<::F>) -> [u8; 64] { + Self::preprocess_internal(spec, key).1 + } + + fn share_internal( + spec: &TributarySpec, + key: &Zeroizing<::F>, + preprocesses: HashMap>, + key_pair: &KeyPair, + ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { + let machine = Self::preprocess_internal(spec, key).0; + let preprocesses = preprocesses + .into_iter() + .map(|(p, preprocess)| { + machine + .read_preprocess(&mut preprocess.as_slice()) + .map(|preprocess| (p, preprocess)) + .map_err(|_| p) + }) + .collect::, _>>()?; + let (machine, share) = machine + .sign(preprocesses, &set_keys_message(&spec.set(), key_pair)) + .map_err(|e| match e { + FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) => unreachable!("{e:?}"), + FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, + })?; + + Ok((machine, share.serialize().try_into().unwrap())) + } + // Get the share for this confirmation, if the preprocesses are valid. + fn share( + spec: &TributarySpec, + key: &Zeroizing<::F>, + preprocesses: HashMap>, + key_pair: &KeyPair, + ) -> Result<[u8; 32], Participant> { + Self::share_internal(spec, key, preprocesses, key_pair).map(|(_, share)| share) + } + + fn complete( + spec: &TributarySpec, + key: &Zeroizing<::F>, + preprocesses: HashMap>, + key_pair: &KeyPair, + shares: HashMap>, + ) -> Result<[u8; 64], Participant> { + let machine = Self::share_internal(spec, key, preprocesses, key_pair) + .expect("trying to complete a machine which failed to preprocess") + .0; + + let shares = shares + .into_iter() + .map(|(p, share)| { + machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) + }) + .collect::, _>>()?; + let signature = machine.complete(shares).map_err(|e| match e { + FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) => unreachable!("{e:?}"), + FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, + })?; + + Ok(signature.to_bytes()) + } +} + +#[allow(clippy::too_many_arguments)] // TODO +fn read_known_to_exist_data( + getter: &G, + spec: &TributarySpec, + key: &Zeroizing<::F>, + label: &'static [u8], + id: [u8; 32], + needed: u16, + attempt: u32, + bytes: Vec, + signed: Option<&Signed>, +) -> HashMap> { + let mut data = HashMap::new(); + for validator in spec.validators().iter().map(|validator| validator.0) { + data.insert( + spec.i(validator).unwrap(), + if Some(&validator) == signed.map(|signed| &signed.signer) { + bytes.clone() + } else if let Some(data) = + TributaryDb::::data(label, getter, spec.genesis(), id, attempt, validator) + { + data + } else { + continue; + }, + ); + } + assert_eq!(data.len(), usize::from(needed)); + + // Remove our own piece of data + assert!(data + .remove( + &spec + .i(Ristretto::generator() * key.deref()) + .expect("handling a message for a Tributary we aren't part of") + ) + .is_some()); + + data +} + +pub fn dkg_confirmation_nonces( + key: &Zeroizing<::F>, + spec: &TributarySpec, +) -> [u8; 64] { + DkgConfirmer::preprocess(spec, key) +} + +#[allow(clippy::needless_pass_by_ref_mut)] +pub fn 
generated_key_pair( + txn: &mut D::Transaction<'_>, + key: &Zeroizing<::F>, + spec: &TributarySpec, + key_pair: &KeyPair, +) -> Result<[u8; 32], Participant> { + TributaryDb::::save_currently_completing_key_pair(txn, spec.genesis(), key_pair); + + let attempt = 0; // TODO + let preprocesses = read_known_to_exist_data::( + txn, + spec, + key, + DKG_CONFIRMATION_NONCES, + [0; 32], + spec.n(), + attempt, + vec![], + None, + ); + DkgConfirmer::share(spec, key, preprocesses, key_pair) +} + +#[allow(clippy::too_many_arguments)] // TODO +pub async fn handle_application_tx< + D: Db, + Pro: Processors, + F: Future, + PST: Clone + Fn(ValidatorSet, Encoded) -> F, +>( + tx: Transaction, + spec: &TributarySpec, + processors: &Pro, + publish_serai_tx: PST, + genesis: [u8; 32], + key: &Zeroizing<::F>, + recognized_id: &UnboundedSender<([u8; 32], RecognizedIdType, [u8; 32])>, + txn: &mut ::Transaction<'_>, +) { + // Used to determine if an ID is acceptable + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + enum Zone { + Dkg, + Batch, + Sign, + } + + impl Zone { + fn label(&self) -> &'static str { + match self { + Zone::Dkg => { + panic!("getting the label for dkg despite dkg code paths not needing a label") + } + Zone::Batch => "batch", + Zone::Sign => "sign", + } + } + } + + let handle = + |txn: &mut _, zone: Zone, label, needed, id, attempt, bytes: Vec, signed: &Signed| { + if zone == Zone::Dkg { + // Since Dkg doesn't have an ID, solely attempts, this should just be [0; 32] + assert_eq!(id, [0; 32], "DKG, which shouldn't have IDs, had a non-0 ID"); + } else if !TributaryDb::::recognized_id(txn, zone.label(), genesis, id) { + // TODO: Full slash + todo!(); + } + + // If they've already published a TX for this attempt, slash + if let Some(data) = TributaryDb::::data(label, txn, genesis, id, attempt, signed.signer) { + if data != bytes { + // TODO: Full slash + todo!(); + } + + // TODO: Slash + return None; + } + + // If the attempt is lesser than the blockchain's, slash + let curr_attempt = TributaryDb::::attempt(txn, genesis, id); + if attempt < curr_attempt { + // TODO: Slash for being late + return None; + } + if attempt > curr_attempt { + // TODO: Full slash + todo!(); + } + + // TODO: We can also full slash if shares before all commitments, or share before the + // necessary preprocesses + + // TODO: If this is shares, we need to check they are part of the selected signing set + + // Store this data + let received = + TributaryDb::::set_data(label, txn, genesis, id, attempt, signed.signer, &bytes); + + // If we have all the needed commitments/preprocesses/shares, tell the processor + // TODO: This needs to be coded by weight, not by validator count + if received == needed { + return Some(read_known_to_exist_data::( + txn, + spec, + key, + label, + id, + needed, + attempt, + bytes, + Some(signed), + )); + } + None + }; + + match tx { + Transaction::DkgCommitments(attempt, bytes, signed) => { + if let Some(commitments) = + handle(txn, Zone::Dkg, b"dkg_commitments", spec.n(), [0; 32], attempt, bytes, &signed) + { + log::info!("got all DkgCommitments for {}", hex::encode(genesis)); + processors + .send( + spec.set().network, + CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { + id: KeyGenId { set: spec.set(), attempt }, + commitments, + }), + ) + .await; + } + } + + Transaction::DkgShares { attempt, sender_i, mut shares, confirmation_nonces, signed } => { + if sender_i != + spec + .i(signed.signer) + .expect("transaction added to tributary by signer who isn't a participant") + { + // 
TODO: Full slash + todo!(); + } + + if shares.len() != (usize::from(spec.n()) - 1) { + // TODO: Full slash + todo!(); + } + + // Only save our share's bytes + let our_i = spec + .i(Ristretto::generator() * key.deref()) + .expect("in a tributary we're not a validator for"); + // This unwrap is safe since the length of shares is checked, the the only missing key + // within the valid range will be the sender's i + let bytes = if sender_i == our_i { vec![] } else { shares.remove(&our_i).unwrap() }; + + let confirmation_nonces = handle( + txn, + Zone::Dkg, + DKG_CONFIRMATION_NONCES, + spec.n(), + [0; 32], + attempt, + confirmation_nonces.to_vec(), + &signed, + ); + if let Some(shares) = + handle(txn, Zone::Dkg, b"dkg_shares", spec.n(), [0; 32], attempt, bytes, &signed) + { + log::info!("got all DkgShares for {}", hex::encode(genesis)); + assert!(confirmation_nonces.is_some()); + processors + .send( + spec.set().network, + CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares { + id: KeyGenId { set: spec.set(), attempt }, + shares, + }), + ) + .await; + } else { + assert!(confirmation_nonces.is_none()); + } + } + + Transaction::DkgConfirmed(attempt, shares, signed) => { + if let Some(shares) = handle( + txn, + Zone::Dkg, + DKG_CONFIRMATION_SHARES, + spec.n(), + [0; 32], + attempt, + shares.to_vec(), + &signed, + ) { + log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); + + let preprocesses = read_known_to_exist_data::( + txn, + spec, + key, + DKG_CONFIRMATION_NONCES, + [0; 32], + spec.n(), + attempt, + vec![], + None, + ); + + let key_pair = TributaryDb::::currently_completing_key_pair(txn, genesis) + .unwrap_or_else(|| { + panic!( + "in DkgConfirmed handling, which happens after everyone {}", + "(including us) fires DkgConfirmed, yet no confirming key pair" + ) + }); + let Ok(sig) = DkgConfirmer::complete(spec, key, preprocesses, &key_pair, shares) else { + // TODO: Full slash + todo!(); + }; + + publish_serai_tx( + spec.set(), + Serai::set_validator_set_keys(spec.set().network, key_pair, Signature(sig)), + ) + .await; + } + } + + Transaction::ExternalBlock(block) => { + // Because this external block has been finalized, its batch ID should be authorized + TributaryDb::::recognize_id(txn, Zone::Batch.label(), genesis, block); + recognized_id + .send((genesis, RecognizedIdType::Block, block)) + .expect("recognized_id_recv was dropped. are we shutting down?"); + } + + Transaction::SubstrateBlock(block) => { + let plan_ids = TributaryDb::::plan_ids(txn, genesis, block).expect( + "synced a tributary block finalizing a substrate block in a provided transaction \ + despite us not providing that transaction", + ); + + for id in plan_ids { + TributaryDb::::recognize_id(txn, Zone::Sign.label(), genesis, id); + recognized_id + .send((genesis, RecognizedIdType::Plan, id)) + .expect("recognized_id_recv was dropped. 
are we shutting down?"); + } + } + + Transaction::BatchPreprocess(data) => { + if let Some(preprocesses) = handle( + txn, + Zone::Batch, + b"batch_preprocess", + spec.t(), + data.plan, + data.attempt, + data.data, + &data.signed, + ) { + processors + .send( + spec.set().network, + CoordinatorMessage::Coordinator(coordinator::CoordinatorMessage::BatchPreprocesses { + id: SignId { key: todo!(), id: data.plan, attempt: data.attempt }, + preprocesses, + }), + ) + .await; + } + } + Transaction::BatchShare(data) => { + if let Some(shares) = handle( + txn, + Zone::Batch, + b"batch_share", + spec.t(), + data.plan, + data.attempt, + data.data, + &data.signed, + ) { + processors + .send( + spec.set().network, + CoordinatorMessage::Coordinator(coordinator::CoordinatorMessage::BatchShares { + id: SignId { key: todo!(), id: data.plan, attempt: data.attempt }, + shares: shares + .drain() + .map(|(validator, share)| (validator, share.try_into().unwrap())) + .collect(), + }), + ) + .await; + } + } + + Transaction::SignPreprocess(data) => { + if let Some(preprocesses) = handle( + txn, + Zone::Sign, + b"sign_preprocess", + spec.t(), + data.plan, + data.attempt, + data.data, + &data.signed, + ) { + processors + .send( + spec.set().network, + CoordinatorMessage::Sign(sign::CoordinatorMessage::Preprocesses { + id: SignId { key: todo!(), id: data.plan, attempt: data.attempt }, + preprocesses, + }), + ) + .await; + } + } + Transaction::SignShare(data) => { + if let Some(shares) = handle( + txn, + Zone::Sign, + b"sign_share", + spec.t(), + data.plan, + data.attempt, + data.data, + &data.signed, + ) { + processors + .send( + spec.set().network, + CoordinatorMessage::Sign(sign::CoordinatorMessage::Shares { + id: SignId { key: todo!(), id: data.plan, attempt: data.attempt }, + shares, + }), + ) + .await; + } + } + Transaction::SignCompleted(_, _, _) => todo!(), + } +} diff --git a/coordinator/src/tributary/mod.rs b/coordinator/src/tributary/mod.rs index 4df750f3..b9ab5c38 100644 --- a/coordinator/src/tributary/mod.rs +++ b/coordinator/src/tributary/mod.rs @@ -26,12 +26,16 @@ use serai_client::{ #[rustfmt::skip] use tributary::{ - ReadWrite, Signed, TransactionError, TransactionKind, Transaction as TransactionTrait, + ReadWrite, + transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait} }; mod db; pub use db::*; +mod handle; +pub use handle::*; + pub mod scanner; #[derive(Clone, PartialEq, Eq, Debug)] diff --git a/coordinator/src/tributary/scanner.rs b/coordinator/src/tributary/scanner.rs index 40c9a640..497300bf 100644 --- a/coordinator/src/tributary/scanner.rs +++ b/coordinator/src/tributary/scanner.rs @@ -1,227 +1,31 @@ -use core::{ops::Deref, future::Future}; -use std::collections::HashMap; +use core::future::Future; use zeroize::Zeroizing; -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::{Ciphersuite, Ristretto}; -use frost::{ - FrostError, - dkg::{Participant, musig::musig}, - sign::*, -}; -use frost_schnorrkel::Schnorrkel; -use serai_client::{ - Signature, - validator_sets::primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message}, - subxt::utils::Encoded, - Serai, -}; +use serai_client::{validator_sets::primitives::ValidatorSet, subxt::utils::Encoded}; use tokio::sync::mpsc::UnboundedSender; -use tributary::{Signed, Block, TributaryReader}; - -use processor_messages::{ - key_gen::{self, KeyGenId}, - sign::{self, SignId}, - coordinator, CoordinatorMessage, +use tributary::{ + 
Transaction as TributaryTransaction, Block, TributaryReader, + tendermint::{ + tx::{TendermintTx, decode_evidence}, + TendermintNetwork, + }, }; use serai_db::{Get, DbTxn}; use crate::{ Db, + tributary::handle::handle_application_tx, processors::Processors, tributary::{TributaryDb, TributarySpec, Transaction}, + P2p, }; -const DKG_CONFIRMATION_NONCES: &[u8] = b"dkg_confirmation_nonces"; -const DKG_CONFIRMATION_SHARES: &[u8] = b"dkg_confirmation_shares"; - -// Instead of maintaing state, this simply re-creates the machine(s) in-full on every call. -// This simplifies data flow and prevents requiring multiple paths. -// While more expensive, this only runs an O(n) algorithm, which is tolerable to run multiple -// times. -struct DkgConfirmer; -impl DkgConfirmer { - fn preprocess_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - ) -> (AlgorithmSignMachine, [u8; 64]) { - // TODO: Does Substrate already have a validator-uniqueness check? - let validators = spec.validators().iter().map(|val| val.0).collect::>(); - - let context = musig_context(spec.set()); - let mut chacha = ChaCha20Rng::from_seed({ - let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy"); - entropy_transcript.append_message(b"spec", spec.serialize()); - entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes())); - // TODO: This is incredibly insecure unless message-bound (or bound via the attempt) - Zeroizing::new(entropy_transcript).rng_seed(b"preprocess") - }); - let (machine, preprocess) = AlgorithmMachine::new( - Schnorrkel::new(b"substrate"), - musig(&context, key, &validators) - .expect("confirming the DKG for a set we aren't in/validator present multiple times") - .into(), - ) - .preprocess(&mut chacha); - - (machine, preprocess.serialize().try_into().unwrap()) - } - // Get the preprocess for this confirmation. - fn preprocess(spec: &TributarySpec, key: &Zeroizing<::F>) -> [u8; 64] { - Self::preprocess_internal(spec, key).1 - } - - fn share_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - preprocesses: HashMap>, - key_pair: &KeyPair, - ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - let machine = Self::preprocess_internal(spec, key).0; - let preprocesses = preprocesses - .into_iter() - .map(|(p, preprocess)| { - machine - .read_preprocess(&mut preprocess.as_slice()) - .map(|preprocess| (p, preprocess)) - .map_err(|_| p) - }) - .collect::, _>>()?; - let (machine, share) = machine - .sign(preprocesses, &set_keys_message(&spec.set(), key_pair)) - .map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok((machine, share.serialize().try_into().unwrap())) - } - // Get the share for this confirmation, if the preprocesses are valid. 
- fn share( - spec: &TributarySpec, - key: &Zeroizing<::F>, - preprocesses: HashMap>, - key_pair: &KeyPair, - ) -> Result<[u8; 32], Participant> { - Self::share_internal(spec, key, preprocesses, key_pair).map(|(_, share)| share) - } - - fn complete( - spec: &TributarySpec, - key: &Zeroizing<::F>, - preprocesses: HashMap>, - key_pair: &KeyPair, - shares: HashMap>, - ) -> Result<[u8; 64], Participant> { - let machine = Self::share_internal(spec, key, preprocesses, key_pair) - .expect("trying to complete a machine which failed to preprocess") - .0; - - let shares = shares - .into_iter() - .map(|(p, share)| { - machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) - }) - .collect::, _>>()?; - let signature = machine.complete(shares).map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok(signature.to_bytes()) - } -} - -#[allow(clippy::too_many_arguments)] // TODO -fn read_known_to_exist_data( - getter: &G, - spec: &TributarySpec, - key: &Zeroizing<::F>, - label: &'static [u8], - id: [u8; 32], - needed: u16, - attempt: u32, - bytes: Vec, - signed: Option<&Signed>, -) -> HashMap> { - let mut data = HashMap::new(); - for validator in spec.validators().iter().map(|validator| validator.0) { - data.insert( - spec.i(validator).unwrap(), - if Some(&validator) == signed.map(|signed| &signed.signer) { - bytes.clone() - } else if let Some(data) = - TributaryDb::::data(label, getter, spec.genesis(), id, attempt, validator) - { - data - } else { - continue; - }, - ); - } - assert_eq!(data.len(), usize::from(needed)); - - // Remove our own piece of data - assert!(data - .remove( - &spec - .i(Ristretto::generator() * key.deref()) - .expect("handling a message for a Tributary we aren't part of") - ) - .is_some()); - - data -} - -pub fn dkg_confirmation_nonces( - key: &Zeroizing<::F>, - spec: &TributarySpec, -) -> [u8; 64] { - DkgConfirmer::preprocess(spec, key) -} - -#[allow(clippy::needless_pass_by_ref_mut)] -pub fn generated_key_pair( - txn: &mut D::Transaction<'_>, - key: &Zeroizing<::F>, - spec: &TributarySpec, - key_pair: &KeyPair, -) -> Result<[u8; 32], Participant> { - TributaryDb::::save_currently_completing_key_pair(txn, spec.genesis(), key_pair); - - let attempt = 0; // TODO - let preprocesses = read_known_to_exist_data::( - txn, - spec, - key, - DKG_CONFIRMATION_NONCES, - [0; 32], - spec.n(), - attempt, - vec![], - None, - ); - DkgConfirmer::share(spec, key, preprocesses, key_pair) -} - #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum RecognizedIdType { Block, @@ -234,7 +38,8 @@ async fn handle_block< D: Db, Pro: Processors, F: Future, - PST: Fn(ValidatorSet, Encoded) -> F, + PST: Clone + Fn(ValidatorSet, Encoded) -> F, + P: P2p, >( db: &mut TributaryDb, key: &Zeroizing<::F>, @@ -252,334 +57,57 @@ async fn handle_block< let mut event_id = 0; #[allow(clippy::explicit_counter_loop)] // event_id isn't TX index. 
It just currently lines up for tx in block.transactions { - if !TributaryDb::::handled_event(&db.0, hash, event_id) { - let mut txn = db.0.txn(); - - // Used to determine if an ID is acceptable - #[derive(Clone, Copy, PartialEq, Eq, Debug)] - enum Zone { - Dkg, - Batch, - Sign, - } - - impl Zone { - fn label(&self) -> &'static str { - match self { - Zone::Dkg => { - panic!("getting the label for dkg despite dkg code paths not needing a label") - } - Zone::Batch => "batch", - Zone::Sign => "sign", - } - } - } - - let handle = - |txn: &mut _, zone: Zone, label, needed, id, attempt, bytes: Vec, signed: &Signed| { - if zone == Zone::Dkg { - // Since Dkg doesn't have an ID, solely attempts, this should just be [0; 32] - assert_eq!(id, [0; 32], "DKG, which shouldn't have IDs, had a non-0 ID"); - } else if !TributaryDb::::recognized_id(txn, zone.label(), genesis, id) { - // TODO: Full slash - todo!(); - } - - // If they've already published a TX for this attempt, slash - if let Some(data) = - TributaryDb::::data(label, txn, genesis, id, attempt, signed.signer) - { - if data != bytes { - // TODO: Full slash - todo!(); - } - - // TODO: Slash - return None; - } - - // If the attempt is lesser than the blockchain's, slash - let curr_attempt = TributaryDb::::attempt(txn, genesis, id); - if attempt < curr_attempt { - // TODO: Slash for being late - return None; - } - if attempt > curr_attempt { - // TODO: Full slash - todo!(); - } - - // TODO: We can also full slash if shares before all commitments, or share before the - // necessary preprocesses - - // TODO: If this is shares, we need to check they are part of the selected signing set - - // Store this data - let received = - TributaryDb::::set_data(label, txn, genesis, id, attempt, signed.signer, &bytes); - - // If we have all the needed commitments/preprocesses/shares, tell the processor - // TODO: This needs to be coded by weight, not by validator count - if received == needed { - return Some(read_known_to_exist_data::( - txn, - spec, - key, - label, - id, - needed, - attempt, - bytes, - Some(signed), - )); - } - None - }; - - match tx { - Transaction::DkgCommitments(attempt, bytes, signed) => { - if let Some(commitments) = handle( - &mut txn, - Zone::Dkg, - b"dkg_commitments", - spec.n(), - [0; 32], - attempt, - bytes, - &signed, - ) { - log::info!("got all DkgCommitments for {}", hex::encode(genesis)); - processors - .send( - spec.set().network, - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { set: spec.set(), attempt }, - commitments, - }), - ) - .await; - } - } - - Transaction::DkgShares { attempt, sender_i, mut shares, confirmation_nonces, signed } => { - if sender_i != - spec - .i(signed.signer) - .expect("transaction added to tributary by signer who isn't a participant") - { - // TODO: Full slash - todo!(); - } - - if shares.len() != (usize::from(spec.n()) - 1) { - // TODO: Full slash - todo!(); - } - - // Only save our share's bytes - let our_i = spec - .i(Ristretto::generator() * key.deref()) - .expect("in a tributary we're not a validator for"); - // This unwrap is safe since the length of shares is checked, the the only missing key - // within the valid range will be the sender's i - let bytes = if sender_i == our_i { vec![] } else { shares.remove(&our_i).unwrap() }; - - let confirmation_nonces = handle( - &mut txn, - Zone::Dkg, - DKG_CONFIRMATION_NONCES, - spec.n(), - [0; 32], - attempt, - confirmation_nonces.to_vec(), - &signed, - ); - if let Some(shares) = - handle(&mut txn, Zone::Dkg, 
b"dkg_shares", spec.n(), [0; 32], attempt, bytes, &signed) - { - log::info!("got all DkgShares for {}", hex::encode(genesis)); - assert!(confirmation_nonces.is_some()); - processors - .send( - spec.set().network, - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares { - id: KeyGenId { set: spec.set(), attempt }, - shares, - }), - ) - .await; - } else { - assert!(confirmation_nonces.is_none()); - } - } - - Transaction::DkgConfirmed(attempt, shares, signed) => { - if let Some(shares) = handle( - &mut txn, - Zone::Dkg, - DKG_CONFIRMATION_SHARES, - spec.n(), - [0; 32], - attempt, - shares.to_vec(), - &signed, - ) { - log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); - - let preprocesses = read_known_to_exist_data::( - &txn, - spec, - key, - DKG_CONFIRMATION_NONCES, - [0; 32], - spec.n(), - attempt, - vec![], - None, - ); - - let key_pair = TributaryDb::::currently_completing_key_pair(&txn, genesis) - .unwrap_or_else(|| { - panic!( - "in DkgConfirmed handling, which happens after everyone {}", - "(including us) fires DkgConfirmed, yet no confirming key pair" - ) - }); - let Ok(sig) = DkgConfirmer::complete(spec, key, preprocesses, &key_pair, shares) else { - // TODO: Full slash - todo!(); - }; - - publish_serai_tx( - spec.set(), - Serai::set_validator_set_keys(spec.set().network, key_pair, Signature(sig)), - ) - .await; - } - } - - Transaction::ExternalBlock(block) => { - // Because this external block has been finalized, its batch ID should be authorized - TributaryDb::::recognize_id(&mut txn, Zone::Batch.label(), genesis, block); - recognized_id - .send((genesis, RecognizedIdType::Block, block)) - .expect("recognized_id_recv was dropped. are we shutting down?"); - } - - Transaction::SubstrateBlock(block) => { - let plan_ids = TributaryDb::::plan_ids(&txn, genesis, block).expect( - "synced a tributary block finalizing a substrate block in a provided transaction \ - despite us not providing that transaction", - ); - - for id in plan_ids { - TributaryDb::::recognize_id(&mut txn, Zone::Sign.label(), genesis, id); - recognized_id - .send((genesis, RecognizedIdType::Plan, id)) - .expect("recognized_id_recv was dropped. 
are we shutting down?"); - } - } - - Transaction::BatchPreprocess(data) => { - if let Some(preprocesses) = handle( - &mut txn, - Zone::Batch, - b"batch_preprocess", - spec.t(), - data.plan, - data.attempt, - data.data, - &data.signed, - ) { - processors - .send( - spec.set().network, - CoordinatorMessage::Coordinator( - coordinator::CoordinatorMessage::BatchPreprocesses { - id: SignId { key: todo!(), id: data.plan, attempt: data.attempt }, - preprocesses, - }, - ), - ) - .await; - } - } - Transaction::BatchShare(data) => { - if let Some(shares) = handle( - &mut txn, - Zone::Batch, - b"batch_share", - spec.t(), - data.plan, - data.attempt, - data.data, - &data.signed, - ) { - processors - .send( - spec.set().network, - CoordinatorMessage::Coordinator(coordinator::CoordinatorMessage::BatchShares { - id: SignId { key: todo!(), id: data.plan, attempt: data.attempt }, - shares: shares - .drain() - .map(|(validator, share)| (validator, share.try_into().unwrap())) - .collect(), - }), - ) - .await; - } - } - - Transaction::SignPreprocess(data) => { - if let Some(preprocesses) = handle( - &mut txn, - Zone::Sign, - b"sign_preprocess", - spec.t(), - data.plan, - data.attempt, - data.data, - &data.signed, - ) { - processors - .send( - spec.set().network, - CoordinatorMessage::Sign(sign::CoordinatorMessage::Preprocesses { - id: SignId { key: todo!(), id: data.plan, attempt: data.attempt }, - preprocesses, - }), - ) - .await; - } - } - Transaction::SignShare(data) => { - if let Some(shares) = handle( - &mut txn, - Zone::Sign, - b"sign_share", - spec.t(), - data.plan, - data.attempt, - data.data, - &data.signed, - ) { - processors - .send( - spec.set().network, - CoordinatorMessage::Sign(sign::CoordinatorMessage::Shares { - id: SignId { key: todo!(), id: data.plan, attempt: data.attempt }, - shares, - }), - ) - .await; - } - } - Transaction::SignCompleted(_, _, _) => todo!(), - } - - TributaryDb::::handle_event(&mut txn, hash, event_id); - txn.commit(); + if TributaryDb::::handled_event(&db.0, hash, event_id) { + event_id += 1; + continue; } + + let mut txn = db.0.txn(); + + match tx { + TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => { + // Since the evidence is on the chain, it should already have been validated + // We can just punish the signer + let msgs = decode_evidence::>(&ev).unwrap(); + + // Since anything with evidence is fundamentally faulty behavior, not just temporal errors, + // mark the node as fatally slashed + TributaryDb::::set_fatally_slashed(&mut txn, genesis, msgs.0.msg.sender); + + // TODO: disconnect the node from network/ban from further participation in Tributary + } + TributaryTransaction::Tendermint(TendermintTx::SlashVote(vote)) => { + // TODO: make sure same signer doesn't vote twice + + // increment the counter for this vote + let vote_key = TributaryDb::::slash_vote_key(genesis, vote.id, vote.target); + let mut count = txn.get(&vote_key).map_or(0, |c| u32::from_le_bytes(c.try_into().unwrap())); + count += 1; // TODO: Increase by weight, not by 1 + txn.put(vote_key, count.to_le_bytes()); + + // TODO: Check if a supermajority of validators, by weight, voted, and increment slash + // points if so + // If a node has a certain number more than the median slash points, the node should be + // removed + } + TributaryTransaction::Application(tx) => { + handle_application_tx::( + tx, + spec, + processors, + publish_serai_tx.clone(), + genesis, + key, + recognized_id, + &mut txn, + ) + .await; + } + } + + TributaryDb::::handle_event(&mut txn, hash, event_id); + 
txn.commit(); + event_id += 1; } @@ -591,6 +119,7 @@ pub async fn handle_new_blocks< Pro: Processors, F: Future, PST: Clone + Fn(ValidatorSet, Encoded) -> F, + P: P2p, >( db: &mut TributaryDb, key: &Zeroizing<::F>, @@ -604,7 +133,16 @@ pub async fn handle_new_blocks< let mut last_block = db.last_block(genesis); while let Some(next) = tributary.block_after(&last_block) { let block = tributary.block(&next).unwrap(); - handle_block(db, key, recognized_id, processors, publish_serai_tx.clone(), spec, block).await; + handle_block::<_, _, _, _, P>( + db, + key, + recognized_id, + processors, + publish_serai_tx.clone(), + spec, + block, + ) + .await; last_block = next; db.set_last_block(genesis, next); } diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml index cc3bd24f..b4eb4935 100644 --- a/coordinator/tributary/Cargo.toml +++ b/coordinator/tributary/Cargo.toml @@ -34,5 +34,8 @@ tendermint = { package = "tendermint-machine", path = "./tendermint" } tokio = { version = "1", features = ["sync", "time", "rt"] } +[dev-dependencies] +tokio = { version = "1", features = ["macros"] } + [features] tests = [] diff --git a/coordinator/tributary/src/block.rs b/coordinator/tributary/src/block.rs index 64a398ab..4e0c5050 100644 --- a/coordinator/tributary/src/block.rs +++ b/coordinator/tributary/src/block.rs @@ -1,6 +1,6 @@ use std::{ io, - collections::{VecDeque, HashMap}, + collections::{VecDeque, HashSet, HashMap}, }; use thiserror::Error; @@ -9,6 +9,16 @@ use blake2::{Digest, Blake2s256}; use ciphersuite::{Ciphersuite, Ristretto}; +use tendermint::ext::{Network, Commit}; + +use crate::{ + transaction::{ + TransactionError, Signed, TransactionKind, Transaction as TransactionTrait, verify_transaction, + }, + BLOCK_SIZE_LIMIT, ReadWrite, merkle, Transaction, + tendermint::tx::verify_tendermint_tx, +}; + #[derive(Clone, PartialEq, Eq, Debug, Error)] pub enum BlockError { /// Block was too large. @@ -20,9 +30,12 @@ pub enum BlockError { /// Header specified an invalid transactions merkle tree hash. #[error("header transactions hash is incorrect")] InvalidTransactions, - /// A provided transaction was placed after a non-provided transaction. - #[error("a provided transaction was included after a non-provided transaction")] - ProvidedAfterNonProvided, + /// An unsigned transaction which was already added to the chain was present again. + #[error("an unsigned transaction which was already added to the chain was present again")] + UnsignedAlreadyIncluded, + /// Transactions weren't ordered as expected (Provided, followed by Unsigned, folowed by Signed). + #[error("transactions weren't ordered as expected (Provided, Unsigned, Signed)")] + WrongTransactionOrder, /// The block had a provided transaction this validator has yet to be provided. 
#[error("block had a provided transaction not yet locally provided: {0:?}")] NonLocalProvided([u8; 32]), @@ -34,11 +47,6 @@ pub enum BlockError { TransactionError(TransactionError), } -use crate::{ - BLOCK_SIZE_LIMIT, ReadWrite, TransactionError, Signed, TransactionKind, Transaction, merkle, - verify_transaction, -}; - #[derive(Clone, PartialEq, Eq, Debug)] pub struct BlockHeader { pub parent: [u8; 32], @@ -66,12 +74,12 @@ impl BlockHeader { } #[derive(Clone, PartialEq, Eq, Debug)] -pub struct Block { +pub struct Block { pub header: BlockHeader, - pub transactions: Vec, + pub transactions: Vec>, } -impl ReadWrite for Block { +impl ReadWrite for Block { fn read(reader: &mut R) -> io::Result { let header = BlockHeader::read(reader)?; @@ -81,7 +89,7 @@ impl ReadWrite for Block { let mut transactions = Vec::with_capacity(usize::try_from(txs).unwrap()); for _ in 0 .. txs { - transactions.push(T::read(reader)?); + transactions.push(Transaction::read(reader)?); } Ok(Block { header, transactions }) @@ -97,22 +105,33 @@ impl ReadWrite for Block { } } -impl Block { +impl Block { /// Create a new block. /// - /// mempool is expected to only have valid, non-conflicting transactions. - pub(crate) fn new(parent: [u8; 32], provided: Vec, mempool: Vec) -> Self { - let mut txs = provided; - for tx in mempool { - assert!( - !matches!(tx.kind(), TransactionKind::Provided(_)), - "provided transaction entered mempool" - ); - txs.push(tx); + /// mempool is expected to only have valid, non-conflicting transactions, sorted by nonce. + pub(crate) fn new(parent: [u8; 32], provided: Vec, mempool: Vec>) -> Self { + let mut txs = vec![]; + for tx in provided { + txs.push(Transaction::Application(tx)) } + let mut signed = vec![]; + let mut unsigned = vec![]; + for tx in mempool { + match tx.kind() { + TransactionKind::Signed(_) => signed.push(tx), + TransactionKind::Unsigned => unsigned.push(tx), + TransactionKind::Provided(_) => panic!("provided transaction entered mempool"), + } + } + + // unsigned first + txs.extend(unsigned); + // then signed + txs.extend(signed); + // Check TXs are sorted by nonce. - let nonce = |tx: &T| { + let nonce = |tx: &Transaction| { if let TransactionKind::Signed(Signed { nonce, .. 
}) = tx.kind() { *nonce } else { @@ -123,7 +142,7 @@ impl Block { for tx in &txs { let nonce = nonce(tx); if nonce < last { - panic!("failed to sort txs by nonce"); + panic!("TXs in mempool weren't ordered by nonce"); } last = nonce; } @@ -146,13 +165,33 @@ impl Block { self.header.hash() } - pub(crate) fn verify( + #[allow(clippy::too_many_arguments)] + pub(crate) fn verify( &self, genesis: [u8; 32], last_block: [u8; 32], mut locally_provided: HashMap<&'static str, VecDeque>, mut next_nonces: HashMap<::G, u32>, + schema: N::SignatureScheme, + commit: impl Fn(u32) -> Option>, + unsigned_in_chain: impl Fn([u8; 32]) -> bool, ) -> Result<(), BlockError> { + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + enum Order { + Provided, + Unsigned, + Signed, + } + impl From for u8 { + fn from(order: Order) -> u8 { + match order { + Order::Provided => 0, + Order::Unsigned => 1, + Order::Signed => 2, + } + } + } + if self.serialize().len() > BLOCK_SIZE_LIMIT { Err(BlockError::TooLargeBlock)?; } @@ -161,33 +200,66 @@ impl Block { Err(BlockError::InvalidParent)?; } - let mut found_non_provided = false; + let mut last_tx_order = Order::Provided; + let mut included_in_block = HashSet::new(); let mut txs = Vec::with_capacity(self.transactions.len()); for tx in self.transactions.iter() { - txs.push(tx.hash()); + let tx_hash = tx.hash(); + txs.push(tx_hash); - if let TransactionKind::Provided(order) = tx.kind() { - if found_non_provided { - Err(BlockError::ProvidedAfterNonProvided)?; + let current_tx_order = match tx.kind() { + TransactionKind::Provided(order) => { + let Some(local) = locally_provided.get_mut(order).and_then(|deque| deque.pop_front()) + else { + Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? + }; + // Since this was a provided TX, it must be an application TX + let Transaction::Application(tx) = tx else { + Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? + }; + if tx != &local { + Err(BlockError::DistinctProvided)?; + } + + Order::Provided } + TransactionKind::Unsigned => { + // check we don't already have the tx in the chain + if unsigned_in_chain(tx_hash) || included_in_block.contains(&tx_hash) { + Err(BlockError::UnsignedAlreadyIncluded)?; + } + included_in_block.insert(tx_hash); - let Some(local) = locally_provided.get_mut(order).and_then(|deque| deque.pop_front()) - else { - Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? - }; - if tx != &local { - Err(BlockError::DistinctProvided)?; + Order::Unsigned } + TransactionKind::Signed(..) => Order::Signed, + }; + // enforce Provided => Unsigned => Signed order + if u8::from(current_tx_order) < u8::from(last_tx_order) { + Err(BlockError::WrongTransactionOrder)?; + } + last_tx_order = current_tx_order; + + if current_tx_order == Order::Provided { // We don't need to call verify_transaction since we did when we locally provided this // transaction. Since it's identical, it must be valid continue; } - found_non_provided = true; - match verify_transaction(tx, genesis, &mut next_nonces) { - Ok(()) => {} - Err(e) => Err(BlockError::TransactionError(e))?, + // TODO: should we modify the verify_transaction to take `Transaction` or + // use this pattern of verifying tendermint Txs and app txs differently? 
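+      // Tendermint transactions are checked with verify_tendermint_tx (which needs the
+      // network's signature scheme and the commit lookup), while application transactions
+      // still go through verify_transaction with the running nonce map.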
+ match tx { + Transaction::Tendermint(tx) => { + match verify_tendermint_tx::(tx, genesis, schema.clone(), &commit) { + Ok(()) => {} + Err(e) => Err(BlockError::TransactionError(e))?, + } + } + Transaction::Application(tx) => match verify_transaction(tx, genesis, &mut next_nonces) { + Ok(()) => {} + Err(e) => Err(BlockError::TransactionError(e))?, + }, } } diff --git a/coordinator/tributary/src/blockchain.rs b/coordinator/tributary/src/blockchain.rs index fc6d7df2..78c2ca2b 100644 --- a/coordinator/tributary/src/blockchain.rs +++ b/coordinator/tributary/src/blockchain.rs @@ -4,13 +4,17 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use serai_db::{DbTxn, Db}; +use scale::Decode; + +use tendermint::ext::{Network, Commit}; + use crate::{ - ReadWrite, Signed, TransactionKind, Transaction, ProvidedError, ProvidedTransactions, BlockError, - Block, Mempool, + ReadWrite, ProvidedError, ProvidedTransactions, BlockError, Block, Mempool, Transaction, + transaction::{Signed, TransactionKind, Transaction as TransactionTrait}, }; #[derive(Clone, PartialEq, Eq, Debug)] -pub(crate) struct Blockchain { +pub(crate) struct Blockchain { db: Option, genesis: [u8; 32], @@ -22,7 +26,7 @@ pub(crate) struct Blockchain { mempool: Mempool, } -impl Blockchain { +impl Blockchain { fn tip_key(&self) -> Vec { D::key(b"tributary_blockchain", b"tip", self.genesis) } @@ -32,12 +36,18 @@ impl Blockchain { fn block_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"block", [genesis, hash].concat()) } + fn block_hash_key(genesis: &[u8], block_number: u32) -> Vec { + D::key(b"tributary_blockchain", b"block_hash", [genesis, &block_number.to_le_bytes()].concat()) + } fn commit_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"commit", [genesis, hash].concat()) } fn block_after_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"block_after", [genesis, hash].concat()) } + fn unsigned_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { + D::key(b"tributary_blockchain", b"unsigned_included", [genesis, hash].concat()) + } fn next_nonce_key(&self, signer: &::G) -> Vec { D::key( b"tributary_blockchain", @@ -102,16 +112,46 @@ impl Blockchain { db.get(Self::commit_key(&genesis, block)) } + pub(crate) fn block_hash_from_db(db: &D, genesis: [u8; 32], block: u32) -> Option<[u8; 32]> { + db.get(Self::block_hash_key(&genesis, block)).map(|h| h.try_into().unwrap()) + } + pub(crate) fn commit(&self, block: &[u8; 32]) -> Option> { Self::commit_from_db(self.db.as_ref().unwrap(), self.genesis, block) } + pub(crate) fn block_hash(&self, block: u32) -> Option<[u8; 32]> { + Self::block_hash_from_db(self.db.as_ref().unwrap(), self.genesis, block) + } + + pub(crate) fn commit_by_block_number(&self, block: u32) -> Option> { + self.commit(&self.block_hash(block)?) 
+ } + pub(crate) fn block_after(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<[u8; 32]> { db.get(Self::block_after_key(&genesis, block)).map(|bytes| bytes.try_into().unwrap()) } - pub(crate) fn add_transaction(&mut self, internal: bool, tx: T) -> bool { - self.mempool.add(&self.next_nonces, internal, tx) + pub(crate) fn add_transaction( + &mut self, + internal: bool, + tx: Transaction, + schema: N::SignatureScheme, + ) -> bool { + let db = self.db.as_ref().unwrap(); + let genesis = self.genesis; + + let commit = |block: u32| -> Option> { + let hash = Self::block_hash_from_db(db, genesis, block)?; + // we must have a commit per valid hash + let commit = Self::commit_from_db(db, genesis, &hash).unwrap(); + // commit has to be valid if it is coming from our db + Some(Commit::::decode(&mut commit.as_ref()).unwrap()) + }; + + let unsigned_in_chain = + |hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some(); + self.mempool.add::(&self.next_nonces, internal, tx, schema, unsigned_in_chain, commit) } pub(crate) fn provide_transaction(&mut self, tx: T) -> Result<(), ProvidedError> { @@ -123,29 +163,53 @@ impl Blockchain { Some(self.next_nonces.get(&key).cloned()?.max(self.mempool.next_nonce(&key).unwrap_or(0))) } - pub(crate) fn build_block(&mut self) -> Block { + pub(crate) fn build_block(&mut self, schema: N::SignatureScheme) -> Block { + let db = self.db.as_ref().unwrap(); + let unsigned_in_chain = + |hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some(); + let block = Block::new( self.tip, self.provided.transactions.values().flatten().cloned().collect(), - self.mempool.block(&self.next_nonces), + self.mempool.block(&self.next_nonces, unsigned_in_chain), ); // build_block should not return invalid blocks - self.verify_block(&block).unwrap(); + self.verify_block::(&block, schema).unwrap(); block } - pub(crate) fn verify_block(&self, block: &Block) -> Result<(), BlockError> { - block.verify( + pub(crate) fn verify_block( + &self, + block: &Block, + schema: N::SignatureScheme, + ) -> Result<(), BlockError> { + let db = self.db.as_ref().unwrap(); + let unsigned_in_chain = + |hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some(); + let commit = |block: u32| -> Option> { + let commit = self.commit_by_block_number(block)?; + // commit has to be valid if it is coming from our db + Some(Commit::::decode(&mut commit.as_ref()).unwrap()) + }; + block.verify::( self.genesis, self.tip, self.provided.transactions.clone(), self.next_nonces.clone(), + schema, + &commit, + unsigned_in_chain, ) } /// Add a block. 
- pub(crate) fn add_block(&mut self, block: &Block, commit: Vec) -> Result<(), BlockError> { - self.verify_block(block)?; + pub(crate) fn add_block( + &mut self, + block: &Block, + commit: Vec, + schema: N::SignatureScheme, + ) -> Result<(), BlockError> { + self.verify_block::(block, schema)?; log::info!( "adding block {} to tributary {} with {} TXs", @@ -167,6 +231,8 @@ impl Blockchain { self.block_number += 1; txn.put(self.block_number_key(), self.block_number.to_le_bytes()); + txn.put(Self::block_hash_key(&self.genesis, self.block_number), self.tip); + txn.put(Self::block_key(&self.genesis, &self.tip), block.serialize()); txn.put(Self::commit_key(&self.genesis, &self.tip), commit); @@ -177,7 +243,13 @@ impl Blockchain { TransactionKind::Provided(order) => { self.provided.complete(&mut txn, order, tx.hash()); } - TransactionKind::Unsigned => {} + TransactionKind::Unsigned => { + let hash = tx.hash(); + // Save as included on chain + txn.put(Self::unsigned_included_key(&self.genesis, &hash), []); + // remove from the mempool + self.mempool.remove(&hash); + } TransactionKind::Signed(Signed { signer, nonce, .. }) => { let next_nonce = nonce + 1; let prev = self diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index cf554f97..b7767181 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -22,8 +22,10 @@ use tokio::sync::RwLock; mod merkle; pub(crate) use merkle::*; -mod transaction; -pub use transaction::*; +pub mod transaction; +pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait}; + +use crate::tendermint::tx::TendermintTx; mod provided; pub(crate) use provided::*; @@ -38,7 +40,7 @@ pub(crate) use blockchain::*; mod mempool; pub(crate) use mempool::*; -mod tendermint; +pub mod tendermint; pub(crate) use crate::tendermint::*; #[cfg(any(test, feature = "tests"))] @@ -57,6 +59,59 @@ pub(crate) const TENDERMINT_MESSAGE: u8 = 0; pub(crate) const BLOCK_MESSAGE: u8 = 1; pub(crate) const TRANSACTION_MESSAGE: u8 = 2; +#[allow(clippy::large_enum_variant)] +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Transaction { + Tendermint(TendermintTx), + Application(T), +} + +impl ReadWrite for Transaction { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let tx = TendermintTx::read(reader)?; + Ok(Transaction::Tendermint(tx)) + } + 1 => { + let tx = T::read(reader)?; + Ok(Transaction::Application(tx)) + } + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid transaction type")), + } + } + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Transaction::Tendermint(tx) => { + writer.write_all(&[0])?; + tx.write(writer) + } + Transaction::Application(tx) => { + writer.write_all(&[1])?; + tx.write(writer) + } + } + } +} + +impl Transaction { + pub fn hash(&self) -> [u8; 32] { + match self { + Transaction::Tendermint(tx) => tx.hash(), + Transaction::Application(tx) => tx.hash(), + } + } + + pub fn kind(&self) -> TransactionKind<'_> { + match self { + Transaction::Tendermint(tx) => tx.kind(), + Transaction::Application(tx) => tx.kind(), + } + } +} + /// An item which can be read and written. pub trait ReadWrite: Sized { fn read(reader: &mut R) -> io::Result; @@ -83,7 +138,7 @@ impl P2p for Arc

{ } #[derive(Clone)] -pub struct Tributary { +pub struct Tributary { db: D, genesis: [u8; 32], @@ -94,7 +149,7 @@ pub struct Tributary { messages: Arc>>>, } -impl Tributary { +impl Tributary { pub async fn new( db: D, genesis: [u8; 32], @@ -118,7 +173,9 @@ impl Tributary { } else { start_time }; - let proposal = TendermintBlock(blockchain.build_block().serialize()); + let proposal = TendermintBlock( + blockchain.build_block::>(validators.clone()).serialize(), + ); let blockchain = Arc::new(RwLock::new(blockchain)); let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p }; @@ -168,9 +225,14 @@ impl Tributary { // Safe to be &self since the only meaningful usage of self is self.network.blockchain which // successfully acquires its own write lock pub async fn add_transaction(&self, tx: T) -> bool { + let tx = Transaction::Application(tx); let mut to_broadcast = vec![TRANSACTION_MESSAGE]; tx.write(&mut to_broadcast).unwrap(); - let res = self.network.blockchain.write().await.add_transaction(true, tx); + let res = self.network.blockchain.write().await.add_transaction::>( + true, + tx, + self.network.signature_scheme(), + ); if res { self.network.p2p.broadcast(self.genesis, to_broadcast).await; } @@ -218,14 +280,19 @@ impl Tributary { pub async fn handle_message(&mut self, msg: &[u8]) -> bool { match msg.first() { Some(&TRANSACTION_MESSAGE) => { - let Ok(tx) = T::read::<&[u8]>(&mut &msg[1 ..]) else { + let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else { log::error!("received invalid transaction message"); return false; }; // TODO: Sync mempools with fellow peers // Can we just rebroadcast transactions not included for at least two blocks? - let res = self.network.blockchain.write().await.add_transaction(false, tx); + let res = + self.network.blockchain.write().await.add_transaction::>( + false, + tx, + self.network.signature_scheme(), + ); log::debug!("received transaction message. 
valid new transaction: {res}"); res } @@ -261,8 +328,8 @@ impl Tributary { } #[derive(Clone)] -pub struct TributaryReader(D, [u8; 32], PhantomData); -impl TributaryReader { +pub struct TributaryReader(D, [u8; 32], PhantomData); +impl TributaryReader { pub fn genesis(&self) -> [u8; 32] { self.1 } diff --git a/coordinator/tributary/src/mempool.rs b/coordinator/tributary/src/mempool.rs index 6f67b2c5..7a7cdf37 100644 --- a/coordinator/tributary/src/mempool.rs +++ b/coordinator/tributary/src/mempool.rs @@ -4,18 +4,25 @@ use ciphersuite::{Ciphersuite, Ristretto}; use serai_db::{DbTxn, Db}; -use crate::{ACCOUNT_MEMPOOL_LIMIT, Signed, TransactionKind, Transaction, verify_transaction}; +use tendermint::ext::{Network, Commit}; + +use crate::{ + ACCOUNT_MEMPOOL_LIMIT, ReadWrite, + transaction::{Signed, TransactionKind, Transaction as TransactionTrait, verify_transaction}, + tendermint::tx::verify_tendermint_tx, + Transaction, +}; #[derive(Clone, PartialEq, Eq, Debug)] -pub(crate) struct Mempool { +pub(crate) struct Mempool { db: D, genesis: [u8; 32], - txs: HashMap<[u8; 32], T>, + txs: HashMap<[u8; 32], Transaction>, next_nonces: HashMap<::G, u32>, } -impl Mempool { +impl Mempool { fn transaction_key(&self, hash: &[u8]) -> Vec { D::key(b"tributary_mempool", b"transaction", [self.genesis.as_ref(), hash].concat()) } @@ -23,90 +30,141 @@ impl Mempool { D::key(b"tributary_mempool", b"current", self.genesis) } + // save given tx to the mempool db + fn save_tx(&mut self, tx: Transaction) { + let tx_hash = tx.hash(); + let transaction_key = self.transaction_key(&tx_hash); + let current_mempool_key = self.current_mempool_key(); + let mut current_mempool = self.db.get(¤t_mempool_key).unwrap_or(vec![]); + + let mut txn = self.db.txn(); + txn.put(transaction_key, tx.serialize()); + current_mempool.extend(tx_hash); + txn.put(current_mempool_key, current_mempool); + txn.commit(); + + self.txs.insert(tx_hash, tx); + } + + fn unsigned_already_exist( + &self, + hash: [u8; 32], + unsigned_in_chain: impl Fn([u8; 32]) -> bool, + ) -> bool { + unsigned_in_chain(hash) || self.txs.contains_key(&hash) + } + pub(crate) fn new(db: D, genesis: [u8; 32]) -> Self { let mut res = Mempool { db, genesis, txs: HashMap::new(), next_nonces: HashMap::new() }; let current_mempool = res.db.get(res.current_mempool_key()).unwrap_or(vec![]); - let mut hash = [0; 32]; - let mut i = 0; - while i < current_mempool.len() { - hash.copy_from_slice(¤t_mempool[i .. (i + 32)]); - let tx = - T::read::<&[u8]>(&mut res.db.get(res.transaction_key(&hash)).unwrap().as_ref()).unwrap(); - match tx.kind() { - TransactionKind::Signed(Signed { signer, nonce, .. }) => { - if let Some(prev) = res.next_nonces.insert(*signer, nonce + 1) { - // These mempool additions should've been ordered - assert!(prev < *nonce); + for hash in current_mempool.chunks(32) { + let hash: [u8; 32] = hash.try_into().unwrap(); + let tx: Transaction = + Transaction::read::<&[u8]>(&mut res.db.get(res.transaction_key(&hash)).unwrap().as_ref()) + .unwrap(); + debug_assert_eq!(tx.hash(), hash); + + match tx { + Transaction::Tendermint(tx) => { + res.txs.insert(hash, Transaction::Tendermint(tx)); + } + Transaction::Application(tx) => { + match tx.kind() { + TransactionKind::Signed(Signed { signer, nonce, .. 
}) => { + if let Some(prev) = res.next_nonces.insert(*signer, nonce + 1) { + // These mempool additions should've been ordered + debug_assert!(prev < *nonce); + } + res.txs.insert(hash, Transaction::Application(tx)); + } + TransactionKind::Unsigned => { + res.txs.insert(hash, Transaction::Application(tx)); + } + _ => panic!("mempool database had a provided transaction"), } } - _ => panic!("mempool database had a non-signed transaction"), } - - debug_assert_eq!(tx.hash(), hash); - res.txs.insert(hash, tx); - i += 32; } res } /// Returns true if this is a valid, new transaction. - pub(crate) fn add( + pub(crate) fn add( &mut self, blockchain_next_nonces: &HashMap<::G, u32>, internal: bool, - tx: T, + tx: Transaction, + schema: N::SignatureScheme, + unsigned_in_chain: impl Fn([u8; 32]) -> bool, + commit: impl Fn(u32) -> Option>, ) -> bool { - match tx.kind() { - TransactionKind::Signed(Signed { signer, nonce, .. }) => { - // Get the nonce from the blockchain - let Some(blockchain_next_nonce) = blockchain_next_nonces.get(signer).cloned() else { - // Not a participant - return false; - }; + match &tx { + Transaction::Tendermint(tendermint_tx) => { + // All Tendermint transactions should be unsigned + assert_eq!(TransactionKind::Unsigned, tendermint_tx.kind()); - // If the blockchain's nonce is greater than the mempool's, use it - // Default to true so if the mempool hasn't tracked this nonce yet, it'll be inserted - let mut blockchain_is_greater = true; - if let Some(mempool_next_nonce) = self.next_nonces.get(signer) { - blockchain_is_greater = blockchain_next_nonce > *mempool_next_nonce; - } - - if blockchain_is_greater { - self.next_nonces.insert(*signer, blockchain_next_nonce); - } - - // If we have too many transactions from this sender, don't add this yet UNLESS we are - // this sender - if !internal && (nonce >= &(blockchain_next_nonce + ACCOUNT_MEMPOOL_LIMIT)) { + // check we have the tx in the pool/chain + if self.unsigned_already_exist(tx.hash(), unsigned_in_chain) { return false; } - if verify_transaction(&tx, self.genesis, &mut self.next_nonces).is_err() { + // verify the tx + if verify_tendermint_tx::(tendermint_tx, self.genesis, schema, commit).is_err() { return false; } - assert_eq!(self.next_nonces[signer], nonce + 1); - - let tx_hash = tx.hash(); - - let transaction_key = self.transaction_key(&tx_hash); - let current_mempool_key = self.current_mempool_key(); - let mut current_mempool = self.db.get(¤t_mempool_key).unwrap_or(vec![]); - - let mut txn = self.db.txn(); - txn.put(transaction_key, tx.serialize()); - current_mempool.extend(tx_hash); - txn.put(current_mempool_key, current_mempool); - txn.commit(); - - self.txs.insert(tx_hash, tx); - - true } - _ => false, + Transaction::Application(app_tx) => { + match app_tx.kind() { + TransactionKind::Signed(Signed { signer, nonce, .. 
}) => { + // Get the nonce from the blockchain + let Some(blockchain_next_nonce) = blockchain_next_nonces.get(signer).cloned() else { + // Not a participant + return false; + }; + + // If the blockchain's nonce is greater than the mempool's, use it + // Default to true so if the mempool hasn't tracked this nonce yet, it'll be inserted + let mut blockchain_is_greater = true; + if let Some(mempool_next_nonce) = self.next_nonces.get(signer) { + blockchain_is_greater = blockchain_next_nonce > *mempool_next_nonce; + } + + if blockchain_is_greater { + self.next_nonces.insert(*signer, blockchain_next_nonce); + } + + // If we have too many transactions from this sender, don't add this yet UNLESS we are + // this sender + if !internal && (nonce >= &(blockchain_next_nonce + ACCOUNT_MEMPOOL_LIMIT)) { + return false; + } + + if verify_transaction(app_tx, self.genesis, &mut self.next_nonces).is_err() { + return false; + } + debug_assert_eq!(self.next_nonces[signer], nonce + 1); + } + TransactionKind::Unsigned => { + // check we have the tx in the pool/chain + if self.unsigned_already_exist(tx.hash(), unsigned_in_chain) { + return false; + } + + if app_tx.verify().is_err() { + return false; + } + } + TransactionKind::Provided(_) => return false, + } + } } + + // Save the TX to the pool + self.save_tx(tx); + true } // Returns None if the mempool doesn't have a nonce tracked. @@ -118,10 +176,13 @@ impl Mempool { pub(crate) fn block( &mut self, blockchain_next_nonces: &HashMap<::G, u32>, - ) -> Vec { - let mut res = vec![]; + unsigned_in_chain: impl Fn([u8; 32]) -> bool, + ) -> Vec> { + let mut unsigned = vec![]; + let mut signed = vec![]; for hash in self.txs.keys().cloned().collect::>() { let tx = &self.txs[&hash]; + // Verify this hasn't gone stale match tx.kind() { TransactionKind::Signed(Signed { signer, nonce, .. }) => { @@ -129,25 +190,35 @@ impl Mempool { self.remove(&hash); continue; } - } - _ => panic!("non-signed transaction entered mempool"), - } - // Since this TX isn't stale, include it - res.push(tx.clone()); + // Since this TX isn't stale, include it + signed.push(tx.clone()); + } + TransactionKind::Unsigned => { + if unsigned_in_chain(hash) { + self.remove(&hash); + continue; + } + + unsigned.push(tx.clone()); + } + _ => panic!("provided transaction entered mempool"), + } } - // Sort res by nonce. - let nonce = |tx: &T| { + // Sort signed by nonce + let nonce = |tx: &Transaction| { if let TransactionKind::Signed(Signed { nonce, .. }) = tx.kind() { *nonce } else { - 0 + unreachable!() } }; - res.sort_by(|a, b| nonce(a).partial_cmp(&nonce(b)).unwrap()); + signed.sort_by(|a, b| nonce(a).partial_cmp(&nonce(b)).unwrap()); - res + // unsigned first, then signed. + unsigned.append(&mut signed); + unsigned } /// Remove a transaction from the mempool. 
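// Editor's note: a minimal, self-contained sketch (not part of this patch) of the ordering
// invariant block() now provides: unsigned transactions are returned first, followed by signed
// transactions sorted by nonce. The (bool, u32) tuples stand in for (is_unsigned, nonce); the
// real function operates on Transaction<T> values as above.
fn order_for_block(txs: Vec<(bool, u32)>) -> Vec<(bool, u32)> {
  let (mut unsigned, mut signed): (Vec<_>, Vec<_>) =
    txs.into_iter().partition(|(is_unsigned, _)| *is_unsigned);
  // Only signed transactions carry a nonce, so only they need sorting
  signed.sort_by_key(|(_, nonce)| *nonce);
  unsigned.append(&mut signed);
  unsigned
}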
@@ -177,7 +248,7 @@ impl Mempool { } #[cfg(test)] - pub(crate) fn txs(&self) -> &HashMap<[u8; 32], T> { + pub(crate) fn txs(&self) -> &HashMap<[u8; 32], Transaction> { &self.txs } } diff --git a/coordinator/tributary/src/provided.rs b/coordinator/tributary/src/provided.rs index 29172ca6..b9724ba2 100644 --- a/coordinator/tributary/src/provided.rs +++ b/coordinator/tributary/src/provided.rs @@ -4,7 +4,7 @@ use thiserror::Error; use serai_db::{Get, DbTxn, Db}; -use crate::{TransactionKind, TransactionError, Transaction, verify_transaction}; +use crate::transaction::{TransactionKind, TransactionError, Transaction, verify_transaction}; #[derive(Clone, PartialEq, Eq, Debug, Error)] pub enum ProvidedError { diff --git a/coordinator/tributary/src/tendermint.rs b/coordinator/tributary/src/tendermint/mod.rs similarity index 79% rename from coordinator/tributary/src/tendermint.rs rename to coordinator/tributary/src/tendermint/mod.rs index 8b894a56..7f995465 100644 --- a/coordinator/tributary/src/tendermint.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -6,7 +6,7 @@ use async_trait::async_trait; use subtle::ConstantTimeEq; use zeroize::{Zeroize, Zeroizing}; -use rand::{SeedableRng, seq::SliceRandom}; +use rand::{SeedableRng, seq::SliceRandom, rngs::OsRng}; use rand_chacha::ChaCha12Rng; use transcript::{Transcript, RecommendedTranscript}; @@ -29,6 +29,7 @@ use tendermint::{ BlockNumber, RoundNumber, Signer as SignerTrait, SignatureScheme, Weights, Block as BlockTrait, BlockError as TendermintBlockError, Commit, Network, }, + SlashEvent, }; use tokio::{ @@ -37,10 +38,14 @@ use tokio::{ }; use crate::{ - TENDERMINT_MESSAGE, BLOCK_MESSAGE, ReadWrite, Transaction, BlockHeader, Block, BlockError, - Blockchain, P2p, + TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite, + transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError, + Blockchain, P2p, tendermint::tx::SlashVote, }; +pub mod tx; +use tx::{TendermintTx, VoteSignature}; + fn challenge( genesis: [u8; 32], key: [u8; 32], @@ -205,7 +210,7 @@ impl Weights for Validators { let block = usize::try_from(block.0).unwrap(); let round = usize::try_from(round.0).unwrap(); // If multiple rounds are used, a naive block + round would cause the same index to be chosen - // in quick succesion. + // in quick succession. // Accordingly, if we use additional rounds, jump halfway around. // While this is still game-able, it's not explicitly reusing indexes immediately after each // other. 
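// Editor's note: a deliberately simplified, hypothetical illustration of the proposer-selection
// comment above; it is not the actual Validators::weight implementation. The idea: round 0 of a
// block indexes into the validator list by block number, while later rounds offset the index by
// half the total weight so the same index isn't reused immediately. Assumes total_weight > 0.
fn proposer_index(block: usize, round: usize, total_weight: usize) -> usize {
  let base = block % total_weight;
  if round == 0 {
    base
  } else {
    // Jump halfway around for additional rounds, then keep stepping
    (base + (total_weight / 2) + (round - 1)) % total_weight
  }
}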
@@ -215,7 +220,7 @@ impl Weights for Validators { } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -pub(crate) struct TendermintBlock(pub Vec); +pub struct TendermintBlock(pub Vec); impl BlockTrait for TendermintBlock { type Id = [u8; 32]; fn id(&self) -> Self::Id { @@ -224,7 +229,7 @@ impl BlockTrait for TendermintBlock { } #[derive(Clone, Debug)] -pub(crate) struct TendermintNetwork { +pub struct TendermintNetwork { pub(crate) genesis: [u8; 32], pub(crate) signer: Arc, @@ -235,7 +240,7 @@ pub(crate) struct TendermintNetwork { } #[async_trait] -impl Network for TendermintNetwork { +impl Network for TendermintNetwork { type ValidatorId = [u8; 32]; type SignatureScheme = Arc; type Weights = Arc; @@ -262,22 +267,55 @@ impl Network for TendermintNetwork { to_broadcast.extend(msg.encode()); self.p2p.broadcast(self.genesis, to_broadcast).await } - async fn slash(&mut self, validator: Self::ValidatorId) { - // TODO: Handle this slash + + async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent) { log::error!( - "validator {} triggered a slash event on tributary {}", + "validator {} triggered a slash event on tributary {} (with evidence: {})", hex::encode(validator), - hex::encode(self.genesis) + hex::encode(self.genesis), + matches!(slash_event, SlashEvent::WithEvidence(_, _)), ); + + let signer = self.signer(); + let tx = match slash_event { + SlashEvent::WithEvidence(m1, m2) => { + // create an unsigned evidence tx + TendermintTx::SlashEvidence((m1, m2).encode()) + } + SlashEvent::Id(reason, block, round) => { + // create a signed vote tx + let mut tx = TendermintTx::SlashVote(SlashVote { + id: (reason, block, round).encode().try_into().unwrap(), + target: validator.encode().try_into().unwrap(), + sig: VoteSignature::default(), + }); + tx.sign(&mut OsRng, signer.genesis, &signer.key); + tx + } + }; + + // add tx to blockchain and broadcast to peers + // TODO: Make a function out of this following block + let mut to_broadcast = vec![TRANSACTION_MESSAGE]; + tx.write(&mut to_broadcast).unwrap(); + if self.blockchain.write().await.add_transaction::( + true, + Transaction::Tendermint(tx), + self.signature_scheme(), + ) { + self.p2p.broadcast(signer.genesis, to_broadcast).await; + } } async fn validate(&mut self, block: &Self::Block) -> Result<(), TendermintBlockError> { let block = Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?; - self.blockchain.read().await.verify_block(&block).map_err(|e| match e { - BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal, - _ => TendermintBlockError::Fatal, - }) + self.blockchain.read().await.verify_block::(&block, self.signature_scheme()).map_err( + |e| match e { + BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal, + _ => TendermintBlockError::Fatal, + }, + ) } async fn add_block( @@ -303,7 +341,11 @@ impl Network for TendermintNetwork { let encoded_commit = commit.encode(); loop { - let block_res = self.blockchain.write().await.add_block(&block, encoded_commit.clone()); + let block_res = self.blockchain.write().await.add_block::( + &block, + encoded_commit.clone(), + self.signature_scheme(), + ); match block_res { Ok(()) => { // If we successfully added this block, broadcast it @@ -326,6 +368,8 @@ impl Network for TendermintNetwork { } } - Some(TendermintBlock(self.blockchain.write().await.build_block().serialize())) + Some(TendermintBlock( + self.blockchain.write().await.build_block::(self.signature_scheme()).serialize(), + )) } } diff --git 
a/coordinator/tributary/src/tendermint/tx.rs b/coordinator/tributary/src/tendermint/tx.rs new file mode 100644 index 00000000..5ae89d80 --- /dev/null +++ b/coordinator/tributary/src/tendermint/tx.rs @@ -0,0 +1,347 @@ +use core::ops::Deref; +use std::{io, vec, default::Default}; + +use scale::Decode; + +use zeroize::Zeroizing; + +use blake2::{Digest, Blake2s256, Blake2b512}; + +use rand::{RngCore, CryptoRng}; + +use ciphersuite::{ + group::{GroupEncoding, ff::Field}, + Ciphersuite, Ristretto, +}; +use schnorr::SchnorrSignature; + +use crate::{ + transaction::{Transaction, TransactionKind, TransactionError}, + ReadWrite, +}; + +use tendermint::{ + SignedMessageFor, Data, + round::RoundData, + time::CanonicalInstant, + commit_msg, + ext::{Network, Commit, RoundNumber, SignatureScheme}, +}; + +/// Signing data for a slash vote. +/// +/// The traditional Signed uses a nonce, whereas votes aren't required/expected to be ordered. +/// Accordingly, a simple uniqueness check works instead. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct VoteSignature { + pub signer: ::G, + pub signature: SchnorrSignature, +} + +impl ReadWrite for VoteSignature { + fn read(reader: &mut R) -> io::Result { + let signer = Ristretto::read_G(reader)?; + let signature = SchnorrSignature::::read(reader)?; + + Ok(VoteSignature { signer, signature }) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.signer.to_bytes())?; + self.signature.write(writer) + } +} + +impl Default for VoteSignature { + fn default() -> Self { + VoteSignature { + signer: Ristretto::generator(), + signature: SchnorrSignature::::read(&mut [0; 64].as_slice()).unwrap(), + } + } +} + +/// A vote to slash a malicious validator. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct SlashVote { + pub id: [u8; 13], // vote id (slash event id) + pub target: [u8; 32], // who to slash + pub sig: VoteSignature, // signature +} + +impl ReadWrite for SlashVote { + fn read(reader: &mut R) -> io::Result { + let mut id = [0; 13]; + let mut target = [0; 32]; + reader.read_exact(&mut id)?; + reader.read_exact(&mut target)?; + let sig = VoteSignature::read(reader)?; + + Ok(SlashVote { id, target, sig }) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.id)?; + writer.write_all(&self.target)?; + self.sig.write(writer) + } +} + +#[allow(clippy::large_enum_variant)] +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum TendermintTx { + SlashEvidence(Vec), + // TODO: Should SlashVote.sig be moved directly into the enum, + // as in (SlashVote, sig), since the sig is the signature of the entire TX?
+ SlashVote(SlashVote), +} + +impl ReadWrite for TendermintTx { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let mut len = [0; 4]; + reader.read_exact(&mut len)?; + let mut len = + usize::try_from(u32::from_le_bytes(len)).expect("running on a 16-bit system?"); + + let mut data = vec![]; + + // Read chunk-by-chunk so a claimed 4 GB length doesn't cause a 4 GB allocation + // While we could check the length is sane, that'd require we know what a sane length is + // We'd also have to maintain that length's sanity even as other parts of the codebase, + // and even entire crates, change + // This is fine as it'll eventually hit the P2P message size limit, yet doesn't require + // knowing it nor does it make any assumptions + const CHUNK_LEN: usize = 1024; + let mut chunk = [0; CHUNK_LEN]; + while len > 0 { + let to_read = len.min(CHUNK_LEN); + data.reserve(to_read); + reader.read_exact(&mut chunk[.. to_read])?; + data.extend(&chunk[.. to_read]); + len -= to_read; + } + Ok(TendermintTx::SlashEvidence(data)) + } + 1 => { + let vote = SlashVote::read(reader)?; + Ok(TendermintTx::SlashVote(vote)) + } + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid transaction type")), + } + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + TendermintTx::SlashEvidence(ev) => { + writer.write_all(&[0])?; + writer.write_all(&u32::try_from(ev.len()).unwrap().to_le_bytes())?; + writer.write_all(ev) + } + TendermintTx::SlashVote(vote) => { + writer.write_all(&[1])?; + vote.write(writer) + } + } + } +} + +impl Transaction for TendermintTx { + fn kind(&self) -> TransactionKind<'_> { + // There's an assert elsewhere in the codebase expecting this behavior + // If we do want to add Provided/Signed TendermintTxs, review the implications carefully + TransactionKind::Unsigned + } + + fn hash(&self) -> [u8; 32] { + let mut tx = self.serialize(); + if let TendermintTx::SlashVote(vote) = self { + // Make sure the part we're cutting off is the signature + assert_eq!(tx.drain((tx.len() - 64) ..).collect::>(), vote.sig.signature.serialize()); + } + Blake2s256::digest(tx).into() + } + + fn sig_hash(&self, genesis: [u8; 32]) -> ::F { + match self { + TendermintTx::SlashEvidence(_) => panic!("sig_hash called on slash evidence transaction"), + TendermintTx::SlashVote(vote) => { + let signature = &vote.sig.signature; + ::F::from_bytes_mod_order_wide( + &Blake2b512::digest( + [genesis.as_ref(), &self.hash(), signature.R.to_bytes().as_ref()].concat(), + ) + .into(), + ) + } + } + } + + fn verify(&self) -> Result<(), TransactionError> { + Ok(()) + } +} + +impl TendermintTx { + // Sign a transaction + pub fn sign( + &mut self, + rng: &mut R, + genesis: [u8; 32], + key: &Zeroizing<::F>, + ) { + fn signature(tx: &mut TendermintTx) -> Option<&mut VoteSignature> { + match tx { + TendermintTx::SlashVote(vote) => Some(&mut vote.sig), + _ => None, + } + } + + signature(self).unwrap().signer = Ristretto::generator() * key.deref(); + + let sig_nonce = Zeroizing::new(::F::random(rng)); + signature(self).unwrap().signature.R = + ::generator() * sig_nonce.deref(); + + let sig_hash = self.sig_hash(genesis); + + signature(self).unwrap().signature = + SchnorrSignature::::sign(key, sig_nonce, sig_hash); + } +} + +pub fn decode_evidence( + mut ev: &[u8], +) -> Result<(SignedMessageFor, Option>), TransactionError> { + <(SignedMessageFor, Option>)>::decode(&mut ev).map_err(|_| { + dbg!("failed to decode"); + TransactionError::InvalidContent + }) +} 
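// Editor's note: a hypothetical usage sketch, not part of this patch. SlashEvidence wraps the
// SCALE encoding of a (SignedMessageFor<N>, Option<SignedMessageFor<N>>) pair: a single message
// for self-contained faults (such as an invalid valid round), two messages for equivocation.
// Assumes scale::Encode is in scope; the function name is illustrative only.
fn equivocation_evidence<N: Network>(
  first: SignedMessageFor<N>,
  second: SignedMessageFor<N>,
) -> TendermintTx {
  // verify_tendermint_tx later decodes this with decode_evidence and checks the two messages are
  // distinct, share a sender, and reference the same block
  TendermintTx::SlashEvidence((first, Some(second)).encode())
}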
+ +// TODO: Move this into tendermint-machine +// TODO: Strongly type Evidence, instead of having two messages and no idea what's supposedly +// wrong with them. Doing so will massively simplify the auditability of this (as this +// re-implements an entire foreign library's checks for malicious behavior). +pub(crate) fn verify_tendermint_tx( + tx: &TendermintTx, + genesis: [u8; 32], + schema: N::SignatureScheme, + commit: impl Fn(u32) -> Option>, +) -> Result<(), TransactionError> { + tx.verify()?; + + match tx { + TendermintTx::SlashEvidence(ev) => { + let (first, second) = decode_evidence::(ev)?; + + // verify that evidence messages are signed correctly + if !first.verify_signature(&schema) { + Err(TransactionError::InvalidSignature)? + } + let first = first.msg; + + if let Some(second) = second { + if !second.verify_signature(&schema) { + Err(TransactionError::InvalidSignature)? + } + let second = second.msg; + + // 2 types of evidence here + // 1- multiple distinct messages for the same block + round + step + // 2- precommitted to multiple blocks + + // Make sure they're distinct messages, from the same sender, within the same block + if (first == second) || (first.sender != second.sender) || (first.block != second.block) { + Err(TransactionError::InvalidContent)?; + } + + // Distinct messages within the same step + if (first.round == second.round) && (first.data.step() == second.data.step()) { + return Ok(()); + } + + // check whether messages are precommits to different blocks + // The inner signatures don't need to be verified since the outer signatures were + // While the inner signatures may be invalid, that would've yielded a invalid precommit + // signature slash instead of distinct precommit slash + if let Data::Precommit(Some((h1, _))) = first.data { + if let Data::Precommit(Some((h2, _))) = second.data { + if h1 == h2 { + Err(TransactionError::InvalidContent)?; + } + return Ok(()); + } + } + + // No fault identified + Err(TransactionError::InvalidContent)? + } + + // 2 types of evidence can be here + // 1- invalid commit signature + // 2- vr number that was greater than or equal to the current round + match &first.data { + Data::Proposal(vr, _) => { + // check the vr + if vr.is_none() || vr.unwrap().0 < first.round.0 { + Err(TransactionError::InvalidContent)? + } + } + Data::Precommit(Some((id, sig))) => { + // TODO: We need to be passed in the genesis time to handle this edge case + if first.block.0 == 0 { + todo!("invalid precommit signature on first block") + } + + // get the last commit + // TODO: Why do we use u32 when Tendermint uses u64? + let prior_commit = match u32::try_from(first.block.0 - 1) { + Ok(n) => match commit(n) { + Some(c) => c, + // If we have yet to sync the block in question, we will return InvalidContent based + // on our own temporal ambiguity + // This will also cause an InvalidContent for anything using a non-existent block, + // yet that's valid behavior + // TODO: Double check the ramifications of this + _ => Err(TransactionError::InvalidContent)?, + }, + _ => Err(TransactionError::InvalidContent)?, + }; + + // calculate the end time till the msg round + let mut last_end_time = CanonicalInstant::new(prior_commit.end_time); + for r in 0 ..= first.round.0 { + last_end_time = RoundData::::new(RoundNumber(r), last_end_time).end_time(); + } + + // verify that the commit was actually invalid + if schema.verify(first.sender, &commit_msg(last_end_time.canonical(), id.as_ref()), sig) { + Err(TransactionError::InvalidContent)? 
+ } + } + _ => Err(TransactionError::InvalidContent)?, + } + } + TendermintTx::SlashVote(vote) => { + // TODO: verify the target is actually one of our validators? + // this shouldn't be a problem because if the target isn't valid, no one else + // gonna vote on it. But we still have to think about spam votes. + + // TODO: we need to check signer is a participant + + // TODO: Move this into the standalone TendermintTx verify + let sig = &vote.sig; + // verify the tx signature + // TODO: Use Schnorr half-aggregation and a batch verification here + if !sig.signature.verify(sig.signer, tx.sig_hash(genesis)) { + Err(TransactionError::InvalidSignature)?; + } + } + } + + Ok(()) +} diff --git a/coordinator/tributary/src/tests/block.rs b/coordinator/tributary/src/tests/block.rs index 06ae46ea..2bc4b823 100644 --- a/coordinator/tributary/src/tests/block.rs +++ b/coordinator/tributary/src/tests/block.rs @@ -1,14 +1,23 @@ -use std::{io, collections::HashMap}; +use std::{sync::Arc, io, collections::HashMap, fmt::Debug}; use blake2::{Digest, Blake2s256}; - use ciphersuite::{ group::{ff::Field, Group}, Ciphersuite, Ristretto, }; use schnorr::SchnorrSignature; -use crate::{ReadWrite, TransactionError, Signed, TransactionKind, Transaction, BlockError, Block}; +use serai_db::MemDb; +use tendermint::ext::Commit; + +use crate::{ + ReadWrite, BlockError, Block, Transaction, + tests::p2p::DummyP2p, + transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait}, + tendermint::{TendermintNetwork, Validators}, +}; + +type N = TendermintNetwork; // A transaction solely defined by its nonce and a distinguisher (to allow creating distinct TXs // sharing a nonce). @@ -50,7 +59,7 @@ impl ReadWrite for NonceTransaction { } } -impl Transaction for NonceTransaction { +impl TransactionTrait for NonceTransaction { fn kind(&self) -> TransactionKind<'_> { TransactionKind::Signed(&self.2) } @@ -68,8 +77,21 @@ impl Transaction for NonceTransaction { fn empty_block() { const GENESIS: [u8; 32] = [0xff; 32]; const LAST: [u8; 32] = [0x01; 32]; + let validators = Arc::new(Validators::new(GENESIS, vec![]).unwrap()); + let commit = |_: u32| -> Option>> { + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + let unsigned_in_chain = |_: [u8; 32]| false; Block::::new(LAST, vec![], vec![]) - .verify(GENESIS, LAST, HashMap::new(), HashMap::new()) + .verify::( + GENESIS, + LAST, + HashMap::new(), + HashMap::new(), + validators, + commit, + unsigned_in_chain, + ) .unwrap(); } @@ -78,19 +100,29 @@ fn duplicate_nonces() { const GENESIS: [u8; 32] = [0xff; 32]; const LAST: [u8; 32] = [0x01; 32]; + let validators = Arc::new(Validators::new(GENESIS, vec![]).unwrap()); + // Run once without duplicating a nonce, and once with, so that's confirmed to be the faulty // component for i in [1, 0] { let mut mempool = vec![]; - let mut insert = |tx: NonceTransaction| mempool.push(tx); + let mut insert = |tx: NonceTransaction| mempool.push(Transaction::Application(tx)); insert(NonceTransaction::new(0, 0)); insert(NonceTransaction::new(i, 1)); - let res = Block::new(LAST, vec![], mempool).verify( + let commit = |_: u32| -> Option>> { + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + let unsigned_in_chain = |_: [u8; 32]| false; + + let res = Block::new(LAST, vec![], mempool).verify::( GENESIS, LAST, HashMap::new(), HashMap::from([(::G::identity(), 0)]), + validators.clone(), + commit, + unsigned_in_chain, ); if i == 1 { res.unwrap(); diff --git 
a/coordinator/tributary/src/tests/blockchain.rs b/coordinator/tributary/src/tests/blockchain.rs index f38fd7bd..31cdf178 100644 --- a/coordinator/tributary/src/tests/blockchain.rs +++ b/coordinator/tributary/src/tests/blockchain.rs @@ -1,7 +1,12 @@ -use std::collections::{VecDeque, HashMap}; +use core::ops::Deref; +use std::{ + collections::{VecDeque, HashMap}, + sync::Arc, + io, +}; use zeroize::Zeroizing; -use rand::{RngCore, rngs::OsRng}; +use rand::rngs::OsRng; use blake2::{Digest, Blake2s256}; @@ -10,17 +15,20 @@ use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto}; use serai_db::{DbTxn, Db, MemDb}; use crate::{ - merkle, Transaction, ProvidedError, ProvidedTransactions, Block, Blockchain, - tests::{ProvidedTransaction, SignedTransaction, random_provided_transaction}, + ReadWrite, TransactionKind, + transaction::Transaction as TransactionTrait, + TransactionError, Transaction, ProvidedError, ProvidedTransactions, merkle, BlockError, Block, + Blockchain, + tendermint::{TendermintNetwork, Validators, tx::TendermintTx, Signer, TendermintBlock}, + tests::{ + ProvidedTransaction, SignedTransaction, random_provided_transaction, p2p::DummyP2p, + new_genesis, random_vote_tx, random_evidence_tx, + }, }; -fn new_genesis() -> [u8; 32] { - let mut genesis = [0; 32]; - OsRng.fill_bytes(&mut genesis); - genesis -} +type N = TendermintNetwork; -fn new_blockchain( +fn new_blockchain( genesis: [u8; 32], participants: &[::G], ) -> (MemDb, Blockchain) { @@ -34,12 +42,14 @@ fn new_blockchain( #[test] fn block_addition() { let genesis = new_genesis(); + let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (db, mut blockchain) = new_blockchain::(genesis, &[]); - let block = blockchain.build_block(); + let block = blockchain.build_block::(validators.clone()); + assert_eq!(block.header.parent, genesis); assert_eq!(block.header.transactions, [0; 32]); - blockchain.verify_block(&block).unwrap(); - assert!(blockchain.add_block(&block, vec![]).is_ok()); + blockchain.verify_block::(&block, validators.clone()).unwrap(); + assert!(blockchain.add_block::(&block, vec![], validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); assert_eq!(blockchain.block_number(), 1); assert_eq!( @@ -51,23 +61,24 @@ fn block_addition() { #[test] fn invalid_block() { let genesis = new_genesis(); + let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (_, mut blockchain) = new_blockchain::(genesis, &[]); - let block = blockchain.build_block(); + let block = blockchain.build_block::(validators.clone()); // Mutate parent { #[allow(clippy::redundant_clone)] // False positive let mut block = block.clone(); block.header.parent = Blake2s256::digest(block.header.parent).into(); - assert!(blockchain.verify_block(&block).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); } // Mutate tranactions merkle { let mut block = block; block.header.transactions = Blake2s256::digest(block.header.transactions).into(); - assert!(blockchain.verify_block(&block).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); } let key = Zeroizing::new(::F::random(&mut OsRng)); @@ -76,9 +87,9 @@ fn invalid_block() { // Not a participant { // Manually create the block to bypass build_block's checks - let block = Block::new(blockchain.tip(), vec![], vec![tx.clone()]); + let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - 
assert!(blockchain.verify_block(&block).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); } // Run the rest of the tests with them as a participant @@ -86,40 +97,81 @@ fn invalid_block() { // Re-run the not a participant block to make sure it now works { - let block = Block::new(blockchain.tip(), vec![], vec![tx.clone()]); + let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - blockchain.verify_block(&block).unwrap(); + blockchain.verify_block::(&block, validators.clone()).unwrap(); } { // Add a valid transaction let mut blockchain = blockchain.clone(); - assert!(blockchain.add_transaction(true, tx.clone())); - let mut block = blockchain.build_block(); + assert!(blockchain.add_transaction::( + true, + Transaction::Application(tx.clone()), + validators.clone() + )); + let mut block = blockchain.build_block::(validators.clone()); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - blockchain.verify_block(&block).unwrap(); + blockchain.verify_block::(&block, validators.clone()).unwrap(); // And verify mutating the transactions merkle now causes a failure block.header.transactions = merkle(&[]); - assert!(blockchain.verify_block(&block).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); } { // Invalid nonce let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 5); // Manually create the block to bypass build_block's checks - let block = Block::new(blockchain.tip(), vec![], vec![tx]); - assert!(blockchain.verify_block(&block).is_err()); + let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx)]); + assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); } { // Invalid signature - let mut blockchain = blockchain; - assert!(blockchain.add_transaction(true, tx)); - let mut block = blockchain.build_block(); - blockchain.verify_block(&block).unwrap(); - block.transactions[0].1.signature.s += ::F::ONE; - assert!(blockchain.verify_block(&block).is_err()); + let mut blockchain = blockchain.clone(); + assert!(blockchain.add_transaction::( + true, + Transaction::Application(tx), + validators.clone() + )); + let mut block = blockchain.build_block::(validators.clone()); + blockchain.verify_block::(&block, validators.clone()).unwrap(); + match &mut block.transactions[0] { + Transaction::Application(tx) => { + tx.1.signature.s += ::F::ONE; + } + _ => panic!("non-signed tx found"), + } + assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); + + // Make sure this isn't because the merkle changed due to the transaction hash including the + // signature (which it explicitly isn't allowed to anyways) + assert_eq!(block.header.transactions, merkle(&[block.transactions[0].hash()])); + } + + { + // Invalid vote signature + let mut blockchain = blockchain.clone(); + let vote_tx = random_vote_tx(&mut OsRng, genesis); + assert!(blockchain.add_transaction::( + true, + Transaction::Tendermint(vote_tx), + validators.clone() + )); + let mut block = blockchain.build_block::(validators.clone()); + blockchain.verify_block::(&block, validators.clone()).unwrap(); + match &mut block.transactions[0] { + Transaction::Tendermint(tx) => match tx { + TendermintTx::SlashVote(vote) => { + vote.sig.signature.s += ::F::ONE; + } + _ => panic!("non-vote tx found"), + }, + _ => panic!("non-tendermint tx found"), + } + + assert!(blockchain.verify_block::(&block, 
validators.clone()).is_err()); // Make sure this isn't because the merkle changed due to the transaction hash including the // signature (which it explicitly isn't allowed to anyways) @@ -130,7 +182,7 @@ fn invalid_block() { #[test] fn signed_transaction() { let genesis = new_genesis(); - + let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let key = Zeroizing::new(::F::random(&mut OsRng)); let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0); let signer = tx.1.signer; @@ -139,14 +191,21 @@ fn signed_transaction() { assert_eq!(blockchain.next_nonce(signer), Some(0)); let test = |blockchain: &mut Blockchain, - mempool: Vec| { + mempool: Vec>| { let tip = blockchain.tip(); for tx in mempool.clone() { + let Transaction::Application(tx) = tx else { + panic!("tendermint tx found"); + }; let next_nonce = blockchain.next_nonce(signer).unwrap(); - assert!(blockchain.add_transaction(true, tx)); + assert!(blockchain.add_transaction::( + true, + Transaction::Application(tx), + validators.clone() + )); assert_eq!(next_nonce + 1, blockchain.next_nonce(signer).unwrap()); } - let block = blockchain.build_block(); + let block = blockchain.build_block::(validators.clone()); assert_eq!(block, Block::new(blockchain.tip(), vec![], mempool.clone())); assert_eq!(blockchain.tip(), tip); assert_eq!(block.header.parent, tip); @@ -160,19 +219,21 @@ fn signed_transaction() { ); // Verify and add the block - blockchain.verify_block(&block).unwrap(); - assert!(blockchain.add_block(&block, vec![]).is_ok()); + blockchain.verify_block::(&block, validators.clone()).unwrap(); + assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); assert_eq!(blockchain.tip(), block.hash()); }; // Test with a single nonce - test(&mut blockchain, vec![tx]); + test(&mut blockchain, vec![Transaction::Application(tx)]); assert_eq!(blockchain.next_nonce(signer), Some(1)); // Test with a flood of nonces let mut mempool = vec![]; for nonce in 1 .. 
64 { - mempool.push(crate::tests::signed_transaction(&mut OsRng, genesis, &key, nonce)); + mempool.push(Transaction::Application(crate::tests::signed_transaction( + &mut OsRng, genesis, &key, nonce, + ))); } test(&mut blockchain, mempool); assert_eq!(blockchain.next_nonce(signer), Some(64)); @@ -181,6 +242,7 @@ fn signed_transaction() { #[test] fn provided_transaction() { let genesis = new_genesis(); + let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (_, mut blockchain) = new_blockchain::(genesis, &[]); let tx = random_provided_transaction(&mut OsRng); @@ -203,18 +265,274 @@ fn provided_transaction() { // Non-provided transactions should fail verification let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]); - assert!(blockchain.verify_block(&block).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); // Provided transactions should pass verification blockchain.provide_transaction(tx.clone()).unwrap(); - blockchain.verify_block(&block).unwrap(); + blockchain.verify_block::(&block, validators.clone()).unwrap(); // add_block should work for verified blocks - assert!(blockchain.add_block(&block, vec![]).is_ok()); + assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); let block = Block::new(blockchain.tip(), vec![tx], vec![]); // The provided transaction should no longer considered provided, causing this error - assert!(blockchain.verify_block(&block).is_err()); + assert!(blockchain.verify_block::(&block, validators.clone()).is_err()); // add_block should fail for unverified provided transactions if told to add them - assert!(blockchain.add_block(&block, vec![]).is_err()); + assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_err()); +} + +#[test] +fn tendermint_vote_tx() { + let genesis = new_genesis(); + let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); + + let (_, mut blockchain) = new_blockchain::(genesis, &[]); + + let test = |blockchain: &mut Blockchain, + mempool: Vec>| { + let tip = blockchain.tip(); + for tx in mempool.clone() { + let Transaction::Tendermint(tx) = tx else { + panic!("non-tendermint tx found"); + }; + assert!(blockchain.add_transaction::( + true, + Transaction::Tendermint(tx), + validators.clone() + )); + } + let block = blockchain.build_block::(validators.clone()); + + assert_eq!(blockchain.tip(), tip); + assert_eq!(block.header.parent, tip); + + // Make sure all transactions were included + for bt in &block.transactions { + assert!(mempool.contains(bt)); + } + + // Make sure the merkle was correct + // Uses block.transactions instead of mempool as order may differ between the two + assert_eq!( + block.header.transactions, + merkle(&block.transactions.iter().map(Transaction::hash).collect::>()), + ); + + // Verify and add the block + blockchain.verify_block::(&block, validators.clone()).unwrap(); + assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + assert_eq!(blockchain.tip(), block.hash()); + }; + + // test with single tx + let tx = random_vote_tx(&mut OsRng, genesis); + test(&mut blockchain, vec![Transaction::Tendermint(tx)]); + + // test with multiple txs + let mut mempool: Vec> = vec![]; + for _ in 0 .. 
5 { + mempool.push(Transaction::Tendermint(random_vote_tx(&mut OsRng, genesis))); + } + test(&mut blockchain, mempool); +} + +#[tokio::test] +async fn tendermint_evidence_tx() { + let genesis = new_genesis(); + let key = Zeroizing::new(::F::random(&mut OsRng)); + let signer = Signer::new(genesis, key.clone()); + let signer_id = Ristretto::generator() * key.deref(); + let validators = Arc::new(Validators::new(genesis, vec![(signer_id, 1)]).unwrap()); + + let (_, mut blockchain) = new_blockchain::(genesis, &[]); + + let test = |blockchain: &mut Blockchain, + mempool: Vec>, + validators: Arc| { + let tip = blockchain.tip(); + for tx in mempool.clone() { + let Transaction::Tendermint(tx) = tx else { + panic!("non-tendermint tx found"); + }; + assert!(blockchain.add_transaction::( + true, + Transaction::Tendermint(tx), + validators.clone() + )); + } + let block = blockchain.build_block::(validators.clone()); + assert_eq!(blockchain.tip(), tip); + assert_eq!(block.header.parent, tip); + + // Make sure all transactions were included + for bt in &block.transactions { + assert!(mempool.contains(bt)); + } + + // Verify and add the block + blockchain.verify_block::(&block, validators.clone()).unwrap(); + assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + assert_eq!(blockchain.tip(), block.hash()); + }; + + // test with single tx + let tx = random_evidence_tx::(signer.into(), TendermintBlock(vec![0x12])).await; + test(&mut blockchain, vec![Transaction::Tendermint(tx)], validators); + + // test with multiple txs + let mut mempool: Vec> = vec![]; + let mut signers = vec![]; + for _ in 0 .. 5 { + let key = Zeroizing::new(::F::random(&mut OsRng)); + let signer = Signer::new(genesis, key.clone()); + let signer_id = Ristretto::generator() * key.deref(); + signers.push((signer_id, 1)); + mempool.push(Transaction::Tendermint( + random_evidence_tx::(signer.into(), TendermintBlock(vec![0x12])).await, + )); + } + + // update validators + let validators = Arc::new(Validators::new(genesis, signers).unwrap()); + test(&mut blockchain, mempool, validators); +} + +#[test] +fn block_tx_ordering() { + #[derive(Debug, PartialEq, Eq, Clone)] + enum SignedTx { + Signed(Box), + Provided(Box), + } + impl ReadWrite for SignedTx { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => Ok(SignedTx::Signed(Box::new(SignedTransaction::read(reader)?))), + 1 => Ok(SignedTx::Provided(Box::new(ProvidedTransaction::read(reader)?))), + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid transaction type")), + } + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + SignedTx::Signed(signed) => { + writer.write_all(&[0])?; + signed.write(writer) + } + SignedTx::Provided(pro) => { + writer.write_all(&[1])?; + pro.write(writer) + } + } + } + } + + impl TransactionTrait for SignedTx { + fn kind(&self) -> TransactionKind<'_> { + match self { + SignedTx::Signed(signed) => signed.kind(), + SignedTx::Provided(pro) => pro.kind(), + } + } + + fn hash(&self) -> [u8; 32] { + match self { + SignedTx::Signed(signed) => signed.hash(), + SignedTx::Provided(pro) => pro.hash(), + } + } + + fn verify(&self) -> Result<(), TransactionError> { + Ok(()) + } + } + + let genesis = new_genesis(); + let key = Zeroizing::new(::F::random(&mut OsRng)); + let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); + + // signer + let signer = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0).1.signer; + + let (_, mut 
blockchain) = new_blockchain::(genesis, &[signer]); + let tip = blockchain.tip(); + + // add txs + let mut mempool = vec![]; + let mut provided_txs = vec![]; + for i in 0 .. 128 { + let signed_tx = Transaction::Application(SignedTx::Signed(Box::new( + crate::tests::signed_transaction(&mut OsRng, genesis, &key, i), + ))); + assert!(blockchain.add_transaction::(true, signed_tx.clone(), validators.clone())); + mempool.push(signed_tx); + + let unsigned_tx = Transaction::Tendermint(random_vote_tx(&mut OsRng, genesis)); + assert!(blockchain.add_transaction::(true, unsigned_tx.clone(), validators.clone())); + mempool.push(unsigned_tx); + + let provided_tx = SignedTx::Provided(Box::new(random_provided_transaction(&mut OsRng))); + blockchain.provide_transaction(provided_tx.clone()).unwrap(); + provided_txs.push(provided_tx); + } + let block = blockchain.build_block::(validators.clone()); + + assert_eq!(blockchain.tip(), tip); + assert_eq!(block.header.parent, tip); + + // Make sure all transactions were included + assert_eq!(block.transactions.len(), 3 * 128); + for bt in &block.transactions[128 ..] { + assert!(mempool.contains(bt)); + } + + // check the tx order + let txs = &block.transactions; + for tx in txs.iter().take(128) { + assert!(matches!(tx.kind(), TransactionKind::Provided(..))); + } + for tx in txs.iter().take(128).skip(128) { + assert!(matches!(tx.kind(), TransactionKind::Unsigned)); + } + for tx in txs.iter().take(128).skip(256) { + assert!(matches!(tx.kind(), TransactionKind::Signed(..))); + } + + // should be a valid block + blockchain.verify_block::(&block, validators.clone()).unwrap(); + + // Unsigned before Provided + { + let mut block = block.clone(); + // Doesn't use swap to preserve the order of Provided, as that's checked before kind ordering + let unsigned = block.transactions.remove(128); + block.transactions.insert(0, unsigned); + assert_eq!( + blockchain.verify_block::(&block, validators.clone()).unwrap_err(), + BlockError::WrongTransactionOrder + ); + } + + // Signed before Provided + { + let mut block = block.clone(); + let signed = block.transactions.remove(256); + block.transactions.insert(0, signed); + assert_eq!( + blockchain.verify_block::(&block, validators.clone()).unwrap_err(), + BlockError::WrongTransactionOrder + ); + } + + // Signed before Unsigned + { + let mut block = block; + block.transactions.swap(128, 256); + assert_eq!( + blockchain.verify_block::(&block, validators.clone()).unwrap_err(), + BlockError::WrongTransactionOrder + ); + } } diff --git a/coordinator/tributary/src/tests/mempool.rs b/coordinator/tributary/src/tests/mempool.rs index 3105e4cc..b4e36ebf 100644 --- a/coordinator/tributary/src/tests/mempool.rs +++ b/coordinator/tributary/src/tests/mempool.rs @@ -1,50 +1,132 @@ -use std::collections::HashMap; +use std::{sync::Arc, collections::HashMap}; use zeroize::Zeroizing; use rand::{RngCore, rngs::OsRng}; use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto}; +use tendermint::ext::Commit; + use serai_db::MemDb; use crate::{ + transaction::Transaction as TransactionTrait, + tendermint::{TendermintBlock, Validators, Signer, TendermintNetwork}, ACCOUNT_MEMPOOL_LIMIT, Transaction, Mempool, - tests::{SignedTransaction, signed_transaction}, + tests::{ + SignedTransaction, signed_transaction, p2p::DummyP2p, random_vote_tx, random_evidence_tx, + }, }; -fn new_mempool() -> ([u8; 32], MemDb, Mempool) { +type N = TendermintNetwork; + +fn new_mempool() -> ([u8; 32], MemDb, Mempool) { let mut genesis = [0; 32]; OsRng.fill_bytes(&mut genesis); let db = 
MemDb::new(); (genesis, db.clone(), Mempool::new(db, genesis)) } -#[test] -fn mempool_addition() { +#[tokio::test] +async fn mempool_addition() { let (genesis, db, mut mempool) = new_mempool::(); - + let commit = |_: u32| -> Option>> { + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + let unsigned_in_chain = |_: [u8; 32]| false; let key = Zeroizing::new(::F::random(&mut OsRng)); let first_tx = signed_transaction(&mut OsRng, genesis, &key, 0); let signer = first_tx.1.signer; assert_eq!(mempool.next_nonce(&signer), None); + // validators + let validators = Arc::new(Validators::new(genesis, vec![(signer, 1)]).unwrap()); + // Add TX 0 let mut blockchain_next_nonces = HashMap::from([(signer, 0)]); - assert!(mempool.add(&blockchain_next_nonces, true, first_tx.clone())); + assert!(mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Application(first_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + )); assert_eq!(mempool.next_nonce(&signer), Some(1)); + // add a tendermint vote tx + let vote_tx = random_vote_tx(&mut OsRng, genesis); + assert!(mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Tendermint(vote_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + )); + + // add a tendermint evidence tx + let evidence_tx = + random_evidence_tx::(Signer::new(genesis, key.clone()).into(), TendermintBlock(vec![])) + .await; + assert!(mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Tendermint(evidence_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + )); + // Test reloading works assert_eq!(mempool, Mempool::new(db, genesis)); // Adding it again should fail - assert!(!mempool.add(&blockchain_next_nonces, true, first_tx.clone())); + assert!(!mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Application(first_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + )); + assert!(!mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Tendermint(vote_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + )); + assert!(!mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Tendermint(evidence_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + )); // Do the same with the next nonce let second_tx = signed_transaction(&mut OsRng, genesis, &key, 1); - assert!(mempool.add(&blockchain_next_nonces, true, second_tx.clone())); + assert!(mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Application(second_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + )); assert_eq!(mempool.next_nonce(&signer), Some(2)); - assert!(!mempool.add(&blockchain_next_nonces, true, second_tx.clone())); + assert!(!mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Application(second_tx.clone()), + validators.clone(), + unsigned_in_chain, + commit, + )); // If the mempool doesn't have a nonce for an account, it should successfully use the // blockchain's @@ -53,43 +135,68 @@ fn mempool_addition() { let second_signer = tx.1.signer; assert_eq!(mempool.next_nonce(&second_signer), None); blockchain_next_nonces.insert(second_signer, 2); - assert!(mempool.add(&blockchain_next_nonces, true, tx.clone())); + assert!(mempool.add::( + &blockchain_next_nonces, + true, + Transaction::Application(tx.clone()), + validators.clone(), + unsigned_in_chain, + commit + )); assert_eq!(mempool.next_nonce(&second_signer), Some(3)); // Getting a block should work - assert_eq!(mempool.block(&blockchain_next_nonces).len(), 
3); + assert_eq!(mempool.block(&blockchain_next_nonces, unsigned_in_chain).len(), 5); // If the blockchain says an account had its nonce updated, it should cause a prune blockchain_next_nonces.insert(signer, 1); - let mut block = mempool.block(&blockchain_next_nonces); - assert_eq!(block.len(), 2); + let mut block = mempool.block(&blockchain_next_nonces, unsigned_in_chain); + assert_eq!(block.len(), 4); assert!(!block.iter().any(|tx| tx.hash() == first_tx.hash())); assert_eq!(mempool.txs(), &block.drain(..).map(|tx| (tx.hash(), tx)).collect::>()); // Removing should also successfully prune mempool.remove(&tx.hash()); - assert_eq!(mempool.txs(), &HashMap::from([(second_tx.hash(), second_tx)])); + mempool.remove(&vote_tx.hash()); + + assert_eq!( + mempool.txs(), + &HashMap::from([ + (second_tx.hash(), Transaction::Application(second_tx)), + (evidence_tx.hash(), Transaction::Tendermint(evidence_tx)) + ]) + ); } #[test] fn too_many_mempool() { let (genesis, _, mut mempool) = new_mempool::(); - + let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); + let commit = |_: u32| -> Option>> { + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + let unsigned_in_chain = |_: [u8; 32]| false; let key = Zeroizing::new(::F::random(&mut OsRng)); let signer = signed_transaction(&mut OsRng, genesis, &key, 0).1.signer; // We should be able to add transactions up to the limit for i in 0 .. ACCOUNT_MEMPOOL_LIMIT { - assert!(mempool.add( + assert!(mempool.add::( &HashMap::from([(signer, 0)]), false, - signed_transaction(&mut OsRng, genesis, &key, i) + Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, i)), + validators.clone(), + unsigned_in_chain, + commit, )); } // Yet adding more should fail - assert!(!mempool.add( + assert!(!mempool.add::( &HashMap::from([(signer, 0)]), false, - signed_transaction(&mut OsRng, genesis, &key, ACCOUNT_MEMPOOL_LIMIT) + Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, ACCOUNT_MEMPOOL_LIMIT)), + validators.clone(), + unsigned_in_chain, + commit, )); } diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index fb808477..7c75ac36 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -10,3 +10,5 @@ mod block; mod blockchain; #[cfg(test)] mod mempool; +#[cfg(test)] +mod p2p; diff --git a/coordinator/tributary/src/tests/p2p.rs b/coordinator/tributary/src/tests/p2p.rs new file mode 100644 index 00000000..d3e3b74c --- /dev/null +++ b/coordinator/tributary/src/tests/p2p.rs @@ -0,0 +1,11 @@ +pub use crate::P2p; + +#[derive(Clone, Debug)] +pub struct DummyP2p; + +#[async_trait::async_trait] +impl P2p for DummyP2p { + async fn broadcast(&self, _: [u8; 32], _: Vec) { + unimplemented!() + } +} diff --git a/coordinator/tributary/src/tests/transaction/mod.rs b/coordinator/tributary/src/tests/transaction/mod.rs index b264568c..bd213017 100644 --- a/coordinator/tributary/src/tests/transaction/mod.rs +++ b/coordinator/tributary/src/tests/transaction/mod.rs @@ -1,8 +1,8 @@ use core::ops::Deref; -use std::{io, collections::HashMap}; +use std::{sync::Arc, io, collections::HashMap}; use zeroize::Zeroizing; -use rand::{RngCore, CryptoRng}; +use rand::{RngCore, CryptoRng, rngs::OsRng}; use blake2::{Digest, Blake2s256}; @@ -12,11 +12,28 @@ use ciphersuite::{ }; use schnorr::SchnorrSignature; -use crate::{ReadWrite, Signed, TransactionError, TransactionKind, Transaction, verify_transaction}; +use scale::Encode; + +use ::tendermint::{ + 
ext::{Network, Signer as SignerTrait, SignatureScheme, BlockNumber, RoundNumber}, + SignedMessageFor, DataFor, Message, SignedMessage, Data, +}; + +use crate::{ + transaction::{Signed, TransactionError, TransactionKind, Transaction, verify_transaction}, + ReadWrite, + tendermint::{ + tx::{SlashVote, VoteSignature, TendermintTx}, + Validators, Signer, + }, +}; #[cfg(test)] mod signed; +#[cfg(test)] +mod tendermint; + pub fn random_signed(rng: &mut R) -> Signed { Signed { signer: ::G::random(&mut *rng), @@ -138,3 +155,69 @@ pub fn random_signed_transaction( (genesis, signed_transaction(rng, genesis, &key, nonce)) } + +pub fn new_genesis() -> [u8; 32] { + let mut genesis = [0; 32]; + OsRng.fill_bytes(&mut genesis); + genesis +} + +pub async fn tendermint_meta() -> ([u8; 32], Signer, [u8; 32], Arc) { + // signer + let genesis = new_genesis(); + let signer = + Signer::new(genesis, Zeroizing::new(::F::random(&mut OsRng))); + let validator_id = signer.validator_id().await.unwrap(); + + // schema + let signer_pub = + ::read_G::<&[u8]>(&mut validator_id.as_slice()).unwrap(); + let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1)]).unwrap()); + + (genesis, signer, validator_id, validators) +} + +pub async fn signed_from_data( + signer: ::Signer, + signer_id: N::ValidatorId, + block_number: u64, + round_number: u32, + data: DataFor, +) -> SignedMessageFor { + let msg = Message { + sender: signer_id, + block: BlockNumber(block_number), + round: RoundNumber(round_number), + data, + }; + let sig = signer.sign(&msg.encode()).await; + SignedMessage { msg, sig } +} + +pub async fn random_evidence_tx( + signer: ::Signer, + b: N::Block, +) -> TendermintTx { + // Creates a TX with an invalid valid round number + // TODO: Use a random failure reason + let data = Data::Proposal(Some(RoundNumber(0)), b); + let signer_id = signer.validator_id().await.unwrap(); + let signed = signed_from_data::(signer, signer_id, 0, 0, data).await; + TendermintTx::SlashEvidence((signed, None::>).encode()) +} + +pub fn random_vote_tx(rng: &mut R, genesis: [u8; 32]) -> TendermintTx { + // private key + let key = Zeroizing::new(::F::random(&mut *rng)); + + // vote data + let mut id = [0u8; 13]; + let mut target = [0u8; 32]; + rng.fill_bytes(&mut id); + rng.fill_bytes(&mut target); + + let mut tx = TendermintTx::SlashVote(SlashVote { id, target, sig: VoteSignature::default() }); + tx.sign(rng, genesis, &key); + + tx +} diff --git a/coordinator/tributary/src/tests/transaction/signed.rs b/coordinator/tributary/src/tests/transaction/signed.rs index b9c624f9..637be5c0 100644 --- a/coordinator/tributary/src/tests/transaction/signed.rs +++ b/coordinator/tributary/src/tests/transaction/signed.rs @@ -7,7 +7,8 @@ use blake2::{Digest, Blake2s256}; use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto}; use crate::{ - ReadWrite, Signed, Transaction, verify_transaction, + ReadWrite, + transaction::{Signed, Transaction, verify_transaction}, tests::{random_signed, random_signed_transaction}, }; diff --git a/coordinator/tributary/src/tests/transaction/tendermint.rs b/coordinator/tributary/src/tests/transaction/tendermint.rs new file mode 100644 index 00000000..f36efd12 --- /dev/null +++ b/coordinator/tributary/src/tests/transaction/tendermint.rs @@ -0,0 +1,303 @@ +use std::sync::Arc; + +use zeroize::Zeroizing; +use rand::{RngCore, rngs::OsRng}; + +use ciphersuite::{Ristretto, Ciphersuite, group::ff::Field}; +use schnorr::SchnorrSignature; + +use scale::Encode; + +use tendermint::{ + time::CanonicalInstant, + round::RoundData, 
+ Data, SignedMessageFor, commit_msg, + ext::{RoundNumber, Commit, Signer as SignerTrait}, +}; + +use serai_db::MemDb; + +use crate::{ + ReadWrite, + tendermint::{ + tx::{TendermintTx, verify_tendermint_tx}, + TendermintBlock, Signer, Validators, TendermintNetwork, + }, + tests::{ + p2p::DummyP2p, SignedTransaction, new_genesis, random_evidence_tx, random_vote_tx, + tendermint_meta, signed_from_data, + }, +}; + +type N = TendermintNetwork; + +#[test] +fn vote_tx() { + let genesis = new_genesis(); + let mut tx = random_vote_tx(&mut OsRng, genesis); + + let commit = |_: u32| -> Option>> { + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); + + // should pass + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap(); + + if let TendermintTx::SlashVote(vote) = &mut tx { + vote.sig.signature = SchnorrSignature::read(&mut [0; 64].as_slice()).unwrap(); + } else { + panic!("SlashVote TX wasn't SlashVote"); + } + + // should fail + assert!(verify_tendermint_tx::(&tx, genesis, validators, commit).is_err()); +} + +#[tokio::test] +async fn serialize_tendermint() { + // make a tendermint tx with random evidence + let (genesis, signer, _, _) = tendermint_meta().await; + let tx = random_evidence_tx::(signer.into(), TendermintBlock(vec![])).await; + let res = TendermintTx::read::<&[u8]>(&mut tx.serialize().as_ref()).unwrap(); + assert_eq!(res, tx); + + // with vote tx + let vote_tx = random_vote_tx(&mut OsRng, genesis); + let vote_res = TendermintTx::read::<&[u8]>(&mut vote_tx.serialize().as_ref()).unwrap(); + assert_eq!(vote_res, vote_tx); +} + +#[tokio::test] +async fn invalid_valid_round() { + // signer + let (genesis, signer, signer_id, validators) = tendermint_meta().await; + let commit = |_: u32| -> Option>> { + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + + let valid_round_tx = |valid_round| { + let signer = signer.clone(); + async move { + let data = Data::Proposal(valid_round, TendermintBlock(vec![])); + let signed = signed_from_data::(signer.clone().into(), signer_id, 0, 0, data).await; + (signed.clone(), TendermintTx::SlashEvidence((signed, None::>).encode())) + } + }; + + // This should be invalid evidence if a valid valid round is specified + let (_, tx) = valid_round_tx(None).await; + assert!(verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).is_err()); + + // If an invalid valid round is specified (>= current), this should be invalid evidence + let (mut signed, tx) = valid_round_tx(Some(RoundNumber(0))).await; + + // should pass + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap(); + + // change the signature + let mut random_sig = [0u8; 64]; + OsRng.fill_bytes(&mut random_sig); + signed.sig = random_sig; + let tx = TendermintTx::SlashEvidence((signed.clone(), None::>).encode()); + + // should fail + assert!(verify_tendermint_tx::(&tx, genesis, validators, commit).is_err()); +} + +#[tokio::test] +async fn invalid_precommit_signature() { + let (genesis, signer, signer_id, validators) = tendermint_meta().await; + let commit = |i: u32| -> Option>> { + assert_eq!(i, 0); + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + + let precommit = |precommit| { + let signer = signer.clone(); + async move { + let signed = + signed_from_data::(signer.clone().into(), signer_id, 1, 0, Data::Precommit(precommit)) + .await; + (signed.clone(), TendermintTx::SlashEvidence((signed, None::>).encode())) 
+ } + }; + + // Empty Precommit should fail. + assert!(verify_tendermint_tx::(&precommit(None).await.1, genesis, validators.clone(), commit) + .is_err()); + + // valid precommit signature should fail. + let block_id = [0x22u8; 32]; + let last_end_time = + RoundData::::new(RoundNumber(0), CanonicalInstant::new(commit(0).unwrap().end_time)) + .end_time(); + let commit_msg = commit_msg(last_end_time.canonical(), block_id.as_ref()); + + assert!(verify_tendermint_tx::( + &precommit(Some((block_id, signer.clone().sign(&commit_msg).await))).await.1, + genesis, + validators.clone(), + commit + ) + .is_err()); + + // any other signature can be used as evidence. + { + let (mut signed, tx) = precommit(Some((block_id, signer.sign(&[]).await))).await; + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap(); + + // So long as we can authenticate where it came from + let mut random_sig = [0u8; 64]; + OsRng.fill_bytes(&mut random_sig); + signed.sig = random_sig; + let tx = TendermintTx::SlashEvidence((signed.clone(), None::>).encode()); + assert!(verify_tendermint_tx::(&tx, genesis, validators, commit).is_err()); + } +} + +#[tokio::test] +async fn evidence_with_prevote() { + let (genesis, signer, signer_id, validators) = tendermint_meta().await; + let commit = |_: u32| -> Option>> { + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + + let prevote = |block_id| { + let signer = signer.clone(); + async move { + TendermintTx::SlashEvidence( + ( + signed_from_data::(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id)) + .await, + None::>, + ) + .encode(), + ) + } + }; + + // No prevote message should be valid as slash evidence at this time + for prevote in [prevote(None).await, prevote(Some([0x22u8; 32])).await] { + assert!(verify_tendermint_tx::(&prevote, genesis, validators.clone(), commit).is_err()); + } +} + +#[tokio::test] +async fn conflicting_msgs_evidence_tx() { + let (genesis, signer, signer_id, validators) = tendermint_meta().await; + let commit = |i: u32| -> Option>> { + assert_eq!(i, 0); + Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) + }; + + // Block b, round n + let signed_for_b_r = |block, round, data| { + let signer = signer.clone(); + async move { signed_from_data::(signer.clone().into(), signer_id, block, round, data).await } + }; + + // Proposal + { + // non-conflicting data should fail + let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(&signed_1)).encode()); + assert!(verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).is_err()); + + // conflicting data should pass + let signed_2 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(signed_2)).encode()); + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap(); + + // Except if it has a distinct round number, as we don't check cross-round conflicts + // (except for Precommit) + let signed_2 = signed_for_b_r(0, 1, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(signed_2)).encode()); + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap_err(); + + // Proposals for different block numbers should also fail as evidence + let signed_2 = signed_for_b_r(1, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, 
Some(signed_2)).encode()); + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap_err(); + } + + // Prevote + { + // non-conflicting data should fail + let signed_1 = signed_for_b_r(0, 0, Data::Prevote(Some([0x11; 32]))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(&signed_1)).encode()); + assert!(verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).is_err()); + + // conflicting data should pass + let signed_2 = signed_for_b_r(0, 0, Data::Prevote(Some([0x22; 32]))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(signed_2)).encode()); + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap(); + + // Except if it has a distinct round number, as we don't check cross-round conflicts + // (except for Precommit) + let signed_2 = signed_for_b_r(0, 1, Data::Prevote(Some([0x22; 32]))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(signed_2)).encode()); + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap_err(); + + // Proposals for different block numbers should also fail as evidence + let signed_2 = signed_for_b_r(1, 0, Data::Prevote(Some([0x22; 32]))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(signed_2)).encode()); + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap_err(); + } + + // Precommit + { + let sig = signer.sign(&[]).await; // the inner signature doesn't matter + + let signed_1 = signed_for_b_r(0, 0, Data::Precommit(Some(([0x11; 32], sig)))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(&signed_1)).encode()); + assert!(verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).is_err()); + + // For precommit, the round number is ignored + let signed_2 = signed_for_b_r(0, 1, Data::Precommit(Some(([0x22; 32], sig)))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(signed_2)).encode()); + verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).unwrap(); + + // Yet the block number isn't + let signed_2 = signed_for_b_r(1, 0, Data::Precommit(Some(([0x22; 32], sig)))).await; + let tx = TendermintTx::SlashEvidence((&signed_1, Some(signed_2)).encode()); + assert!(verify_tendermint_tx::(&tx, genesis, validators.clone(), commit).is_err()); + } + + // msgs from different senders should fail + { + let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await; + + let signer_2 = + Signer::new(genesis, Zeroizing::new(::F::random(&mut OsRng))); + let signed_id_2 = signer_2.validator_id().await.unwrap(); + let signed_2 = signed_from_data::( + signer_2.into(), + signed_id_2, + 0, + 0, + Data::Proposal(None, TendermintBlock(vec![0x22])), + ) + .await; + + let tx = TendermintTx::SlashEvidence((signed_1, Some(signed_2)).encode()); + + // update schema so that we don't fail due to invalid signature + let signer_pub = + ::read_G::<&[u8]>(&mut signer_id.as_slice()).unwrap(); + let signer_pub_2 = + ::read_G::<&[u8]>(&mut signed_id_2.as_slice()).unwrap(); + let validators = + Arc::new(Validators::new(genesis, vec![(signer_pub, 1), (signer_pub_2, 1)]).unwrap()); + + assert!(verify_tendermint_tx::(&tx, genesis, validators, commit).is_err()); + } + + // msgs with different steps should fail + { + let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![]))).await; + let signed_2 = signed_for_b_r(0, 0, Data::Prevote(None)).await; + let tx = TendermintTx::SlashEvidence((signed_1, Some(signed_2)).encode()); + assert!(verify_tendermint_tx::(&tx, genesis, 
validators.clone(), commit).is_err()); + } +} diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 95852835..9d25bc9b 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -92,12 +92,18 @@ pub enum TransactionKind<'a> { Provided(&'static str), /// An unsigned transaction, only able to be included by the block producer. + /// + /// Once an Unsigned transaction is included on-chain, it may not be included again. In order to + /// have multiple Unsigned transactions with the same values included on-chain, some distinct + /// nonce must be included in order to cause a distinct hash. Unsigned, /// A signed transaction. Signed(&'a Signed), } +// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists? +// Or should the literal Transaction be renamed to Event? pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite { /// Return what type of transaction this is. fn kind(&self) -> TransactionKind<'_>; diff --git a/coordinator/tributary/tendermint/src/ext.rs b/coordinator/tributary/tendermint/src/ext.rs index 4404089f..c04b711f 100644 --- a/coordinator/tributary/tendermint/src/ext.rs +++ b/coordinator/tributary/tendermint/src/ext.rs @@ -6,7 +6,7 @@ use thiserror::Error; use parity_scale_codec::{Encode, Decode}; -use crate::{SignedMessageFor, commit_msg}; +use crate::{SignedMessageFor, SlashEvent, commit_msg}; /// An alias for a series of traits required for a type to be usable as a validator ID, /// automatically implemented for all types satisfying those traits. @@ -21,8 +21,8 @@ impl Signature for S {} +pub trait Signature: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {} +impl Signature for S {} // Type aliases which are distinct according to the type system @@ -62,7 +62,7 @@ impl Signer for Arc { } /// A signature scheme used by validators. -pub trait SignatureScheme: Send + Sync { +pub trait SignatureScheme: Send + Sync + Clone { // Type used to identify validators. type ValidatorId: ValidatorId; /// Signature type. @@ -153,7 +153,7 @@ pub trait Weights: Send + Sync { ((self.total_weight() * 2) / 3) + 1 } /// Threshold preventing BFT consensus. - fn fault_thresold(&self) -> u64 { + fn fault_threshold(&self) -> u64 { (self.total_weight() - self.threshold()) + 1 } @@ -190,9 +190,9 @@ pub enum BlockError { } /// Trait representing a Block. -pub trait Block: Send + Sync + Clone + PartialEq + Debug + Encode + Decode { +pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode { // Type used to identify blocks. Presumably a cryptographic hash of the block. - type Id: Send + Sync + Copy + Clone + PartialEq + AsRef<[u8]> + Debug + Encode + Decode; + type Id: Send + Sync + Copy + Clone + PartialEq + Eq + AsRef<[u8]> + Debug + Encode + Decode; /// Return the deterministic, unique ID for this block. fn id(&self) -> Self::Id; @@ -200,7 +200,7 @@ pub trait Block: Send + Sync + Clone + PartialEq + Debug + Encode + Decode { /// Trait representing the distributed system Tendermint is providing consensus over. #[async_trait] -pub trait Network: Send + Sync { +pub trait Network: Sized + Send + Sync { // Type used to identify validators. type ValidatorId: ValidatorId; /// Signature scheme used by validators. @@ -265,7 +265,7 @@ pub trait Network: Send + Sync { /// /// The exact process of triggering a slash is undefined and left to the network as a whole. // TODO: We need to provide some evidence for this. 
- async fn slash(&mut self, validator: Self::ValidatorId); + async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent); /// Validate a block. async fn validate(&mut self, block: &Self::Block) -> Result<(), BlockError>; diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 7bdc11eb..fcf83fe0 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -15,10 +15,10 @@ use futures::{ }; use tokio::time::sleep; -mod time; +pub mod time; use time::{sys_time, CanonicalInstant}; -mod round; +pub mod round; mod block; use block::BlockData; @@ -29,19 +29,19 @@ pub(crate) mod message_log; pub mod ext; use ext::*; -pub(crate) fn commit_msg(end_time: u64, id: &[u8]) -> Vec { +pub fn commit_msg(end_time: u64, id: &[u8]) -> Vec { [&end_time.to_le_bytes(), id].concat().to_vec() } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)] -enum Step { +pub enum Step { Propose, Prevote, Precommit, } -#[derive(Clone, Debug, Encode, Decode)] -enum Data { +#[derive(Clone, Eq, Debug, Encode, Decode)] +pub enum Data { Proposal(Option, B), Prevote(Option), Precommit(Option<(B::Id, S)>), @@ -62,7 +62,7 @@ impl PartialEq for Data { } impl Data { - fn step(&self) -> Step { + pub fn step(&self) -> Step { match self { Data::Proposal(..) => Step::Propose, Data::Prevote(..) => Step::Prevote, @@ -71,21 +71,20 @@ impl Data { } } -#[derive(Clone, PartialEq, Debug, Encode, Decode)] -struct Message { - sender: V, +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +pub struct Message { + pub sender: V, + pub block: BlockNumber, + pub round: RoundNumber, - block: BlockNumber, - round: RoundNumber, - - data: Data, + pub data: Data, } /// A signed Tendermint consensus message to be broadcast to the other validators. -#[derive(Clone, PartialEq, Debug, Encode, Decode)] +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct SignedMessage { - msg: Message, - sig: S, + pub msg: Message, + pub sig: S, } impl SignedMessage { @@ -103,15 +102,15 @@ impl SignedMessage { } } -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -enum TendermintError { - Malicious(V), +#[derive(Clone, PartialEq, Eq, Debug)] +enum TendermintError { + Malicious(N::ValidatorId, Option>), Temporal, AlreadyHandled, } // Type aliases to abstract over generic hell -pub(crate) type DataFor = +pub type DataFor = Data<::Block, <::SignatureScheme as SignatureScheme>::Signature>; pub(crate) type MessageFor = Message< ::ValidatorId, @@ -125,6 +124,22 @@ pub type SignedMessageFor = SignedMessage< <::SignatureScheme as SignatureScheme>::Signature, >; +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] +pub enum SlashReason { + FailToPropose, + InvalidBlock, + InvalidMessage, +} + +// TODO: Move WithEvidence to a proper Evidence enum, denoting the explicit reason its faulty +// This greatly simplifies the checking process and prevents new-reasons added here not being +// handled elsewhere +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum SlashEvent { + Id(SlashReason, u64, u32), + WithEvidence(SignedMessageFor, Option>), +} + /// A machine executing the Tendermint protocol. 
pub struct TendermintMachine { network: N, @@ -239,11 +254,13 @@ impl TendermintMachine { self.reset(round, proposal).await; } - async fn slash(&mut self, validator: N::ValidatorId) { + async fn slash(&mut self, validator: N::ValidatorId, slash_event: SlashEvent) { + // TODO: If the new slash event has evidence, emit to prevent a low-importance slash from + // cancelling emission of high-importance slashes if !self.block.slashes.contains(&validator) { log::info!(target: "tendermint", "Slashing validator {}", hex::encode(validator.encode())); self.block.slashes.insert(validator); - self.network.slash(validator).await; + self.network.slash(validator, slash_event).await; } } @@ -334,7 +351,7 @@ impl TendermintMachine { if self.queue.is_empty() { Fuse::terminated() } else { future::ready(()).fuse() }; if let Some((our_message, msg, mut sig)) = futures::select_biased! { - // Handle a new block occuring externally (an external sync loop) + // Handle a new block occurring externally (an external sync loop) // Has the highest priority as it makes all other futures here irrelevant msg = self.synced_block_recv.next() => { if let Some(SyncedBlock { number, block, commit }) = msg { @@ -380,8 +397,14 @@ impl TendermintMachine { Step::Propose => { // Slash the validator for not proposing when they should've log::debug!(target: "tendermint", "Validator didn't propose when they should have"); + // this slash will be voted on. self.slash( - self.weights.proposer(self.block.number, self.block.round().number) + self.weights.proposer(self.block.number, self.block.round().number), + SlashEvent::Id( + SlashReason::FailToPropose, + self.block.number.0, + self.block.round().number.0 + ), ).await; self.broadcast(Data::Prevote(None)); }, @@ -407,31 +430,41 @@ impl TendermintMachine { } } } { - let res = self.message(msg.clone()).await; + if our_message { + assert!(sig.is_none()); + sig = Some(self.signer.sign(&msg.encode()).await); + } + let sig = sig.unwrap(); + + // TODO: message may internally call broadcast. We should check within broadcast it's not + // broadcasting our own message at this time. + let signed_msg = SignedMessage { msg: msg.clone(), sig: sig.clone() }; + let res = self.message(&signed_msg).await; if res.is_err() && our_message { panic!("honest node (ourselves) had invalid behavior"); } + // Only now should we allow broadcasts since we're sure an invariant wasn't reached causing + // us to have invalid messages. 
+
+        if res.is_ok() {
+          // Re-broadcast this since it's an original consensus message
+          self.network.broadcast(signed_msg).await;
+        }
         match res {
-          Ok(None) => {
-            if let Some(sig) = sig.take() {
-              // If it's our own message, it shouldn't already be signed
-              assert!(!our_message);
-
-              // Re-broadcast this since it's an original consensus message
-              self.network.broadcast(SignedMessage { msg: msg.clone(), sig }).await;
-            }
-          }
+          Ok(None) => {}
           Ok(Some(block)) => {
             let mut validators = vec![];
             let mut sigs = vec![];
             // Get all precommits for this round
             for (validator, msgs) in &self.block.log.log[&msg.round] {
-              if let Some(Data::Precommit(Some((id, sig)))) = msgs.get(&Step::Precommit) {
-                // If this precommit was for this block, include it
-                if id == &block.id() {
-                  validators.push(*validator);
-                  sigs.push(sig.clone());
+              if let Some(signed) = msgs.get(&Step::Precommit) {
+                if let Data::Precommit(Some((id, sig))) = &signed.msg.data {
+                  // If this precommit was for this block, include it
+                  if *id == block.id() {
+                    validators.push(*validator);
+                    sigs.push(sig.clone());
+                  }
                 }
               }
             }
@@ -453,16 +486,46 @@ impl TendermintMachine {
             log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref()));
             self.reset(msg.round, proposal).await;
           }
-          Err(TendermintError::Malicious(validator)) => self.slash(validator).await,
+          Err(TendermintError::Malicious(sender, evidence_msg)) => {
+            let current_msg = SignedMessage { msg: msg.clone(), sig: sig.clone() };
+
+            let slash = if let Some(old_msg) = evidence_msg {
+              // If the malicious message contains a block, only vote to slash
+              // TODO: Should this decision be made at a higher level?
+              if let Data::Proposal(_, _) = &current_msg.msg.data {
+                SlashEvent::Id(
+                  SlashReason::InvalidBlock,
+                  self.block.number.0,
+                  self.block.round().number.0,
+                )
+              } else {
+                // If the old msg and the new msg aren't the same, use both as evidence.
+                SlashEvent::WithEvidence(
+                  old_msg.clone(),
+                  if old_msg != current_msg { Some(current_msg.clone()) } else { None },
+                )
+              }
+            } else {
+              // We don't have evidence, so slash via a vote.
+              SlashEvent::Id(
+                SlashReason::InvalidMessage,
+                self.block.number.0,
+                self.block.round().number.0,
+              )
+            };
+
+            // Each message that we're voting to slash over needs to be re-broadcast so other
+            // validators also trigger their own votes
+            // TODO: should this be inside the slash function?
+ if let SlashEvent::Id(_, _, _) = slash { + self.network.broadcast(current_msg).await; + } + + self.slash(sender, slash).await + } Err(TendermintError::Temporal) => (), Err(TendermintError::AlreadyHandled) => (), } - - if our_message { - assert!(sig.is_none()); - let sig = self.signer.sign(&msg.encode()).await; - self.network.broadcast(SignedMessage { msg, sig }).await; - } } } } @@ -472,19 +535,19 @@ impl TendermintMachine { // Returns Err if the signature was invalid fn verify_precommit_signature( &self, - sender: N::ValidatorId, - round: RoundNumber, - data: &DataFor, - ) -> Result> { - if let Data::Precommit(Some((id, sig))) = data { + signed: &SignedMessageFor, + ) -> Result> { + let msg = &signed.msg; + if let Data::Precommit(Some((id, sig))) = &msg.data { // Also verify the end_time of the commit // Only perform this verification if we already have the end_time // Else, there's a DoS where we receive a precommit for some round infinitely in the future // which forces us to calculate every end time - if let Some(end_time) = self.block.end_time.get(&round) { - if !self.validators.verify(sender, &commit_msg(end_time.canonical(), id.as_ref()), sig) { + if let Some(end_time) = self.block.end_time.get(&msg.round) { + if !self.validators.verify(msg.sender, &commit_msg(end_time.canonical(), id.as_ref()), sig) + { log::warn!(target: "tendermint", "Validator produced an invalid commit signature"); - Err(TendermintError::Malicious(sender))?; + Err(TendermintError::Malicious(msg.sender, Some(signed.clone())))?; } return Ok(true); } @@ -494,24 +557,26 @@ impl TendermintMachine { async fn message( &mut self, - msg: MessageFor, - ) -> Result, TendermintError> { + signed: &SignedMessageFor, + ) -> Result, TendermintError> { + let msg = &signed.msg; if msg.block != self.block.number { Err(TendermintError::Temporal)?; } // If this is a precommit, verify its signature - self.verify_precommit_signature(msg.sender, msg.round, &msg.data)?; + self.verify_precommit_signature(signed)?; // Only let the proposer propose if matches!(msg.data, Data::Proposal(..)) && (msg.sender != self.weights.proposer(msg.block, msg.round)) { log::warn!(target: "tendermint", "Validator who wasn't the proposer proposed"); - Err(TendermintError::Malicious(msg.sender))?; + // TODO: This should have evidence + Err(TendermintError::Malicious(msg.sender, None))?; }; - if !self.block.log.log(msg.clone())? { + if !self.block.log.log(signed.clone())? 
{ return Err(TendermintError::AlreadyHandled); } log::debug!(target: "tendermint", "received new tendermint message"); @@ -524,16 +589,17 @@ impl TendermintMachine { let proposer = self.weights.proposer(self.block.number, msg.round); // Get the proposal - if let Some(Data::Proposal(_, block)) = self.block.log.get(msg.round, proposer, Step::Propose) - { - // Check if it has gotten a sufficient amount of precommits - // Use a junk signature since message equality disregards the signature - if self.block.log.has_consensus( - msg.round, - Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))), - ) { - log::debug!(target: "tendermint", "block {} has consensus", msg.block.0); - return Ok(Some(block.clone())); + if let Some(proposal_signed) = self.block.log.get(msg.round, proposer, Step::Propose) { + if let Data::Proposal(_, block) = &proposal_signed.msg.data { + // Check if it has gotten a sufficient amount of precommits + // Use a junk signature since message equality disregards the signature + if self.block.log.has_consensus( + msg.round, + Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))), + ) { + log::debug!(target: "tendermint", "block {} has consensus", msg.block.0); + return Ok(Some(block.clone())); + } } } } @@ -546,20 +612,20 @@ impl TendermintMachine { } else if msg.round.0 > self.block.round().number.0 { // 55-56 // Jump, enabling processing by the below code - if self.block.log.round_participation(msg.round) > self.weights.fault_thresold() { + if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { // If this round already has precommit messages, verify their signatures let round_msgs = self.block.log.log[&msg.round].clone(); for (validator, msgs) in &round_msgs { - if let Some(data) = msgs.get(&Step::Precommit) { - if let Ok(res) = self.verify_precommit_signature(*validator, msg.round, data) { + if let Some(existing) = msgs.get(&Step::Precommit) { + if let Ok(res) = self.verify_precommit_signature(existing) { // Ensure this actually verified the signature instead of believing it shouldn't yet - debug_assert!(res); + assert!(res); } else { // Remove the message so it isn't counted towards forming a commit/included in one // This won't remove the fact the precommitted for this block hash in the MessageLog // TODO: Don't even log these in the first place until we jump, preventing needing // to do this in the first place - self + let msg = self .block .log .log @@ -567,8 +633,11 @@ impl TendermintMachine { .unwrap() .get_mut(validator) .unwrap() - .remove(&Step::Precommit); - self.slash(*validator).await; + .remove(&Step::Precommit) + .unwrap(); + + // Slash the validator for publishing an invalid commit signature + self.slash(*validator, SlashEvent::WithEvidence(msg, None)).await; } } } @@ -583,6 +652,9 @@ impl TendermintMachine { } } + // msg.round is now guaranteed to be equal to self.block.round().number + debug_assert_eq!(msg.round, self.block.round().number); + // The paper executes these checks when the step is prevote. 
Making sure this message warrants // rerunning these checks is a sane optimization since message instances is a full iteration // of the round map @@ -610,10 +682,14 @@ impl TendermintMachine { // All further operations require actually having the proposal in question let proposer = self.weights.proposer(self.block.number, self.block.round().number); - let (vr, block) = if let Some(Data::Proposal(vr, block)) = + let (vr, block) = if let Some(proposal_signed) = self.block.log.get(self.block.round().number, proposer, Step::Propose) { - (vr, block) + if let Data::Proposal(vr, block) = &proposal_signed.msg.data { + (vr, block) + } else { + panic!("message for Step::Propose didn't have Data::Proposal"); + } } else { return Ok(None); }; @@ -626,7 +702,8 @@ impl TendermintMachine { Err(BlockError::Temporal) => (false, Ok(None)), Err(BlockError::Fatal) => (false, { log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); - Err(TendermintError::Malicious(proposer)) + // TODO: Produce evidence of this for the higher level code to decide what to do with + Err(TendermintError::Malicious(proposer, None)) }), }; // Create a raw vote which only requires block validity as a basis for the actual vote. @@ -643,7 +720,7 @@ impl TendermintMachine { // Malformed message if vr.0 >= self.block.round().number.0 { log::warn!(target: "tendermint", "Validator claimed a round from the future was valid"); - Err(TendermintError::Malicious(msg.sender))?; + Err(TendermintError::Malicious(msg.sender, Some(signed.clone())))?; } if self.block.log.has_consensus(*vr, Data::Prevote(Some(block.id()))) { @@ -682,7 +759,8 @@ impl TendermintMachine { Err(BlockError::Temporal) => (), Err(BlockError::Fatal) => { log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); - Err(TendermintError::Malicious(proposer))? + // TODO: Produce evidence of this for the higher level code to decide what to do with + Err(TendermintError::Malicious(proposer, None))? } }; diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary/tendermint/src/message_log.rs index e914f694..f731d3c0 100644 --- a/coordinator/tributary/tendermint/src/message_log.rs +++ b/coordinator/tributary/tendermint/src/message_log.rs @@ -2,12 +2,12 @@ use std::{sync::Arc, collections::HashMap}; use log::debug; -use crate::{ext::*, RoundNumber, Step, Data, DataFor, MessageFor, TendermintError}; +use crate::{ext::*, RoundNumber, Step, Data, DataFor, TendermintError, SignedMessageFor}; -type RoundLog = HashMap<::ValidatorId, HashMap>>; +type RoundLog = HashMap<::ValidatorId, HashMap>>; pub(crate) struct MessageLog { weights: Arc, - precommitted: HashMap::Id>, + precommitted: HashMap>, pub(crate) log: HashMap>, } @@ -17,38 +17,40 @@ impl MessageLog { } // Returns true if it's a new message - pub(crate) fn log( - &mut self, - msg: MessageFor, - ) -> Result> { + pub(crate) fn log(&mut self, signed: SignedMessageFor) -> Result> { + let msg = &signed.msg; let round = self.log.entry(msg.round).or_insert_with(HashMap::new); let msgs = round.entry(msg.sender).or_insert_with(HashMap::new); // Handle message replays without issue. 
It's only multiple messages which is malicious let step = msg.data.step(); if let Some(existing) = msgs.get(&step) { - if existing != &msg.data { + if existing.msg.data != msg.data { debug!( target: "tendermint", "Validator sent multiple messages for the same block + round + step" ); - Err(TendermintError::Malicious(msg.sender))?; + Err(TendermintError::Malicious(msg.sender, Some(existing.clone())))?; } return Ok(false); } // If they already precommitted to a distinct hash, error - if let Data::Precommit(Some((hash, _))) = &msg.data { + if let Data::Precommit(Some((hash, _))) = msg.data { if let Some(prev) = self.precommitted.get(&msg.sender) { - if hash != prev { - debug!(target: "tendermint", "Validator precommitted to multiple blocks"); - Err(TendermintError::Malicious(msg.sender))?; + if let Data::Precommit(Some((prev_hash, _))) = prev.msg.data { + if hash != prev_hash { + debug!(target: "tendermint", "Validator precommitted to multiple blocks"); + Err(TendermintError::Malicious(msg.sender, Some(prev.clone())))?; + } + } else { + panic!("message in precommitted wasn't Precommit"); } } - self.precommitted.insert(msg.sender, *hash); + self.precommitted.insert(msg.sender, signed.clone()); } - msgs.insert(step, msg.data); + msgs.insert(step, signed); Ok(true) } @@ -61,7 +63,7 @@ impl MessageLog { if let Some(msg) = msgs.get(&data.step()) { let validator_weight = self.weights.weight(*participant); participating += validator_weight; - if &data == msg { + if data == msg.msg.data { weight += validator_weight; } } @@ -102,7 +104,7 @@ impl MessageLog { round: RoundNumber, sender: N::ValidatorId, step: Step, - ) -> Option<&DataFor> { + ) -> Option<&SignedMessageFor> { self.log.get(&round).and_then(|round| round.get(&sender).and_then(|msgs| msgs.get(&step))) } } diff --git a/coordinator/tributary/tendermint/src/round.rs b/coordinator/tributary/tendermint/src/round.rs index 18cc3c55..c55e0702 100644 --- a/coordinator/tributary/tendermint/src/round.rs +++ b/coordinator/tributary/tendermint/src/round.rs @@ -13,16 +13,16 @@ use crate::{ ext::{RoundNumber, Network}, }; -pub(crate) struct RoundData { +pub struct RoundData { _network: PhantomData, - pub(crate) number: RoundNumber, - pub(crate) start_time: CanonicalInstant, - pub(crate) step: Step, - pub(crate) timeouts: HashMap, + pub number: RoundNumber, + pub start_time: CanonicalInstant, + pub step: Step, + pub timeouts: HashMap, } impl RoundData { - pub(crate) fn new(number: RoundNumber, start_time: CanonicalInstant) -> Self { + pub fn new(number: RoundNumber, start_time: CanonicalInstant) -> Self { RoundData { _network: PhantomData, number, @@ -46,7 +46,7 @@ impl RoundData { self.start_time + offset } - pub(crate) fn end_time(&self) -> CanonicalInstant { + pub fn end_time(&self) -> CanonicalInstant { self.timeout(Step::Precommit) } diff --git a/coordinator/tributary/tendermint/src/time.rs b/coordinator/tributary/tendermint/src/time.rs index 3973b147..0daf4b95 100644 --- a/coordinator/tributary/tendermint/src/time.rs +++ b/coordinator/tributary/tendermint/src/time.rs @@ -2,7 +2,7 @@ use core::ops::Add; use std::time::{UNIX_EPOCH, SystemTime, Instant, Duration}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub(crate) struct CanonicalInstant { +pub struct CanonicalInstant { /// Time since the epoch. time: u64, /// An Instant synchronized with the above time. 
@@ -14,7 +14,7 @@ pub(crate) fn sys_time(time: u64) -> SystemTime { } impl CanonicalInstant { - pub(crate) fn new(time: u64) -> CanonicalInstant { + pub fn new(time: u64) -> CanonicalInstant { // This is imprecise yet should be precise enough, as it'll resolve within a few ms let instant_now = Instant::now(); let sys_now = SystemTime::now(); @@ -27,11 +27,11 @@ impl CanonicalInstant { CanonicalInstant { time, instant: synced_instant } } - pub(crate) fn canonical(&self) -> u64 { + pub fn canonical(&self) -> u64 { self.time } - pub(crate) fn instant(&self) -> Instant { + pub fn instant(&self) -> Instant { self.instant } } diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary/tendermint/tests/ext.rs index c6248277..adc7637c 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary/tendermint/tests/ext.rs @@ -12,7 +12,7 @@ use tokio::{sync::RwLock, time::sleep}; use tendermint_machine::{ ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, - TendermintMachine, TendermintHandle, + SlashEvent, TendermintMachine, TendermintHandle, }; type TestValidatorId = u16; @@ -36,6 +36,7 @@ impl Signer for TestSigner { } } +#[derive(Clone)] struct TestSignatureScheme; impl SignatureScheme for TestSignatureScheme { type ValidatorId = TestValidatorId; @@ -83,7 +84,7 @@ impl Weights for TestWeights { } } -#[derive(Clone, PartialEq, Debug, Encode, Decode)] +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] struct TestBlock { id: TestBlockId, valid: Result<(), BlockError>, @@ -131,7 +132,7 @@ impl Network for TestNetwork { } } - async fn slash(&mut self, _: TestValidatorId) { + async fn slash(&mut self, _: TestValidatorId, _: SlashEvent) { dbg!("Slash"); todo!() }
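
The central change above is that Network::slash now receives a SlashEvent alongside the validator, so the network layer can decide whether to publish self-verifying evidence or merely vote. Below is a rough, non-authoritative sketch of that mapping; the free function, its genesis/key/target parameters, the reconstructed generic bounds, and the way the 13-byte vote id is packed are all assumptions made for illustration. Only the TendermintTx constructors and the encode/sign calls mirror the test helpers above.

use zeroize::Zeroizing;
use rand::rngs::OsRng;
use ciphersuite::{Ciphersuite, Ristretto};
use scale::Encode;

use tendermint::{ext::Network, SlashEvent};

use crate::tendermint::tx::{TendermintTx, SlashVote, VoteSignature};

// Illustrative mapping from a SlashEvent into a publishable TendermintTx. The id/target
// derivation below is a guess made purely for this sketch.
fn slash_event_to_tx<N: Network>(
  genesis: [u8; 32],
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  target: [u8; 32],
  event: SlashEvent<N>,
) -> TendermintTx {
  match event {
    // Evidence-backed slashes embed the offending signed message(s), letting any validator
    // re-verify them with verify_tendermint_tx
    SlashEvent::WithEvidence(msg, conflicting) => {
      TendermintTx::SlashEvidence((msg, conflicting).encode())
    }
    // Evidence-free slashes (e.g. FailToPropose) become a signed vote, which only has effect
    // once enough validators publish matching votes
    SlashEvent::Id(_reason, block_number, round_number) => {
      // Pack the block and round into the 13-byte vote ID (assumed layout)
      let mut id = [0u8; 13];
      id[.. 8].copy_from_slice(&block_number.to_le_bytes());
      id[8 .. 12].copy_from_slice(&round_number.to_le_bytes());

      let mut tx =
        TendermintTx::SlashVote(SlashVote { id, target, sig: VoteSignature::default() });
      tx.sign(&mut OsRng, genesis, key);
      tx
    }
  }
}

The real coordinator wires this through TendermintNetwork, which also has to gossip the resulting transaction; this sketch stops at constructing it.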
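
Going the other way, every SlashEvidence payload in the tests is the SCALE encoding of a signed message plus an optional second, conflicting message from the same sender. A hedged sketch of reading that blob back follows; the helper name and the reconstructed generic bounds are assumptions, while the tuple shape is exactly what the tests encode.

use scale::Decode;

use tendermint::{ext::Network, SignedMessageFor};

// Hypothetical helper mirroring the tests' encoding: a signed message plus an optional
// conflicting message, returned as None if the payload doesn't decode.
fn decode_evidence<N: Network>(
  mut payload: &[u8],
) -> Option<(SignedMessageFor<N>, Option<SignedMessageFor<N>>)> {
  <(SignedMessageFor<N>, Option<SignedMessageFor<N>>)>::decode(&mut payload).ok()
}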
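
On the fault_thresold to fault_threshold rename: the default formulas in the Weights trait mean that, with four equally weighted validators, three are required to reach BFT consensus and two suffice to prevent it. A small standalone check of that arithmetic; the EqualWeights struct here is a stand-in for illustration, not the crate's Weights trait.

// Stand-in reproducing the default threshold math shown in ext.rs.
struct EqualWeights {
  validators: u64,
}

impl EqualWeights {
  fn total_weight(&self) -> u64 {
    self.validators
  }
  // Threshold needed for BFT consensus: just over two thirds of the total weight
  fn threshold(&self) -> u64 {
    ((self.total_weight() * 2) / 3) + 1
  }
  // Threshold preventing BFT consensus
  fn fault_threshold(&self) -> u64 {
    (self.total_weight() - self.threshold()) + 1
  }
}

fn main() {
  let weights = EqualWeights { validators: 4 };
  assert_eq!(weights.threshold(), 3); // (4 * 2) / 3 + 1
  assert_eq!(weights.fault_threshold(), 2); // 4 - 3 + 1: two faulty validators can halt consensus
}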