diff --git a/coordinator/src/db.rs b/coordinator/src/db.rs
deleted file mode 100644
index ce65f7e4..00000000
--- a/coordinator/src/db.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-pub use serai_db::*;
-
-#[derive(Debug)]
-pub struct MainDb<D: Db>(pub D);
-impl<D: Db> MainDb<D> {
-  pub fn new(db: D) -> Self {
-    Self(db)
-  }
-
-  fn main_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
-    D::key(b"MAIN", dst, key)
-  }
-
-  fn substrate_block_key() -> Vec<u8> {
-    Self::main_key(b"substrate_block", [])
-  }
-  pub fn set_last_substrate_block(&mut self, block: u64) {
-    let mut txn = self.0.txn();
-    txn.put(Self::substrate_block_key(), block.to_le_bytes());
-    txn.commit();
-  }
-  pub fn last_substrate_block(&self) -> u64 {
-    u64::from_le_bytes(
-      self.0.get(Self::substrate_block_key()).unwrap_or(vec![0; 8]).try_into().unwrap(),
-    )
-  }
-
-  fn event_key(id: &[u8], index: u32) -> Vec<u8> {
-    Self::main_key(b"event", [id, index.to_le_bytes().as_ref()].concat())
-  }
-  pub fn handle_event(&mut self, id: [u8; 32], index: u32) {
-    let mut txn = self.0.txn();
-    txn.put(Self::event_key(&id, index), []);
-    txn.commit();
-  }
-  pub fn handled_event(&self, id: [u8; 32], index: u32) -> bool {
-    self.0.get(Self::event_key(&id, index)).is_some()
-  }
-}
diff --git a/coordinator/src/substrate/db.rs b/coordinator/src/substrate/db.rs
new file mode 100644
index 00000000..c5fefdca
--- /dev/null
+++ b/coordinator/src/substrate/db.rs
@@ -0,0 +1,36 @@
+pub use serai_db::*;
+
+#[derive(Debug)]
+pub struct SubstrateDb<D: Db>(pub D);
+impl<D: Db> SubstrateDb<D> {
+  pub fn new(db: D) -> Self {
+    Self(db)
+  }
+
+  fn substrate_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
+    D::key(b"SUBSTRATE", dst, key)
+  }
+
+  fn block_key() -> Vec<u8> {
+    Self::substrate_key(b"block", [])
+  }
+  pub fn set_last_block(&mut self, block: u64) {
+    let mut txn = self.0.txn();
+    txn.put(Self::block_key(), block.to_le_bytes());
+    txn.commit();
+  }
+  pub fn last_block(&self) -> u64 {
+    u64::from_le_bytes(self.0.get(Self::block_key()).unwrap_or(vec![0; 8]).try_into().unwrap())
+  }
+
+  fn event_key(id: &[u8], index: u32) -> Vec<u8> {
+    Self::substrate_key(b"event", [id, index.to_le_bytes().as_ref()].concat())
+  }
+  pub fn handled_event<G: Get>(getter: &G, id: [u8; 32], index: u32) -> bool {
+    getter.get(Self::event_key(&id, index)).is_some()
+  }
+  pub fn handle_event(txn: &mut D::Transaction<'_>, id: [u8; 32], index: u32) {
+    assert!(!Self::handled_event(txn, id, index));
+    txn.put(Self::event_key(&id, index), []);
+  }
+}
diff --git a/coordinator/src/substrate.rs b/coordinator/src/substrate/mod.rs
similarity index 75%
rename from coordinator/src/substrate.rs
rename to coordinator/src/substrate/mod.rs
index 404d4642..fab0bc74 100644
--- a/coordinator/src/substrate.rs
+++ b/coordinator/src/substrate/mod.rs
@@ -4,7 +4,7 @@ use std::collections::{HashSet, HashMap};
 use zeroize::Zeroizing;
 
 use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
-use frost::{Participant, ThresholdParams};
+use frost::ThresholdParams;
 
 use serai_client::{
   SeraiError, Block, Serai,
@@ -17,36 +17,31 @@ use serai_client::{
   tokens::{primitives::OutInstructionWithBalance, TokensEvent},
 };
 
+use serai_db::DbTxn;
+
 use tributary::Tributary;
 
 use processor_messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage};
 
-use crate::{Db, MainDb, P2p, processor::Processor};
+use crate::{Db, P2p, processor::Processor, tributary::TributarySpec};
 
-async fn get_coin_key(serai: &Serai, set: ValidatorSet) -> Result<Option<Vec<u8>>, SeraiError> {
-  Ok(serai.get_keys(set).await?.map(|keys| keys.1.into_inner()))
-}
+mod db;
+pub use db::*;
 
 async fn in_set(
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   serai: &Serai,
   set: ValidatorSet,
-) -> Result<Option<Option<Participant>>, SeraiError> {
+) -> Result<Option<bool>, SeraiError> {
   let Some(data) = serai.get_validator_set(set).await? else {
     return Ok(None);
   };
   let key = (Ristretto::generator() * key.deref()).to_bytes();
-  Ok(Some(
-    data
-      .participants
-      .iter()
-      .position(|(participant, _)| participant.0 == key)
-      .map(|index| Participant::new((index + 1).try_into().unwrap()).unwrap()),
-  ))
+  Ok(Some(data.participants.iter().any(|(participant, _)| participant.0 == key)))
 }
 
 async fn handle_new_set<D: Db, Pro: Processor, P: P2p>(
-  db: &mut MainDb<D>,
+  db: D,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   p2p: &P,
   processor: &mut Pro,
@@ -54,29 +49,18 @@ async fn handle_new_set<D: Db, Pro: Processor, P: P2p>(
   block: &Block,
   set: ValidatorSet,
 ) -> Result<(), SeraiError> {
-  if let Some(i) = in_set(key, serai, set).await?.expect("NewSet for set which doesn't exist") {
+  if in_set(key, serai, set).await?.expect("NewSet for set which doesn't exist") {
     let set_data = serai.get_validator_set(set).await?.expect("NewSet for set which doesn't exist");
 
-    let n = u16::try_from(set_data.participants.len()).unwrap();
-    let t = (2 * (n / 3)) + 1;
-
-    let mut validators = HashMap::new();
-    for (l, (participant, amount)) in set_data.participants.iter().enumerate() {
-      // TODO: Ban invalid keys from being validators on the Serai side
-      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
-        .expect("invalid key registered as participant");
-      // Give one weight on Tributary per bond instance
-      validators.insert(participant, amount.0 / set_data.bond.0);
-    }
+    let spec = TributarySpec::new(block.hash(), block.time().unwrap(), set, set_data);
 
     // TODO: Do something with this
     let tributary = Tributary::<_, crate::tributary::Transaction, _>::new(
-      // TODO2: Use a DB on a dedicated volume
-      db.0.clone(),
-      crate::tributary::genesis(block.hash(), set),
-      block.time().expect("Serai block didn't have a timestamp set"),
+      db,
+      spec.genesis(),
+      spec.start_time(),
       key.clone(),
-      validators,
+      spec.validators(),
       p2p.clone(),
     )
     .await
@@ -91,7 +75,14 @@
       .send(CoordinatorMessage::KeyGen(
         processor_messages::key_gen::CoordinatorMessage::GenerateKey {
           id: KeyGenId { set, attempt: 0 },
-          params: ThresholdParams::new(t, n, i).unwrap(),
+          params: ThresholdParams::new(
+            spec.t(),
+            spec.n(),
+            spec
+              .i(Ristretto::generator() * key.deref())
+              .expect("In set for a set we aren't in set for"),
+          )
+          .unwrap(),
         },
       ))
       .await;
@@ -100,8 +91,7 @@ async fn handle_new_set<D: Db, Pro: Processor, P: P2p>(
   Ok(())
 }
 
-async fn handle_key_gen<D: Db, Pro: Processor>(
-  db: &mut MainDb<D>,
+async fn handle_key_gen<Pro: Processor>(
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   processor: &mut Pro,
   serai: &Serai,
@@ -109,11 +99,7 @@ async fn handle_key_gen<D: Db, Pro: Processor>(
   set: ValidatorSet,
   key_pair: KeyPair,
 ) -> Result<(), SeraiError> {
-  if in_set(key, serai, set)
-    .await?
-    .expect("KeyGen occurred for a set which doesn't exist")
-    .is_some()
-  {
+  if in_set(key, serai, set).await?.expect("KeyGen occurred for a set which doesn't exist") {
     // TODO: Check how the processor handles this being fired multiple times
     processor
       .send(CoordinatorMessage::Substrate(
@@ -137,9 +123,7 @@ async fn handle_key_gen<D: Db, Pro: Processor>(
   Ok(())
 }
 
-async fn handle_batch_and_burns<D: Db, Pro: Processor>(
-  db: &mut MainDb<D>,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+async fn handle_batch_and_burns<Pro: Processor>(
   processor: &mut Pro,
   serai: &Serai,
   block: &Block,
@@ -213,13 +197,11 @@ serai_time: block.time().unwrap(),
             coin_latest_finalized_block,
           },
-          key: get_coin_key(
-            serai,
-            // TODO2
-            ValidatorSet { network, session: Session(0) },
-          )
-          .await?
-          .expect("batch/burn for network which never set keys"),
+          key: serai
+            .get_keys(ValidatorSet { network, session: Session(0) }) // TODO2
+            .await?
+            .map(|keys| keys.1.into_inner())
+            .expect("batch/burn for network which never set keys"),
           burns: burns.remove(&network).unwrap(),
         },
       ))
       .await;
@@ -232,7 +214,7 @@
 // Handle a specific Substrate block, returning an error when it fails to get data
 // (not blocking / holding)
 async fn handle_block<D: Db, Pro: Processor, P: P2p>(
-  db: &mut MainDb<D>,
+  db: &mut SubstrateDb<D>,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   p2p: &P,
   processor: &mut Pro,
@@ -250,26 +232,31 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
     // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000
     // events will successfully be incrementally handled (though the Serai connection should be
     // stable)
-    if !db.handled_event(hash, event_id) {
+    if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
       if let ValidatorSetsEvent::NewSet { set } = new_set {
-        handle_new_set(db, key, p2p, processor, serai, &block, set).await?;
+        // TODO2: Use a DB on a dedicated volume
+        handle_new_set(db.0.clone(), key, p2p, processor, serai, &block, set).await?;
      } else {
        panic!("NewSet event wasn't NewSet: {new_set:?}");
      }
-      db.handle_event(hash, event_id);
+      let mut txn = db.0.txn();
+      SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
+      txn.commit();
    }
    event_id += 1;
  }
 
   // If a key pair was confirmed, inform the processor
   for key_gen in serai.get_key_gen_events(hash).await? {
-    if !db.handled_event(hash, event_id) {
+    if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
      if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen {
-        handle_key_gen(db, key, processor, serai, &block, set, key_pair).await?;
+        handle_key_gen(key, processor, serai, &block, set, key_pair).await?;
      } else {
        panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
      }
-      db.handle_event(hash, event_id);
+      let mut txn = db.0.txn();
+      SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
+      txn.commit();
    }
    event_id += 1;
  }
@@ -279,31 +266,33 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
   // following events share data collection
   // This does break the uniqueness of (hash, event_id) -> one event, yet
   // (network, (hash, event_id)) remains valid as a unique ID for an event
-  if !db.handled_event(hash, event_id) {
-    handle_batch_and_burns(db, key, processor, serai, &block).await?;
+  if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
+    handle_batch_and_burns(processor, serai, &block).await?;
   }
-  db.handle_event(hash, event_id);
+  let mut txn = db.0.txn();
+  SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
+  txn.commit();
 
   Ok(())
 }
 
 pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(
-  db: &mut MainDb<D>,
+  db: &mut SubstrateDb<D>,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   p2p: &P,
   processor: &mut Pro,
   serai: &Serai,
-  last_substrate_block: &mut u64,
+  last_block: &mut u64,
 ) -> Result<(), SeraiError> {
   // Check if there's been a new Substrate block
   let latest = serai.get_latest_block().await?;
   let latest_number = latest.number();
-  if latest_number == *last_substrate_block {
+  if latest_number == *last_block {
     return Ok(());
   }
   let mut latest = Some(latest);
 
-  for b in (*last_substrate_block + 1) ..= latest_number {
+  for b in (*last_block + 1) ..= latest_number {
     handle_block(
       db,
       key,
@@ -320,8 +309,8 @@
       },
     )
     .await?;
-    *last_substrate_block += 1;
-    db.set_last_substrate_block(*last_substrate_block);
+    *last_block += 1;
+    db.set_last_block(*last_block);
   }
 
   Ok(())