#![allow(unused_variables)]
#![allow(unreachable_code)]
#![allow(clippy::diverging_sub_expression)]

use core::ops::Deref;
use std::{
  sync::Arc,
  time::{SystemTime, Duration},
  collections::{VecDeque, HashMap},
};

use zeroize::Zeroizing;
use rand_core::OsRng;

use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

use serai_db::{DbTxn, Db, MemDb};
use serai_client::Serai;

use tokio::{
  sync::{
    mpsc::{self, UnboundedSender},
    RwLock,
  },
  time::sleep,
};

use ::tributary::{
  ReadWrite, ProvidedError, TransactionKind, Transaction as TransactionTrait, Block, Tributary,
  TributaryReader,
};

mod tributary;
#[rustfmt::skip]
use crate::tributary::{TributarySpec, SignData, Transaction, TributaryDb, scanner::RecognizedIdType};

mod db;
use db::MainDb;

mod p2p;
pub use p2p::*;

use processor_messages::{key_gen, sign, coordinator, ProcessorMessage};

pub mod processors;
use processors::Processors;

mod substrate;

#[cfg(test)]
pub mod tests;

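// Tributaries recognized by scan_substrate, but not yet picked up by scan_tributaries, are
// queued here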
// This is a static to satisfy lifetime expectations
lazy_static::lazy_static! {
  static ref NEW_TRIBUTARIES: RwLock<VecDeque<TributarySpec>> = RwLock::new(VecDeque::new());
}

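// A Tributary this coordinator is participating in, alongside its specification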
pub struct ActiveTributary<D: Db, P: P2p> {
  pub spec: TributarySpec,
  pub tributary: Arc<RwLock<Tributary<D, Transaction, P>>>,
}

// Adds a tributary into the specified HashMap
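// and returns a reader for it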
async fn add_tributary<D: Db, P: P2p>(
  db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  p2p: P,
  tributaries: &mut HashMap<[u8; 32], ActiveTributary<D, P>>,
  spec: TributarySpec,
) -> TributaryReader<D, Transaction> {
  let tributary = Tributary::<_, Transaction, _>::new(
    // TODO2: Use a db on a distinct volume
    db,
    spec.genesis(),
    spec.start_time(),
    key,
    spec.validators(),
    p2p,
  )
  .await
  .unwrap();

  let reader = tributary.reader();

  tributaries.insert(
    tributary.genesis(),
    ActiveTributary { spec, tributary: Arc::new(RwLock::new(tributary)) },
  );

  reader
}

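// Scan the Serai (Substrate) blockchain for new blocks, saving any newly established Tributaries
// to the database and queueing them for the Tributary scanner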
pub async fn scan_substrate<D: Db, Pro: Processors>(
  db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  processors: Pro,
  serai: Serai,
) {
  let mut db = substrate::SubstrateDb::new(db);
  let mut last_substrate_block = db.last_block();

  loop {
    match substrate::handle_new_blocks(
      &mut db,
      &key,
      |db: &mut D, spec: TributarySpec| {
        // Save it to the database
        MainDb::new(db).add_active_tributary(&spec);

        // Add it to the queue
        // If we reboot before this is read from the queue, the fact it was saved to the database
        // means it'll be handled on reboot
        async {
          NEW_TRIBUTARIES.write().await.push_back(spec);
        }
      },
      &processors,
      &serai,
      &mut last_substrate_block,
    )
    .await
    {
      // TODO2: Should this use a notification system for new blocks?
      // Right now it's sleeping for half the block time.
      Ok(()) => sleep(Duration::from_secs(3)).await,
      Err(e) => {
        log::error!("couldn't communicate with serai node: {e}");
        sleep(Duration::from_secs(5)).await;
      }
    }
  }
}

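// Scan every active Tributary for new blocks, picking up any Tributaries queued on
// NEW_TRIBUTARIES along the way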
#[allow(clippy::type_complexity)]
pub async fn scan_tributaries<D: Db, Pro: Processors, P: P2p>(
  raw_db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  recognized_id_send: UnboundedSender<([u8; 32], RecognizedIdType, [u8; 32])>,
  p2p: P,
  processors: Pro,
  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
) {
  let mut tributary_readers = vec![];
  for ActiveTributary { spec, tributary } in tributaries.read().await.values() {
    tributary_readers.push((spec.clone(), tributary.read().await.reader()));
  }

  // Handle new Tributary blocks
  let mut tributary_db = tributary::TributaryDb::new(raw_db.clone());
  loop {
    // The following handle_new_blocks function may take an arbitrary amount of time
    // Accordingly, it may take a long time to acquire a write lock on the tributaries table
    // By definition of NEW_TRIBUTARIES, we allow tributaries to be added almost immediately,
    // meaning the Substrate scanner won't become blocked on this
    {
      let mut new_tributaries = NEW_TRIBUTARIES.write().await;
      while let Some(spec) = new_tributaries.pop_front() {
        let reader = add_tributary(
          raw_db.clone(),
          key.clone(),
          p2p.clone(),
          // This is a short-lived write acquisition, which is why it should be fine
          &mut *tributaries.write().await,
          spec.clone(),
        )
        .await;

        tributary_readers.push((spec, reader));
      }
    }

    for (spec, reader) in &tributary_readers {
      tributary::scanner::handle_new_blocks::<_, _>(
        &mut tributary_db,
        &key,
        &recognized_id_send,
        &processors,
        spec,
        reader,
      )
      .await;
    }

    // Sleep for half the block time
    // TODO2: Should we define a notification system for when a new block occurs?
    sleep(Duration::from_secs((Tributary::<D, Transaction, P>::block_time() / 2).into())).await;
  }
}

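// If a Tributary's tip is over a minute old, broadcast a heartbeat so peers send us the blocks
// we're missing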
#[allow(clippy::type_complexity)]
pub async fn heartbeat_tributaries<D: Db, P: P2p>(
  p2p: P,
  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
) {
  let ten_blocks_of_time =
    Duration::from_secs((10 * Tributary::<D, Transaction, P>::block_time()).into());

  loop {
    for ActiveTributary { spec: _, tributary } in tributaries.read().await.values() {
      let tributary = tributary.read().await;
      let tip = tributary.tip().await;
      let block_time = SystemTime::UNIX_EPOCH +
        Duration::from_secs(tributary.reader().time_of_block(&tip).unwrap_or(0));

      // Only trigger syncing if the block is more than a minute behind
      if SystemTime::now() > (block_time + Duration::from_secs(60)) {
        log::warn!("last known tributary block was over a minute ago");
        P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), tip.to_vec()).await;
      }
    }

    // Only check once every 10 blocks of time
    sleep(ten_blocks_of_time).await;
  }
}

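// Handle P2P messages: Tributary gossip, heartbeats from nodes which have fallen behind, and
// blocks sent in response to heartbeats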
#[allow(clippy::type_complexity)]
pub async fn handle_p2p<D: Db, P: P2p>(
  our_key: <Ristretto as Ciphersuite>::G,
  p2p: P,
  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
) {
  loop {
    let mut msg = p2p.receive().await;
    match msg.kind {
      P2pMessageKind::Tributary(genesis) => {
        let tributaries = tributaries.read().await;
        let Some(tributary) = tributaries.get(&genesis) else {
          log::debug!("received p2p message for unknown network");
          continue;
        };

        if tributary.tributary.write().await.handle_message(&msg.msg).await {
          P2p::broadcast(&p2p, msg.kind, msg.msg).await;
        }
      }

      // TODO2: Rate limit this per validator
      P2pMessageKind::Heartbeat(genesis) => {
        if msg.msg.len() != 32 {
          log::error!("validator sent invalid heartbeat");
          continue;
        }

        let tributaries = tributaries.read().await;
        let Some(tributary) = tributaries.get(&genesis) else {
          log::debug!("received heartbeat message for unknown network");
          continue;
        };
        let tributary_read = tributary.tributary.read().await;

        /*
        // Have sqrt(n) nodes reply with the blocks
        let mut responders = (tributary.spec.n() as f32).sqrt().floor() as u64;
        // Try to have at least 3 responders
        if responders < 3 {
          responders = tributary.spec.n().min(3).into();
        }
        */

        // Have up to three nodes respond
        let responders = u64::from(tributary.spec.n().min(3));

        // Decide which nodes will respond by using the latest block's hash as a mutually agreed
        // upon entropy source
        // This isn't a secure source of entropy, yet it's fine for this
        let entropy = u64::from_le_bytes(tributary_read.tip().await[.. 8].try_into().unwrap());
        // If n = 10, responders = 3, we want start to be 0 ..= 7 (so the highest is 7, 8, 9)
        // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7
        let start =
          usize::try_from(entropy % (u64::from(tributary.spec.n() + 1) - responders)).unwrap();
        let mut selected = false;
        for validator in
          &tributary.spec.validators()[start .. (start + usize::try_from(responders).unwrap())]
        {
          if our_key == validator.0 {
            selected = true;
            break;
          }
        }
        if !selected {
          log::debug!("received heartbeat and not selected to respond");
          continue;
        }

        log::debug!("received heartbeat and selected to respond");

        let reader = tributary_read.reader();
        drop(tributary_read);

        let mut latest = msg.msg.try_into().unwrap();
        while let Some(next) = reader.block_after(&latest) {
          let mut res = reader.block(&next).unwrap().serialize();
          res.extend(reader.commit(&next).unwrap());
          p2p.send(msg.sender, P2pMessageKind::Block(tributary.spec.genesis()), res).await;
          latest = next;
        }
      }

      P2pMessageKind::Block(genesis) => {
        let mut msg_ref: &[u8] = msg.msg.as_ref();
        let Ok(block) = Block::<Transaction>::read(&mut msg_ref) else {
          log::error!("received block message with an invalidly serialized block");
          continue;
        };
        // Get just the commit
        msg.msg.drain(.. (msg.msg.len() - msg_ref.len()));

        // Spawn a dedicated task to add this block, as it may take a notable amount of time
        // While we could use a long-lived task to add each block, that task would only add one
        // block at a time *across all tributaries*
        // We either need:
        // 1) One task per tributary
        // 2) Background tasks
        // 3) For sync_block to return instead of waiting for provided transactions which are
        //    missing
        // sync_block waiting is preferable since we know the block is valid by its commit, meaning
        // we are the node behind
        // As for 1/2, 1 may be preferable since this message may frequently occur
        // We at least need to check if we take value from this message before running spawn
        // TODO
        tokio::spawn({
          let tributaries = tributaries.clone();
          async move {
            let tributaries = tributaries.read().await;
            let Some(tributary) = tributaries.get(&genesis) else {
              log::debug!("received block message for unknown network");
              return;
            };

            let res = tributary.tributary.write().await.sync_block(block, msg.msg).await;
            log::debug!("received block from {:?}, sync_block returned {}", msg.sender, res);
          }
        });
      }
    }
  }
}

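// Add a signed transaction to the specified Tributary, unless its nonce shows it was already
// published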
pub async fn publish_transaction<D: Db, P: P2p>(
  tributary: &Tributary<D, Transaction, P>,
  tx: Transaction,
) {
  if let TransactionKind::Signed(signed) = tx.kind() {
    if tributary
      .next_nonce(signed.signer)
      .await
      .expect("we don't have a nonce, meaning we aren't a participant on this tributary") >
      signed.nonce
    {
      log::warn!("we've already published this transaction. this should only appear on reboot");
    } else {
      // We should've created a valid transaction
      assert!(tributary.add_transaction(tx).await, "created an invalid transaction");
    }
  } else {
    panic!("non-signed transaction passed to publish_transaction");
  }
}

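// Translate messages from the processors into Tributary transactions and publish them to the
// relevant Tributary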
#[allow(clippy::type_complexity)]
pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
  mut db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  mut processors: Pro,
  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
) {
  let pub_key = Ristretto::generator() * key.deref();

  loop {
    let msg = processors.recv().await;

    // TODO2: This is slow, and only works as long as a network only has a single Tributary
    // (which means there's a lack of multisig rotation)
    let genesis = {
      let mut genesis = None;
      for tributary in tributaries.read().await.values() {
        if tributary.spec.set().network == msg.network {
          genesis = Some(tributary.spec.genesis());
          break;
        }
      }
      genesis.unwrap()
    };

    let tx = match msg.msg {
      ProcessorMessage::KeyGen(msg) => match msg {
        key_gen::ProcessorMessage::Commitments { id, commitments } => {
          Some(Transaction::DkgCommitments(id.attempt, commitments, Transaction::empty_signed()))
        }
        key_gen::ProcessorMessage::Shares { id, shares } => {
          Some(Transaction::DkgShares(id.attempt, shares, Transaction::empty_signed()))
        }
        // TODO
        key_gen::ProcessorMessage::GeneratedKeyPair { .. } => todo!(),
      },
      ProcessorMessage::Sign(msg) => match msg {
        sign::ProcessorMessage::Preprocess { id, preprocess } => {
          if id.attempt == 0 {
            let mut txn = db.txn();
            MainDb::<D>::save_first_preprocess(&mut txn, id.id, preprocess);
            txn.commit();

            None
          } else {
            Some(Transaction::SignPreprocess(SignData {
              plan: id.id,
              attempt: id.attempt,
              data: preprocess,
              signed: Transaction::empty_signed(),
            }))
          }
        }
        sign::ProcessorMessage::Share { id, share } => Some(Transaction::SignShare(SignData {
          plan: id.id,
          attempt: id.attempt,
          data: share,
          signed: Transaction::empty_signed(),
        })),
        // TODO
        sign::ProcessorMessage::Completed { .. } => todo!(),
      },
      ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
        coordinator::ProcessorMessage::SubstrateBlockAck { network, block, plans } => {
          assert_eq!(
            msg.network, network,
            "processor claimed to be a different network than it was",
          );

          // Safe to use its own txn since this is static and just needs to be written before we
          // provide SubstrateBlock
          let mut txn = db.txn();
          TributaryDb::<D>::set_plan_ids(&mut txn, genesis, block, &plans);
          txn.commit();

          Some(Transaction::SubstrateBlock(block))
        }
        coordinator::ProcessorMessage::BatchPreprocess { id, preprocess } => {
          // If this is the first attempt instance, synchronize around the block first
          if id.attempt == 0 {
            // Save the preprocess to disk so we can publish it later
            // This is fine to use its own TX since it's static and just needs to be written
            // before this message finishes its handling (or with this message's finished handling)
            let mut txn = db.txn();
            MainDb::<D>::save_first_preprocess(&mut txn, id.id, preprocess);
            txn.commit();

            Some(Transaction::ExternalBlock(id.id))
          } else {
            Some(Transaction::BatchPreprocess(SignData {
              plan: id.id,
              attempt: id.attempt,
              data: preprocess,
              signed: Transaction::empty_signed(),
            }))
          }
        }
        coordinator::ProcessorMessage::BatchShare { id, share } => {
          Some(Transaction::BatchShare(SignData {
            plan: id.id,
            attempt: id.attempt,
            data: share.to_vec(),
            signed: Transaction::empty_signed(),
          }))
        }
      },
      ProcessorMessage::Substrate(msg) => match msg {
        // TODO
        processor_messages::substrate::ProcessorMessage::Update { .. } => todo!(),
      },
    };

    // If this created a transaction, publish it
    if let Some(mut tx) = tx {
      let tributaries = tributaries.read().await;
      let Some(tributary) = tributaries.get(&genesis) else {
        // TODO: This can happen since Substrate tells the Processor to generate commitments
        // at the same time it tells the Tributary to be created
        // There's no guarantee the Tributary will have been created though
        panic!("processor is operating on tributary we don't have");
      };
      let tributary = tributary.tributary.read().await;

      match tx.kind() {
        TransactionKind::Provided(_) => {
          let res = tributary.provide_transaction(tx).await;
          if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {
            panic!("provided an invalid transaction: {res:?}");
          }
        }
        TransactionKind::Signed(_) => {
          // Get the next nonce
          // let mut txn = db.txn();
          // let nonce = MainDb::tx_nonce(&mut txn, msg.id, tributary);

          let nonce = 0; // TODO
          tx.sign(&mut OsRng, genesis, &key, nonce);

          publish_transaction(&tributary, tx).await;

          // txn.commit();
        }
        _ => panic!("created an unexpected transaction"),
      }
    }
  }
}

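// Spawn the coordinator's tasks (the Substrate scanner, Tributary scanner, heartbeat task, and
// P2P handler), then handle messages from the processors on this task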
pub async fn run<D: Db, Pro: Processors, P: P2p>(
  mut raw_db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  p2p: P,
  processors: Pro,
  serai: Serai,
) {
  // Handle new Substrate blocks
  tokio::spawn(scan_substrate(raw_db.clone(), key.clone(), processors.clone(), serai.clone()));

  // Handle the Tributaries

  // Arc so this can be shared between the Tributary scanner task and the P2P task
  // Write locks on this may take a while to acquire
  let tributaries = Arc::new(RwLock::new(HashMap::<[u8; 32], ActiveTributary<D, P>>::new()));

  // Reload active tributaries from the database
  for spec in MainDb::new(&mut raw_db).active_tributaries().1 {
    let _ = add_tributary(
      raw_db.clone(),
      key.clone(),
      p2p.clone(),
      &mut *tributaries.write().await,
      spec,
    )
    .await;
  }

  // Handle new blocks for each Tributary
  let (recognized_id_send, mut recognized_id_recv) = mpsc::unbounded_channel();
  {
    let raw_db = raw_db.clone();
    tokio::spawn(scan_tributaries(
      raw_db,
      key.clone(),
      recognized_id_send,
      p2p.clone(),
      processors.clone(),
      tributaries.clone(),
    ));
  }

  // When we reach consensus on a new external block, send our BatchPreprocess for it
  tokio::spawn({
    let raw_db = raw_db.clone();
    let key = key.clone();
    let tributaries = tributaries.clone();
    async move {
      loop {
        if let Some((genesis, id_type, id)) = recognized_id_recv.recv().await {
          let mut tx = match id_type {
            RecognizedIdType::Block => Transaction::BatchPreprocess(SignData {
              plan: id,
              attempt: 0,
              data: MainDb::<D>::first_preprocess(&raw_db, id),
              signed: Transaction::empty_signed(),
            }),

            RecognizedIdType::Plan => Transaction::SignPreprocess(SignData {
              plan: id,
              attempt: 0,
              data: MainDb::<D>::first_preprocess(&raw_db, id),
              signed: Transaction::empty_signed(),
            }),
          };

          let nonce = 0; // TODO
          tx.sign(&mut OsRng, genesis, &key, nonce);

          // TODO: Consolidate this code with the above instance
          let tributaries = tributaries.read().await;
          let Some(tributary) = tributaries.get(&genesis) else {
            panic!("tributary we don't have came to consensus on an ExternalBlock");
          };
          let tributary = tributary.tributary.read().await;

          publish_transaction(&tributary, tx).await;
        } else {
          log::warn!("recognized_id_send was dropped. are we shutting down?");
          break;
        }
      }
    }
  });

  // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block
  // in a while (presumably because we're behind)
  tokio::spawn(heartbeat_tributaries(p2p.clone(), tributaries.clone()));

  // Handle P2P messages
  tokio::spawn(handle_p2p(Ristretto::generator() * key.deref(), p2p, tributaries.clone()));

  // Handle all messages from processors
  handle_processors(raw_db, key, processors, tributaries).await;
}

#[tokio::main]
async fn main() {
  let db = MemDb::new(); // TODO

  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO); // TODO
  let p2p = LocalP2p::new(1).swap_remove(0); // TODO

  let processors = processors::MemProcessors::new(); // TODO

  let serai = || async {
    loop {
      let Ok(serai) = Serai::new("ws://127.0.0.1:9944").await else {
        log::error!("couldn't connect to the Serai node");
        sleep(Duration::from_secs(5)).await;
        continue
      };
      return serai;
    }
  };
  run(db, key, p2p, processors, serai().await).await
}