#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unreachable_code)]
#![allow(clippy::diverging_sub_expression)]

use core::ops::Deref;
use std::{
  sync::Arc,
  time::{SystemTime, Duration},
  collections::{VecDeque, HashMap},
};

use zeroize::Zeroizing;

use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

use serai_db::{Db, MemDb};

use serai_client::Serai;

use tokio::{sync::RwLock, time::sleep};

use ::tributary::{ReadWrite, Block, Tributary};

mod tributary;
use crate::tributary::{TributarySpec, Transaction};

mod db;
use db::MainDb;

mod p2p;
pub use p2p::*;

pub mod processor;
use processor::Processor;

mod substrate;

#[cfg(test)]
pub mod tests;
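
// Tributaries created by the Substrate scanner, waiting to be picked up by the Tributary scanner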
// This is a static to satisfy lifetime expectations
lazy_static::lazy_static! {
  static ref NEW_TRIBUTARIES: RwLock<VecDeque<TributarySpec>> = RwLock::new(VecDeque::new());
}

// Creates a new Tributary, queueing it for the Tributary scanner to start handling
async fn create_new_tributary<D: Db>(db: D, spec: TributarySpec) {
  // Save it to the database
  MainDb(db).add_active_tributary(&spec);
  // Add it to the queue
  // If we reboot before this is read from the queue, the fact it was saved to the database
  // means it'll be handled on reboot
  NEW_TRIBUTARIES.write().await.push_back(spec);
}
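
// A Tributary this coordinator actively participates in, pairing its spec with a shared handle
// to the Tributary itself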
pub struct ActiveTributary<D: Db, P: P2p> {
  spec: TributarySpec,
  tributary: Arc<RwLock<Tributary<D, Transaction, P>>>,
}

// Adds a Tributary into the specified HashMap
async fn add_tributary<D: Db, P: P2p>(
  db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  p2p: P,
  tributaries: &mut HashMap<[u8; 32], ActiveTributary<D, P>>,
  spec: TributarySpec,
) {
  let tributary = Tributary::<_, Transaction, _>::new(
    // TODO: Use a db on a distinct volume
    db,
    spec.genesis(),
    spec.start_time(),
    key,
    spec.validators(),
    p2p,
  )
  .await
  .unwrap();

  tributaries.insert(
    tributary.genesis(),
    ActiveTributary { spec, tributary: Arc::new(RwLock::new(tributary)) },
  );
}
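
// Scans the Serai (Substrate) chain for new blocks, handing them to substrate::handle_new_blocks
// along with the processor and the create_new_tributary hook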
pub async fn scan_substrate<D: Db, Pro: Processor>(
  db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  mut processor: Pro,
  serai: Serai,
) {
  let mut db = substrate::SubstrateDb::new(db);
  let mut last_substrate_block = db.last_block();

  loop {
    match substrate::handle_new_blocks(
      &mut db,
      &key,
      create_new_tributary,
      &mut processor,
      &serai,
      &mut last_substrate_block,
    )
    .await
    {
      // TODO: Should this use a notification system for new blocks?
      // Right now it's sleeping for half the block time.
      Ok(()) => sleep(Duration::from_secs(3)).await,
      Err(e) => {
        log::error!("couldn't communicate with serai node: {e}");
        sleep(Duration::from_secs(5)).await;
      }
    }
  }
}
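
// Scans all active Tributaries for new blocks, first adding any Tributaries queued in
// NEW_TRIBUTARIES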
#[allow(clippy::type_complexity)]
pub async fn scan_tributaries<D: Db, Pro: Processor, P: P2p>(
  raw_db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  p2p: P,
  mut processor: Pro,
  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
) {
  // Handle new Tributary blocks
  let mut tributary_db = tributary::TributaryDb::new(raw_db.clone());
  loop {
    // The following handle_new_blocks function may take an arbitrary amount of time
    // Accordingly, it may take a long time to acquire a write lock on the tributaries table
    // By definition of NEW_TRIBUTARIES, we allow tributaries to be added almost immediately,
    // meaning the Substrate scanner won't become blocked on this
    {
      let mut new_tributaries = NEW_TRIBUTARIES.write().await;
      while let Some(spec) = new_tributaries.pop_front() {
        add_tributary(
          raw_db.clone(),
          key.clone(),
          p2p.clone(),
          // This is a short-lived write acquisition, which is why it should be fine
          &mut *tributaries.write().await,
          spec,
        )
        .await;
      }
    }

    // TODO: Instead of holding this lock long term, should this take in Arc RwLock and
    // re-acquire read locks?
    for ActiveTributary { spec, tributary } in tributaries.read().await.values() {
      tributary::scanner::handle_new_blocks::<_, _, P>(
        &mut tributary_db,
        &key,
        &mut processor,
        spec,
        &*tributary.read().await,
      )
      .await;
    }

    // Sleep for half the block time
    // TODO: Should we define a notification system for when a new block occurs?
    sleep(Duration::from_secs((Tributary::<D, Transaction, P>::block_time() / 2).into())).await;
  }
}
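
// Periodically checks each Tributary's tip and, if it's over a minute old, broadcasts a
// Heartbeat so peers send us the blocks we're missing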
#[allow(clippy::type_complexity)]
pub async fn heartbeat_tributaries<D: Db, P: P2p>(
  p2p: P,
  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
) {
  let ten_blocks_of_time =
    Duration::from_secs((Tributary::<D, Transaction, P>::block_time() * 10).into());

  loop {
    for ActiveTributary { spec: _, tributary } in tributaries.read().await.values() {
      let tributary = tributary.read().await;
      let tip = tributary.tip().await;
      let block_time =
        SystemTime::UNIX_EPOCH + Duration::from_secs(tributary.time_of_block(&tip).unwrap_or(0));

      // Only trigger syncing if the block is more than a minute behind
      if SystemTime::now() > (block_time + Duration::from_secs(60)) {
        log::warn!("last known tributary block was over a minute ago");
        P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), tip.to_vec()).await;
      }
    }

    // Only check once every 10 blocks of time
    sleep(ten_blocks_of_time).await;
  }
}
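
// Handles incoming P2P messages: Tributary consensus messages, Heartbeats requesting the blocks
// after a given tip, and Blocks sent in response to a Heartbeat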
#[allow(clippy::type_complexity)]
pub async fn handle_p2p<D: Db, P: P2p>(
  our_key: <Ristretto as Ciphersuite>::G,
  p2p: P,
  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
) {
  loop {
    let mut msg = p2p.receive().await;
    match msg.kind {
      P2pMessageKind::Tributary(genesis) => {
        let tributaries_read = tributaries.read().await;
        let Some(tributary) = tributaries_read.get(&genesis) else {
          log::debug!("received p2p message for unknown network");
          continue;
        };

        // Taking this as a read lock is misleading, as handling the message may mutate the
        // Tributary, yet a read lock offers greater efficiency
        // Its safety is also justified by Tributary::handle_message's documentation
        if tributary.tributary.read().await.handle_message(&msg.msg).await {
          P2p::broadcast(&p2p, msg.kind, msg.msg).await;
        }
      }

      P2pMessageKind::Heartbeat(genesis) => {
        let tributaries_read = tributaries.read().await;
        let Some(tributary) = tributaries_read.get(&genesis) else {
          log::debug!("received heartbeat message for unknown network");
          continue;
        };

        if msg.msg.len() != 32 {
          log::error!("validator sent invalid heartbeat");
          continue;
        }

        let tributary_read = tributary.tributary.read().await;

        // Have sqrt(n) nodes reply with the blocks
        let mut responders = (tributary.spec.n() as f32).sqrt().floor() as u64;
        // Try to have at least 3 responders
        if responders < 3 {
          responders = tributary.spec.n().min(3).into();
        }

        // Only respond to this if randomly chosen
        let entropy = u64::from_le_bytes(tributary_read.tip().await[.. 8].try_into().unwrap());
        // If n = 10, responders = 3, we want start to be 0 ..= 7 (so the highest selected
        // validators are 7, 8, 9)
        // entropy % ((10 + 1) - 3) = entropy % 8 = 0 ..= 7
        let start =
          usize::try_from(entropy % (u64::from(tributary.spec.n() + 1) - responders)).unwrap();
        let mut selected = false;
        for validator in
          &tributary.spec.validators()[start .. (start + usize::try_from(responders).unwrap())]
        {
          if our_key == validator.0 {
            selected = true;
            break;
          }
        }
        if !selected {
          continue;
        }

        let mut latest = msg.msg.try_into().unwrap();
        // TODO: All of these calls don't *actually* need a read lock, just access to a DB handle
        // We can reduce lock contention accordingly
        while let Some(next) = tributary_read.block_after(&latest) {
          let mut res = tributary_read.block(&next).unwrap().serialize();
          res.extend(tributary_read.commit(&next).unwrap());
          p2p.send(msg.sender, P2pMessageKind::Block(tributary.spec.genesis()), res).await;
          latest = next;
        }
      }

      P2pMessageKind::Block(genesis) => {
        let mut msg_ref: &[u8] = msg.msg.as_ref();
        let Ok(block) = Block::<Transaction>::read(&mut msg_ref) else {
          log::error!("received block message with an invalidly serialized block");
          continue;
        };
        // Drain the serialized block off the front of the message, leaving just the commit
        msg.msg.drain(.. (msg.msg.len() - msg_ref.len()));

        let tributaries = tributaries.read().await;
        let Some(tributary) = tributaries.get(&genesis) else {
          log::debug!("received block message for unknown network");
          continue;
        };

        tributary.tributary.write().await.sync_block(block, msg.msg).await;
      }
    }
  }
}
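
// Runs the coordinator: spawns the Substrate scanner, the Tributary scanner, the heartbeat task,
// and the P2P handler, then loops handling messages from processors (currently a todo!)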
pub async fn run<D: Db, Pro: Processor, P: P2p>(
  raw_db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  p2p: P,
  processor: Pro,
  serai: Serai,
) {
  // Handle new Substrate blocks
  tokio::spawn(scan_substrate(raw_db.clone(), key.clone(), processor.clone(), serai.clone()));

  // Handle the Tributaries

  // Arc so this can be shared between the Tributary scanner task and the P2P task
  // Write locks on this may take a while to acquire
  let tributaries = Arc::new(RwLock::new(HashMap::<[u8; 32], ActiveTributary<D, P>>::new()));

  // Reload active tributaries from the database
  // TODO: Can MainDb take a borrow?
  for spec in MainDb(raw_db.clone()).active_tributaries().1 {
    add_tributary(raw_db.clone(), key.clone(), p2p.clone(), &mut *tributaries.write().await, spec)
      .await;
  }

  // Handle new blocks for each Tributary
  tokio::spawn(scan_tributaries(
    raw_db.clone(),
    key.clone(),
    p2p.clone(),
    processor,
    tributaries.clone(),
  ));

  // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block
  // in a while (presumably because we're behind)
  tokio::spawn(heartbeat_tributaries(p2p.clone(), tributaries.clone()));

  // Handle P2P messages
  // TODO: We also have to broadcast blocks once they're added
  tokio::spawn(handle_p2p(Ristretto::generator() * key.deref(), p2p, tributaries));

  loop {
    // Handle all messages from processors
    todo!()
  }
}
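
// Entry point: currently wires the coordinator up with in-memory stubs (see the TODOs below) for
// the DB, key, P2P network, and processor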
#[tokio::main]
async fn main() {
  let db = MemDb::new(); // TODO
  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO); // TODO
  let p2p = LocalP2p::new(1).swap_remove(0); // TODO

  let processor = processor::MemProcessor::new(); // TODO

  let serai = || async {
    loop {
      let Ok(serai) = Serai::new("ws://127.0.0.1:9944").await else {
        log::error!("couldn't connect to the Serai node");
        sleep(Duration::from_secs(5)).await;
        continue
      };
      return serai;
    }
  };
  run(db, key, p2p, processor, serai().await).await
}