Mirror of https://github.com/serai-dex/serai.git
Further tweaks re: retiry
parent 3b3fdd104b
commit e4adaa8947

3 changed files with 18 additions and 8 deletions
@@ -791,11 +791,14 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
   });
 
   // When we reach synchrony on an event requiring signing, send our preprocess for it
+  // TODO: Properly place this into the Tributary scanner, as it's a mess out here
   let recognized_id = {
     let raw_db = raw_db.clone();
     let key = key.clone();
 
     let tributaries = Arc::new(RwLock::new(HashMap::new()));
+    // Spawn a task to maintain a local view of the tributaries for whenever recognized_id is
+    // called
     tokio::spawn({
       let tributaries = tributaries.clone();
       let mut set_to_genesis = HashMap::new();
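The comments added above document the local-view task: Tributary lifecycle events are folded into a map owned by recognized_id, so lookups never touch shared coordinator state. A minimal sketch of that pattern, assuming a hypothetical Event enum and a String handle in place of the coordinator's actual TributaryEvent and Tributary types (the real task also maps ValidatorSet to genesis via set_to_genesis, since retirement is keyed by set):

    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::{broadcast, RwLock};

    // Hypothetical stand-in for TributaryEvent, keyed by genesis hash.
    #[derive(Clone)]
    enum Event {
      New { genesis: [u8; 32], handle: String },
      Retired { genesis: [u8; 32] },
    }

    // Fold lifecycle events into a map which readers may consult at any time.
    fn spawn_local_view(
      mut events: broadcast::Receiver<Event>,
    ) -> Arc<RwLock<HashMap<[u8; 32], String>>> {
      let view = Arc::new(RwLock::new(HashMap::new()));
      tokio::spawn({
        let view = view.clone();
        async move {
          while let Ok(event) = events.recv().await {
            match event {
              Event::New { genesis, handle } => {
                view.write().await.insert(genesis, handle);
              }
              Event::Retired { genesis } => {
                view.write().await.remove(&genesis);
              }
            }
          }
        }
      });
      view
    }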
@@ -450,6 +450,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
       let (send, mut recv) = mpsc::unbounded_channel();
       channels.write().await.insert(genesis, send);
 
+      // Per-Tributary P2P message handler
       tokio::spawn({
         let p2p = p2p.clone();
         async move {
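For context, the new comment names a fan-out pattern: each Tributary gets an unbounded mpsc channel, registered in a shared map under its genesis, with a dedicated task draining it so each Tributary's messages are handled in order. A rough sketch under assumptions (Message, the map type, and the stubbed handler body are hypothetical):

    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::{mpsc, RwLock};

    type Message = Vec<u8>; // hypothetical payload type

    // Register a per-Tributary channel and spawn its dedicated handler.
    async fn register_handler(
      channels: &Arc<RwLock<HashMap<[u8; 32], mpsc::UnboundedSender<Message>>>>,
      genesis: [u8; 32],
    ) {
      let (send, mut recv) = mpsc::unbounded_channel();
      channels.write().await.insert(genesis, send);

      // Per-Tributary P2P message handler
      tokio::spawn(async move {
        while let Some(msg) = recv.recv().await {
          // ... handle this Tributary's message (stubbed)
          drop(msg);
        }
      });
    }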
@@ -1,4 +1,4 @@
-use core::future::Future;
+use core::{future::Future, time::Duration};
 use std::sync::Arc;
 
 use zeroize::Zeroizing;
@@ -43,7 +43,6 @@ impl<FRid, F: Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, [u8; 32], u32
 }
 
 // Handle a specific Tributary block
-#[allow(clippy::needless_pass_by_ref_mut)] // False positive?
 async fn handle_block<
   D: Db,
   Pro: Processors,
@@ -192,6 +191,11 @@ pub(crate) async fn scan_tributaries_task<
         let reader = tributary.reader();
         let mut tributary_db = TributaryDb::new(raw_db.clone());
         loop {
+          // Check if the set was retired, and if so, don't further operate
+          if crate::MainDb::<D>::is_tributary_retired(&raw_db, spec.set()) {
+            break;
+          }
+
           // Obtain the next block notification now to prevent obtaining it immediately after
           // the next block occurs
           let next_block_notification = tributary.next_block_notification().await;
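The added check makes each per-Tributary scanner responsible for its own shutdown: it polls a persisted "retired" flag at the top of every iteration and breaks, rather than waiting on external cancellation. A minimal sketch, with is_retired as a hypothetical stand-in for MainDb::is_tributary_retired:

    use core::time::Duration;

    // Scan until the persisted "retired" flag flips; the sleep bounds how long
    // the flag may go unchecked when no blocks arrive.
    async fn scan_until_retired(is_retired: impl Fn() -> bool) {
      loop {
        if is_retired() {
          break;
        }
        // ... handle any newly-produced blocks ...
        tokio::time::sleep(Duration::from_secs(6)).await;
      }
    }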
@@ -256,16 +260,18 @@ pub(crate) async fn scan_tributaries_task<
           )
           .await;
 
-          next_block_notification
-            .await
-            .map_err(|_| "")
-            .expect("tributary dropped its notifications?");
+          // Run either when the notification fires, or every interval of block_time
+          let _ = tokio::time::timeout(
+            Duration::from_secs(tributary::Tributary::<D, Transaction, P>::block_time().into()),
+            next_block_notification,
+          )
+          .await;
         }
       }
     });
   }
-      // TODO
-      Ok(crate::TributaryEvent::TributaryRetired(_)) => todo!(),
+      // The above loop simply checks the DB every few seconds, voiding the need for this event
+      Ok(crate::TributaryEvent::TributaryRetired(_)) => {}
       Err(broadcast::error::RecvError::Lagged(_)) => {
         panic!("scan_tributaries lagged to handle tributary_event")
       }
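The final hunk replaces a wait which only ever woke on a new block with a bounded one, which is presumably what lets the retire check above run even when a halted Tributary never produces another block. A sketch of the same timeout-or-notification wait, with BLOCK_TIME_SECS and a Notify as hypothetical stand-ins for Tributary::block_time() and next_block_notification():

    use core::time::Duration;
    use tokio::sync::Notify;

    const BLOCK_TIME_SECS: u64 = 6; // hypothetical; the real value comes from block_time()

    async fn wait_for_next_iteration(wakeup: &Notify) {
      // Err (timeout elapsed) is as acceptable an outcome as Ok (notified), hence let _
      let _ = tokio::time::timeout(Duration::from_secs(BLOCK_TIME_SECS), wakeup.notified()).await;
    }

Either outcome simply re-enters the loop, so a dropped notification sender no longer panics the scanner (the old .expect is gone).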