Further tweaks re: retiry

This commit is contained in:
Luke Parker 2023-10-14 19:55:14 -04:00
parent 3b3fdd104b
commit e4adaa8947
No known key found for this signature in database
3 changed files with 18 additions and 8 deletions

View file

@@ -791,11 +791,14 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
   });
   // When we reach synchrony on an event requiring signing, send our preprocess for it
+  // TODO: Properly place this into the Tributary scanner, as it's a mess out here
   let recognized_id = {
     let raw_db = raw_db.clone();
     let key = key.clone();
     let tributaries = Arc::new(RwLock::new(HashMap::new()));
+    // Spawn a task to maintain a local view of the tributaries for whenever recognized_id is
+    // called
     tokio::spawn({
       let tributaries = tributaries.clone();
       let mut set_to_genesis = HashMap::new();

View file

@@ -450,6 +450,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
   let (send, mut recv) = mpsc::unbounded_channel();
   channels.write().await.insert(genesis, send);
+  // Per-Tributary P2P message handler
   tokio::spawn({
     let p2p = p2p.clone();
     async move {

View file

@@ -1,4 +1,4 @@
-use core::future::Future;
+use core::{future::Future, time::Duration};
 use std::sync::Arc;

 use zeroize::Zeroizing;
@@ -43,7 +43,6 @@ impl<FRid, F: Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, [u8; 32], u32
 }

 // Handle a specific Tributary block
-#[allow(clippy::needless_pass_by_ref_mut)] // False positive?
 async fn handle_block<
   D: Db,
   Pro: Processors,
@@ -192,6 +191,11 @@ pub(crate) async fn scan_tributaries_task<
   let reader = tributary.reader();
   let mut tributary_db = TributaryDb::new(raw_db.clone());
   loop {
+    // Check if the set was retired, and if so, don't further operate
+    if crate::MainDb::<D>::is_tributary_retired(&raw_db, spec.set()) {
+      break;
+    }
+
     // Obtain the next block notification now to prevent obtaining it immediately after
     // the next block occurs
     let next_block_notification = tributary.next_block_notification().await;
@ -256,16 +260,18 @@ pub(crate) async fn scan_tributaries_task<
) )
.await; .await;
next_block_notification // Run either when the notification fires, or every interval of block_time
.await let _ = tokio::time::timeout(
.map_err(|_| "") Duration::from_secs(tributary::Tributary::<D, Transaction, P>::block_time().into()),
.expect("tributary dropped its notifications?"); next_block_notification,
)
.await;
} }
} }
}); });
} }
// TODO // The above loop simply checks the DB every few seconds, voiding the need for this event
Ok(crate::TributaryEvent::TributaryRetired(_)) => todo!(), Ok(crate::TributaryEvent::TributaryRetired(_)) => {}
Err(broadcast::error::RecvError::Lagged(_)) => { Err(broadcast::error::RecvError::Lagged(_)) => {
panic!("scan_tributaries lagged to handle tributary_event") panic!("scan_tributaries lagged to handle tributary_event")
} }