Break coordinator main into multiple functions

Also moves from std::sync::RwLock to tokio::sync::RwLock to prevent wasting
cycles on spinning.
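
As a rough illustration of the motivation (not part of the commit itself; the Tributaries alias and bump helper below are made up for the example): std::sync::RwLock::write() blocks the calling OS thread until the lock frees, stalling the tokio worker running on it, while tokio::sync::RwLock::write() returns a future, so a contended acquisition suspends only the waiting task.

// Hypothetical sketch, not code from this commit. With std::sync::RwLock this
// would be `write().unwrap()` and would block the worker thread; with
// tokio::sync::RwLock the task yields to the runtime at `.await` instead.
use std::{collections::HashMap, sync::Arc};

use tokio::sync::RwLock;

// Stand-in for the coordinator's map of active tributaries.
type Tributaries = Arc<RwLock<HashMap<[u8; 32], u64>>>;

async fn bump(tributaries: Tributaries, genesis: [u8; 32]) {
  // Contended writers suspend here instead of spinning or blocking the thread.
  let mut map = tributaries.write().await;
  *map.entry(genesis).or_insert(0) += 1;
}

#[tokio::main]
async fn main() {
  let tributaries: Tributaries = Arc::new(RwLock::new(HashMap::new()));
  let handles: Vec<_> =
    (0 .. 4).map(|_| tokio::spawn(bump(tributaries.clone(), [0; 32]))).collect();
  for handle in handles {
    handle.await.unwrap();
  }
  assert_eq!(*tributaries.read().await.get(&[0; 32]).unwrap(), 4);
}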
Luke Parker 2023-04-23 23:15:15 -04:00
parent be8c25aef0
commit c476f9b640
10 changed files with 263 additions and 245 deletions

View file

@@ -42,206 +42,222 @@ lazy_static::lazy_static! {
   static ref NEW_TRIBUTARIES: RwLock<VecDeque<TributarySpec>> = RwLock::new(VecDeque::new());
 }

-async fn run<D: Db, Pro: Processor, P: P2p>(
-  raw_db: D,
-  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-  p2p: P,
-  mut processor: Pro,
-  serai: Serai,
-) {
-  let add_new_tributary = |db, spec: TributarySpec| async {
-    // Save it to the database
-    MainDb(db).add_active_tributary(&spec);
-    // Add it to the queue
-    // If we reboot before this is read from the queue, the fact it was saved to the database
-    // means it'll be handled on reboot
-    NEW_TRIBUTARIES.write().await.push_back(spec);
-  };
-
-  // Handle new Substrate blocks
-  {
-    let mut substrate_db = substrate::SubstrateDb::new(raw_db.clone());
-    let mut last_substrate_block = substrate_db.last_block();
-
-    let key = key.clone();
-    let mut processor = processor.clone();
-    tokio::spawn(async move {
-      loop {
-        match substrate::handle_new_blocks(
-          &mut substrate_db,
-          &key,
-          add_new_tributary,
-          &mut processor,
-          &serai,
-          &mut last_substrate_block,
-        )
-        .await
-        {
-          // TODO: Should this use a notification system for new blocks?
-          // Right now it's sleeping for half the block time.
-          Ok(()) => sleep(Duration::from_secs(3)).await,
-          Err(e) => {
-            log::error!("couldn't communicate with serai node: {e}");
-            sleep(Duration::from_secs(5)).await;
-          }
-        }
-      }
-    });
-  }
-
-  // Handle the Tributaries
-  {
-    struct ActiveTributary<D: Db, P: P2p> {
-      spec: TributarySpec,
-      tributary: Arc<RwLock<Tributary<D, Transaction, P>>>,
-    }
-
-    // Arc so this can be shared between the Tributary scanner task and the P2P task
-    // Write locks on this may take a while to acquire
-    let tributaries = Arc::new(RwLock::new(HashMap::<[u8; 32], ActiveTributary<D, P>>::new()));
-
-    async fn add_tributary<D: Db, P: P2p>(
-      db: D,
-      key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-      p2p: P,
-      tributaries: &mut HashMap<[u8; 32], ActiveTributary<D, P>>,
-      spec: TributarySpec,
-    ) {
-      let tributary = Tributary::<_, Transaction, _>::new(
-        // TODO: Use a db on a distinct volume
-        db,
-        spec.genesis(),
-        spec.start_time(),
-        key,
-        spec.validators(),
-        p2p,
-      )
-      .await
-      .unwrap();
-
-      tributaries.insert(
-        tributary.genesis(),
-        ActiveTributary { spec, tributary: Arc::new(RwLock::new(tributary)) },
-      );
-    }
-
-    // Reload active tributaries from the database
-    // TODO: Can MainDb take a borrow?
-    for spec in MainDb(raw_db.clone()).active_tributaries().1 {
-      add_tributary(
-        raw_db.clone(),
-        key.clone(),
-        p2p.clone(),
-        &mut *tributaries.write().await,
-        spec,
-      )
-      .await;
-    }
-
-    // Handle new Tributary blocks
-    let mut tributary_db = tributary::TributaryDb::new(raw_db.clone());
-    {
-      let tributaries = tributaries.clone();
-      let p2p = p2p.clone();
-      tokio::spawn(async move {
-        loop {
-          // The following handle_new_blocks function may take an arbitrary amount of time
-          // Accordingly, it may take a long time to acquire a write lock on the tributaries table
-          // By definition of NEW_TRIBUTARIES, we allow tributaries to be added almost immediately,
-          // meaning the Substrate scanner won't become blocked on this
-          {
-            let mut new_tributaries = NEW_TRIBUTARIES.write().await;
-            while let Some(spec) = new_tributaries.pop_front() {
-              add_tributary(
-                raw_db.clone(),
-                key.clone(),
-                p2p.clone(),
-                // This is a short-lived write acquisition, which is why it should be fine
-                &mut *tributaries.write().await,
-                spec,
-              )
-              .await;
-            }
-          }
-
-          // TODO: Instead of holding this lock long term, should this take in Arc RwLock and
-          // re-acquire read locks?
-          for ActiveTributary { spec, tributary } in tributaries.read().await.values() {
-            tributary::scanner::handle_new_blocks::<_, _, P>(
-              &mut tributary_db,
-              &key,
-              &mut processor,
-              spec,
-              &*tributary.read().await,
-            )
-            .await;
-          }
-
-          // Sleep for half the block time
-          // TODO: Should we define a notification system for when a new block occurs?
-          sleep(Duration::from_secs((Tributary::<D, Transaction, P>::block_time() / 2).into()))
-            .await;
-        }
-      });
-    }
-
-    // If a Tributary has fallen behind, trigger syncing
-    {
-      let p2p = p2p.clone();
-      let tributaries = tributaries.clone();
-      tokio::spawn(async move {
-        let ten_blocks_of_time =
-          Duration::from_secs((Tributary::<D, Transaction, P>::block_time() * 10).into());
-
-        loop {
-          for ActiveTributary { spec: _, tributary } in tributaries.read().await.values() {
-            let tributary = tributary.read().await;
-            let tip = tributary.tip();
-            let block_time = SystemTime::UNIX_EPOCH +
-              Duration::from_secs(tributary.time_of_block(&tip).unwrap_or(0));
-
-            // Only trigger syncing if the block is more than a minute behind
-            if SystemTime::now() > (block_time + Duration::from_secs(60)) {
-              log::warn!("last known tributary block was over a minute ago");
-              P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), tip.to_vec())
-                .await;
-            }
-          }
-
-          // Only check once every 10 blocks of time
-          sleep(ten_blocks_of_time).await;
-        }
-      });
-    }
-
-    // Handle P2P messages
-    {
-      tokio::spawn(async move {
-        loop {
-          let msg = p2p.receive().await;
-          match msg.kind {
-            P2pMessageKind::Tributary(genesis) => {
-              let tributaries_read = tributaries.read().await;
-              let Some(tributary) = tributaries_read.get(&genesis) else {
-                log::debug!("received p2p message for unknown network");
-                continue;
-              };
-
-              // This is misleading being read, as it will mutate the Tributary, yet there's
-              // greater efficiency when it is read
-              // The safety of it is also justified by Tributary::handle_message's documentation
-              if tributary.tributary.read().await.handle_message(&msg.msg).await {
-                P2p::broadcast(&p2p, msg.kind, msg.msg).await;
-              }
-            }
-
-            // TODO: Respond with the missing block, if there are any
-            P2pMessageKind::Heartbeat(genesis) => todo!(),
-          }
-        }
-      });
-    }
-  }
-
+// Specifies a new tributary
+async fn create_new_tributary<D: Db>(db: D, spec: TributarySpec) {
+  // Save it to the database
+  MainDb(db).add_active_tributary(&spec);
+  // Add it to the queue
+  // If we reboot before this is read from the queue, the fact it was saved to the database
+  // means it'll be handled on reboot
+  NEW_TRIBUTARIES.write().await.push_back(spec);
+}
+
+pub struct ActiveTributary<D: Db, P: P2p> {
+  spec: TributarySpec,
+  tributary: Arc<RwLock<Tributary<D, Transaction, P>>>,
+}
+
+// Adds a tributary into the specified HahMap
+async fn add_tributary<D: Db, P: P2p>(
+  db: D,
+  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
+  p2p: P,
+  tributaries: &mut HashMap<[u8; 32], ActiveTributary<D, P>>,
+  spec: TributarySpec,
+) {
+  let tributary = Tributary::<_, Transaction, _>::new(
+    // TODO: Use a db on a distinct volume
+    db,
+    spec.genesis(),
+    spec.start_time(),
+    key,
+    spec.validators(),
+    p2p,
+  )
+  .await
+  .unwrap();
+
+  tributaries.insert(
+    tributary.genesis(),
+    ActiveTributary { spec, tributary: Arc::new(RwLock::new(tributary)) },
+  );
+}
+
+pub async fn scan_substrate<D: Db, Pro: Processor>(
+  db: D,
+  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
+  mut processor: Pro,
+  serai: Serai,
+) {
+  let mut db = substrate::SubstrateDb::new(db);
+  let mut last_substrate_block = db.last_block();
+
+  loop {
+    match substrate::handle_new_blocks(
+      &mut db,
+      &key,
+      create_new_tributary,
+      &mut processor,
+      &serai,
+      &mut last_substrate_block,
+    )
+    .await
+    {
+      // TODO: Should this use a notification system for new blocks?
+      // Right now it's sleeping for half the block time.
+      Ok(()) => sleep(Duration::from_secs(3)).await,
+      Err(e) => {
+        log::error!("couldn't communicate with serai node: {e}");
+        sleep(Duration::from_secs(5)).await;
+      }
+    }
+  }
+}
+
+#[allow(clippy::type_complexity)]
+pub async fn scan_tributaries<D: Db, Pro: Processor, P: P2p>(
+  raw_db: D,
+  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
+  p2p: P,
+  mut processor: Pro,
+  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
+) {
+  // Handle new Tributary blocks
+  let mut tributary_db = tributary::TributaryDb::new(raw_db.clone());
+  loop {
+    // The following handle_new_blocks function may take an arbitrary amount of time
+    // Accordingly, it may take a long time to acquire a write lock on the tributaries table
+    // By definition of NEW_TRIBUTARIES, we allow tributaries to be added almost immediately,
+    // meaning the Substrate scanner won't become blocked on this
+    {
+      let mut new_tributaries = NEW_TRIBUTARIES.write().await;
+      while let Some(spec) = new_tributaries.pop_front() {
+        add_tributary(
+          raw_db.clone(),
+          key.clone(),
+          p2p.clone(),
+          // This is a short-lived write acquisition, which is why it should be fine
+          &mut *tributaries.write().await,
+          spec,
+        )
+        .await;
+      }
+    }
+
+    // TODO: Instead of holding this lock long term, should this take in Arc RwLock and
+    // re-acquire read locks?
+    for ActiveTributary { spec, tributary } in tributaries.read().await.values() {
+      tributary::scanner::handle_new_blocks::<_, _, P>(
+        &mut tributary_db,
+        &key,
+        &mut processor,
+        spec,
+        &*tributary.read().await,
+      )
+      .await;
+    }
+
+    // Sleep for half the block time
+    // TODO: Should we define a notification system for when a new block occurs?
+    sleep(Duration::from_secs((Tributary::<D, Transaction, P>::block_time() / 2).into())).await;
+  }
+}
+
+#[allow(clippy::type_complexity)]
+pub async fn heartbeat_tributaries<D: Db, P: P2p>(
+  p2p: P,
+  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
+) {
+  let ten_blocks_of_time =
+    Duration::from_secs((Tributary::<D, Transaction, P>::block_time() * 10).into());
+
+  loop {
+    for ActiveTributary { spec: _, tributary } in tributaries.read().await.values() {
+      let tributary = tributary.read().await;
+      let tip = tributary.tip().await;
+      let block_time = SystemTime::UNIX_EPOCH +
+        Duration::from_secs(tributary.time_of_block(&tip).await.unwrap_or(0));
+
+      // Only trigger syncing if the block is more than a minute behind
+      if SystemTime::now() > (block_time + Duration::from_secs(60)) {
+        log::warn!("last known tributary block was over a minute ago");
+        P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), tip.to_vec()).await;
+      }
+    }
+
+    // Only check once every 10 blocks of time
+    sleep(ten_blocks_of_time).await;
+  }
+}
+
+#[allow(clippy::type_complexity)]
+pub async fn handle_p2p<D: Db, P: P2p>(
+  p2p: P,
+  tributaries: Arc<RwLock<HashMap<[u8; 32], ActiveTributary<D, P>>>>,
+) {
+  loop {
+    let msg = p2p.receive().await;
+    match msg.kind {
+      P2pMessageKind::Tributary(genesis) => {
+        let tributaries_read = tributaries.read().await;
+        let Some(tributary) = tributaries_read.get(&genesis) else {
+          log::debug!("received p2p message for unknown network");
+          continue;
+        };
+
+        // This is misleading being read, as it will mutate the Tributary, yet there's
+        // greater efficiency when it is read
+        // The safety of it is also justified by Tributary::handle_message's documentation
+        if tributary.tributary.read().await.handle_message(&msg.msg).await {
+          P2p::broadcast(&p2p, msg.kind, msg.msg).await;
+        }
+      }
+
+      // TODO: Respond with the missing block, if there are any
+      P2pMessageKind::Heartbeat(genesis) => todo!(),
+    }
+  }
+}
+
+pub async fn run<D: Db, Pro: Processor, P: P2p>(
+  raw_db: D,
+  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
+  p2p: P,
+  processor: Pro,
+  serai: Serai,
+) {
+  // Handle new Substrate blocks
+  tokio::spawn(scan_substrate(raw_db.clone(), key.clone(), processor.clone(), serai.clone()));
+
+  // Handle the Tributaries
+
+  // Arc so this can be shared between the Tributary scanner task and the P2P task
+  // Write locks on this may take a while to acquire
+  let tributaries = Arc::new(RwLock::new(HashMap::<[u8; 32], ActiveTributary<D, P>>::new()));
+
+  // Reload active tributaries from the database
+  // TODO: Can MainDb take a borrow?
+  for spec in MainDb(raw_db.clone()).active_tributaries().1 {
+    add_tributary(raw_db.clone(), key.clone(), p2p.clone(), &mut *tributaries.write().await, spec)
+      .await;
+  }
+
+  // Handle new blocks for each Tributary
+  tokio::spawn(scan_tributaries(
+    raw_db.clone(),
+    key.clone(),
+    p2p.clone(),
+    processor,
+    tributaries.clone(),
+  ));
+
+  // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block
+  // in a while (presumably because we're behind)
+  tokio::spawn(heartbeat_tributaries(p2p.clone(), tributaries.clone()));
+
+  // Handle P2P messages
+  // TODO: We also have to broadcast blocks once they're added
+  tokio::spawn(handle_p2p(p2p, tributaries));
+
   loop {
     // Handle all messages from processors

View file

@@ -1,12 +1,10 @@
 use core::fmt::Debug;
-use std::{
-  sync::{Arc, RwLock},
-  io::Read,
-  collections::VecDeque,
-};
+use std::{sync::Arc, io::Read, collections::VecDeque};

 use async_trait::async_trait;

+use tokio::sync::RwLock;
+
 pub use tributary::P2p as TributaryP2p;

 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]

@@ -94,6 +92,7 @@ pub trait P2p: Send + Sync + Clone + Debug + TributaryP2p {
   }
 }

+// TODO: Move this to tests
 #[allow(clippy::type_complexity)]
 #[derive(Clone, Debug)]
 pub struct LocalP2p(usize, Arc<RwLock<Vec<VecDeque<(usize, Vec<u8>)>>>>);

@@ -114,11 +113,11 @@ impl P2p for LocalP2p {
   type Id = usize;

   async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
-    self.1.write().unwrap()[to].push_back((self.0, msg));
+    self.1.write().await[to].push_back((self.0, msg));
   }

   async fn broadcast_raw(&self, msg: Vec<u8>) {
-    for (i, msg_queue) in self.1.write().unwrap().iter_mut().enumerate() {
+    for (i, msg_queue) in self.1.write().await.iter_mut().enumerate() {
       if i == self.0 {
         continue;
       }

@@ -129,7 +128,7 @@ impl P2p for LocalP2p {
   async fn receive_raw(&self) -> (Self::Id, Vec<u8>) {
     // This is a cursed way to implement an async read from a Vec
     loop {
-      if let Some(res) = self.1.write().unwrap()[self.0].pop_front() {
+      if let Some(res) = self.1.write().await[self.0].pop_front() {
         return res;
       }
       tokio::time::sleep(std::time::Duration::from_millis(100)).await;

View file

@@ -1,7 +1,6 @@
-use std::{
-  sync::{Arc, RwLock},
-  collections::VecDeque,
-};
+use std::{sync::Arc, collections::VecDeque};
+
+use tokio::sync::RwLock;

 use processor_messages::{ProcessorMessage, CoordinatorMessage};

@@ -31,7 +30,7 @@ impl MemProcessor {
 #[async_trait::async_trait]
 impl Processor for MemProcessor {
   async fn send(&mut self, msg: CoordinatorMessage) {
-    self.0.write().unwrap().push_back(msg)
+    self.0.write().await.push_back(msg)
   }
   async fn recv(&mut self) -> Message {
     todo!()

View file

@@ -118,20 +118,20 @@ pub async fn wait_for_tx_inclusion(
   hash: [u8; 32],
 ) -> [u8; 32] {
   loop {
-    let tip = tributary.tip();
+    let tip = tributary.tip().await;
     if tip == last_checked {
       sleep(Duration::from_secs(1)).await;
       continue;
     }

-    let mut queue = vec![tributary.block(&tip).unwrap()];
+    let mut queue = vec![tributary.block(&tip).await.unwrap()];
     let mut block = None;
     while {
       let parent = queue.last().unwrap().parent();
       if parent == tributary.genesis() {
         false
       } else {
-        block = Some(tributary.block(&parent).unwrap());
+        block = Some(tributary.block(&parent).await.unwrap());
         block.as_ref().unwrap().hash() != last_checked
       }
     } {

@@ -176,7 +176,7 @@ async fn tributary_test() {
     }
   }

-  let tip = tributaries[0].1.tip();
+  let tip = tributaries[0].1.tip().await;
   if tip != last_block {
     last_block = tip;
     blocks += 1;

@@ -202,11 +202,18 @@ async fn tributary_test() {
     }
   }

+  // handle_message informed the Tendermint machine, yet it still has to process it
+  // Sleep for a second accordingly
+  // TODO: Is there a better way to handle this?
+  sleep(Duration::from_secs(1)).await;
+
   // All tributaries should agree on the tip
   let mut final_block = None;
   for (_, tributary) in tributaries {
-    final_block = final_block.or_else(|| Some(tributary.tip()));
-    if tributary.tip() != final_block.unwrap() {
+    if final_block.is_none() {
+      final_block = Some(tributary.tip().await);
+    }
+    if tributary.tip().await != final_block.unwrap() {
       panic!("tributary had different tip");
     }
   }

View file

@@ -47,7 +47,7 @@ async fn dkg_test() {
     txs.push(tx);
   }

-  let block_before_tx = tributaries[0].1.tip();
+  let block_before_tx = tributaries[0].1.tip().await;

   // Publish all commitments but one
   for (i, tx) in txs.iter().enumerate().skip(1) {

@@ -87,10 +87,10 @@ async fn dkg_test() {
   // Instantiate a scanner and verify it has nothing to report
   let (mut scanner_db, mut processor) = new_processor(&keys[0], &spec, &tributaries[0].1).await;
-  assert!(processor.0.read().unwrap().is_empty());
+  assert!(processor.0.read().await.is_empty());

   // Publish the last commitment
-  let block_before_tx = tributaries[0].1.tip();
+  let block_before_tx = tributaries[0].1.tip().await;
   assert!(tributaries[0].1.add_transaction(txs[0].clone()).await);
   wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
   sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

@@ -98,7 +98,7 @@ async fn dkg_test() {
   // Verify the scanner emits a KeyGen::Commitments message
   handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1).await;
   {
-    let mut msgs = processor.0.write().unwrap();
+    let mut msgs = processor.0.write().await;
     assert_eq!(msgs.pop_front().unwrap(), expected_commitments);
     assert!(msgs.is_empty());
   }

@@ -106,7 +106,7 @@ async fn dkg_test() {
   // Verify all keys exhibit this scanner behavior
   for (i, key) in keys.iter().enumerate() {
     let (_, processor) = new_processor(key, &spec, &tributaries[i].1).await;
-    let mut msgs = processor.0.write().unwrap();
+    let mut msgs = processor.0.write().await;
     assert_eq!(msgs.pop_front().unwrap(), expected_commitments);
     assert!(msgs.is_empty());
   }

@@ -128,7 +128,7 @@ async fn dkg_test() {
     txs.push(tx);
   }

-  let block_before_tx = tributaries[0].1.tip();
+  let block_before_tx = tributaries[0].1.tip().await;
   for (i, tx) in txs.iter().enumerate().skip(1) {
     assert!(tributaries[i].1.add_transaction(tx.clone()).await);
   }

@@ -138,10 +138,10 @@ async fn dkg_test() {
   // With just 4 sets of shares, nothing should happen yet
   handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1).await;
-  assert!(processor.0.write().unwrap().is_empty());
+  assert!(processor.0.write().await.is_empty());

   // Publish the final set of shares
-  let block_before_tx = tributaries[0].1.tip();
+  let block_before_tx = tributaries[0].1.tip().await;
   assert!(tributaries[0].1.add_transaction(txs[0].clone()).await);
   wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
   sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

@@ -170,7 +170,7 @@ async fn dkg_test() {
   // Any scanner which has handled the prior blocks should only emit the new event
   handle_new_blocks(&mut scanner_db, &keys[0], &mut processor, &spec, &tributaries[0].1).await;
   {
-    let mut msgs = processor.0.write().unwrap();
+    let mut msgs = processor.0.write().await;
     assert_eq!(msgs.pop_front().unwrap(), shares_for(0));
     assert!(msgs.is_empty());
   }

@@ -178,7 +178,7 @@ async fn dkg_test() {
   // Yet new scanners should emit all events
   for (i, key) in keys.iter().enumerate() {
     let (_, processor) = new_processor(key, &spec, &tributaries[i].1).await;
-    let mut msgs = processor.0.write().unwrap();
+    let mut msgs = processor.0.write().await;
     assert_eq!(msgs.pop_front().unwrap(), expected_commitments);
     assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
     assert!(msgs.is_empty());

View file

@@ -34,7 +34,7 @@ async fn tx_test() {
   OsRng.fill_bytes(&mut commitments);

   // Create the TX with a null signature so we can get its sig hash
-  let block_before_tx = tributaries[sender].1.tip();
+  let block_before_tx = tributaries[sender].1.tip().await;
   let mut tx =
     Transaction::DkgCommitments(attempt, commitments.clone(), Transaction::empty_signed());
   tx.sign(&mut OsRng, spec.genesis(), &key, 0);

@@ -46,7 +46,7 @@ async fn tx_test() {
   // All tributaries should have acknowledged this transaction in a block
   for (_, tributary) in tributaries {
-    let block = tributary.block(&included_in).unwrap();
+    let block = tributary.block(&included_in).await.unwrap();
     assert_eq!(block.transactions, vec![tx.clone()]);
   }
 }

View file

@@ -300,14 +300,14 @@ pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(
   let last_block = db.last_block(tributary.genesis());

   // Check if there's been a new Tributary block
-  let latest = tributary.tip();
+  let latest = tributary.tip().await;
   if latest == last_block {
     return;
   }

   let mut blocks = VecDeque::new();
   // This is a new block, as per the prior if check
-  blocks.push_back(tributary.block(&latest).unwrap());
+  blocks.push_back(tributary.block(&latest).await.unwrap());
   let mut block = None;
   while {

@@ -317,7 +317,7 @@ pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(
       false
     } else {
       // Get this block
-      block = Some(tributary.block(&parent).unwrap());
+      block = Some(tributary.block(&parent).await.unwrap());
       // If it's the last block we've scanned, it's the end. Else, push it
       block.as_ref().unwrap().hash() != last_block
     }

View file

@@ -34,8 +34,5 @@ tendermint = { package = "tendermint-machine", path = "./tendermint" }
 tokio = { version = "1", features = ["macros", "sync", "time", "rt"] }

-[dev-dependencies]
-zeroize = "^1.5"
-
 [features]
 tests = []

View file

@@ -1,8 +1,5 @@
 use core::fmt::Debug;
-use std::{
-  sync::{Arc, RwLock},
-  io,
-};
+use std::{sync::Arc, io};

 use async_trait::async_trait;

@@ -20,7 +17,7 @@ use ::tendermint::{
 use serai_db::Db;

-use tokio::sync::RwLock as AsyncRwLock;
+use tokio::sync::RwLock;

 mod merkle;
 pub(crate) use merkle::*;

@@ -90,7 +87,7 @@ pub struct Tributary<D: Db, T: Transaction, P: P2p> {
   network: TendermintNetwork<D, T, P>,
   synced_block: SyncedBlockSender<TendermintNetwork<D, T, P>>,
-  messages: Arc<AsyncRwLock<MessageSender<TendermintNetwork<D, T, P>>>>,
+  messages: Arc<RwLock<MessageSender<TendermintNetwork<D, T, P>>>>,
 }

 impl<D: Db, T: Transaction, P: P2p> Tributary<D, T, P> {

@@ -124,7 +121,7 @@ impl<D: Db, T: Transaction, P: P2p> Tributary<D, T, P> {
       TendermintMachine::new(network.clone(), block_number, start_time, proposal).await;
     tokio::task::spawn(machine.run());

-    Some(Self { genesis, network, synced_block, messages: Arc::new(AsyncRwLock::new(messages)) })
+    Some(Self { genesis, network, synced_block, messages: Arc::new(RwLock::new(messages)) })
   }

   pub fn block_time() -> u32 {

@@ -134,34 +131,37 @@ impl<D: Db, T: Transaction, P: P2p> Tributary<D, T, P> {
   pub fn genesis(&self) -> [u8; 32] {
     self.genesis
   }

-  pub fn block_number(&self) -> u32 {
-    self.network.blockchain.read().unwrap().block_number()
+  // TODO: block, time_of_block, and commit shouldn't require acquiring the read lock
+  // These values can be safely read directly from the database since they're static
+  pub async fn block_number(&self) -> u32 {
+    self.network.blockchain.read().await.block_number()
   }
-  pub fn tip(&self) -> [u8; 32] {
-    self.network.blockchain.read().unwrap().tip()
+  pub async fn tip(&self) -> [u8; 32] {
+    self.network.blockchain.read().await.tip()
   }
-  pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
-    self.network.blockchain.read().unwrap().block(hash)
+  pub async fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
+    self.network.blockchain.read().await.block(hash)
   }
-  pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
+  pub async fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
     self
       .network
       .blockchain
       .read()
-      .unwrap()
+      .await
       .commit(hash)
       .map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
   }
-  pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
-    self.network.blockchain.read().unwrap().commit(hash)
+  pub async fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
+    self.network.blockchain.read().await.commit(hash)
   }

-  pub fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
-    self.network.blockchain.write().unwrap().provide_transaction(tx)
+  pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
+    self.network.blockchain.write().await.provide_transaction(tx)
  }

-  pub fn next_nonce(&self, signer: <Ristretto as Ciphersuite>::G) -> Option<u32> {
-    self.network.blockchain.read().unwrap().next_nonce(signer)
+  pub async fn next_nonce(&self, signer: <Ristretto as Ciphersuite>::G) -> Option<u32> {
+    self.network.blockchain.read().await.next_nonce(signer)
   }

   // Returns if the transaction was valid.

@@ -170,7 +170,7 @@ impl<D: Db, T: Transaction, P: P2p> Tributary<D, T, P> {
   pub async fn add_transaction(&self, tx: T) -> bool {
     let mut to_broadcast = vec![TRANSACTION_MESSAGE];
     tx.write(&mut to_broadcast).unwrap();
-    let res = self.network.blockchain.write().unwrap().add_transaction(true, tx);
+    let res = self.network.blockchain.write().await.add_transaction(true, tx);
     if res {
       self.network.p2p.broadcast(self.genesis, to_broadcast).await;
     }

@@ -181,7 +181,7 @@ impl<D: Db, T: Transaction, P: P2p> Tributary<D, T, P> {
   // TODO: Since we have a static validator set, we should only need the tail commit?
   pub async fn sync_block(&mut self, block: Block<T>, commit: Vec<u8>) -> bool {
     let (tip, block_number) = {
-      let blockchain = self.network.blockchain.read().unwrap();
+      let blockchain = self.network.blockchain.read().await;
       (blockchain.tip(), blockchain.block_number())
     };

@@ -215,7 +215,7 @@ impl<D: Db, T: Transaction, P: P2p> Tributary<D, T, P> {
         // TODO: Sync mempools with fellow peers
        // Can we just rebroadcast transactions not included for at least two blocks?
-        let res = self.network.blockchain.write().unwrap().add_transaction(false, tx);
+        let res = self.network.blockchain.write().await.add_transaction(false, tx);
         log::debug!("received transaction message. valid new transaction: {res}");
         res
       }

View file

@@ -1,8 +1,5 @@
 use core::ops::Deref;
-use std::{
-  sync::{Arc, RwLock},
-  collections::HashMap,
-};
+use std::{sync::Arc, collections::HashMap};

 use async_trait::async_trait;

@@ -34,7 +31,10 @@ use tendermint::{
   },
 };

-use tokio::time::{Duration, sleep};
+use tokio::{
+  sync::RwLock,
+  time::{Duration, sleep},
+};

 use crate::{
   TENDERMINT_MESSAGE, ReadWrite, Transaction, BlockHeader, Block, BlockError, Blockchain, P2p,

@@ -273,7 +273,7 @@ impl<D: Db, T: Transaction, P: P2p> Network for TendermintNetwork<D, T, P> {
   async fn validate(&mut self, block: &Self::Block) -> Result<(), TendermintBlockError> {
     let block =
       Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?;
-    self.blockchain.read().unwrap().verify_block(&block).map_err(|e| match e {
+    self.blockchain.read().await.verify_block(&block).map_err(|e| match e {
       BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal,
       _ => TendermintBlockError::Fatal,
     })

@@ -301,7 +301,7 @@ impl<D: Db, T: Transaction, P: P2p> Network for TendermintNetwork<D, T, P> {
     };

     loop {
-      let block_res = self.blockchain.write().unwrap().add_block(&block, commit.encode());
+      let block_res = self.blockchain.write().await.add_block(&block, commit.encode());
       match block_res {
         Ok(()) => break,
         Err(BlockError::NonLocalProvided(hash)) => {

@@ -316,6 +316,6 @@ impl<D: Db, T: Transaction, P: P2p> Network for TendermintNetwork<D, T, P> {
       }
     }

-    Some(TendermintBlock(self.blockchain.write().unwrap().build_block().serialize()))
+    Some(TendermintBlock(self.blockchain.write().await.build_block().serialize()))
   }
 }