Mirror of https://github.com/serai-dex/serai.git (synced 2024-11-17 01:17:36 +00:00)

Move recognized_id from a channel to an async lambda
Fixes a race condition. Also fixes recognizing batch IDs.
parent ea8e26eca3
commit 32df302cc4
5 changed files with 128 additions and 110 deletions
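
The deleted TODO below spells out the race this commit fixes: recognized IDs used to be pushed down an mpsc::unbounded_channel, so the Tributary scanner could mark a block as handled before the receiver had actually queued the corresponding preprocess. Awaiting an async callback inline makes that ordering explicit. The following is a minimal sketch of the pattern, assuming tokio for the runtime; handle_event and the locally redefined RecognizedIdType are illustrative stand-ins, not the coordinator's actual API:

use core::future::Future;

#[allow(dead_code)]
#[derive(Clone, Copy, Debug)]
enum RecognizedIdType {
  Block,
  Plan,
}

// Hypothetical scanner step: the callback is awaited *before* this function
// returns, so whatever marks the event as handled afterwards cannot run early.
// With an UnboundedSender, send() returns immediately and the receiver races
// that bookkeeping.
async fn handle_event<FRid, RID>(recognized_id: RID, id_type: RecognizedIdType, id: [u8; 32])
where
  FRid: Future<Output = Vec<[u8; 32]>>,
  RID: Clone + Fn(RecognizedIdType, [u8; 32]) -> FRid,
{
  let ids = recognized_id(id_type, id).await;
  println!("recognized {} ID(s) for {:?}", ids.len(), id_type);
  // ... only now is it safe to persist "this event was handled" ...
}

#[tokio::main]
async fn main() {
  // Stand-in for the coordinator's closure: immediately recognizes the one ID.
  handle_event(|_, id| async move { vec![id] }, RecognizedIdType::Block, [0; 32]).await;
}

This is the shape the diff threads through scan_tributaries, handle_new_blocks, handle_block, and handle_application_tx: the callback and its returned future are two generic parameters, and the call is awaited before the block is recorded as handled.
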
@@ -68,7 +68,7 @@ impl<'a, D: Db> MainDb<'a, D> {
   ) {
     let key = Self::batches_in_block_key(network, block.0);
     let Some(mut existing) = txn.get(&key) else {
-      txn.put(&key, block.0);
+      txn.put(&key, id);
       return;
     };

@@ -77,7 +77,7 @@ impl<'a, D: Db> MainDb<'a, D> {
       return;
     }

-    existing.extend(block.0);
+    existing.extend(id);
     txn.put(&key, existing);
   }

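The two hunks above are the "also fixes recognizing batch IDs" part of the commit message: the per-block value must be a concatenation of 32-byte batch IDs, but the old code appended the block hash (block.0) instead, so reading the list back yielded block hashes. A rough stand-in for the corrected bookkeeping, using a plain HashMap instead of serai-db and keyed only by block hash for brevity (the duplicate check is an assumption based on the surrounding lines):

use std::collections::HashMap;

#[derive(Default)]
struct BatchesInBlock {
  // block hash -> concatenation of 32-byte batch IDs
  map: HashMap<[u8; 32], Vec<u8>>,
}

impl BatchesInBlock {
  // The fix: append `id` (the batch ID), not `block` itself.
  fn add(&mut self, block: [u8; 32], id: [u8; 32]) {
    let existing = self.map.entry(block).or_default();
    // Skip duplicates so handling the same message twice stays idempotent.
    if existing.chunks(32).any(|existing_id| existing_id == id.as_slice()) {
      return;
    }
    existing.extend(id);
  }

  fn batches_in_block(&self, block: [u8; 32]) -> Vec<[u8; 32]> {
    let Some(ids) = self.map.get(&block) else { return vec![] };
    ids.chunks(32).map(|id| <[u8; 32]>::try_from(id).unwrap()).collect()
  }
}

fn main() {
  let mut db = BatchesInBlock::default();
  let block = [1; 32];
  db.add(block, [2; 32]);
  db.add(block, [2; 32]); // duplicate, ignored
  db.add(block, [3; 32]);
  let expected: Vec<[u8; 32]> = vec![[2; 32], [3; 32]];
  assert_eq!(db.batches_in_block(block), expected);
}
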
@@ -2,7 +2,7 @@
 #![allow(unreachable_code)]
 #![allow(clippy::diverging_sub_expression)]

-use core::ops::Deref;
+use core::{ops::Deref, future::Future};
 use std::{
   sync::Arc,
   time::{SystemTime, Duration},

@@ -21,13 +21,7 @@ use serai_client::{primitives::NetworkId, Public, Serai};

 use message_queue::{Service, client::MessageQueue};

-use tokio::{
-  sync::{
-    mpsc::{self, UnboundedSender},
-    RwLock,
-  },
-  time::sleep,
-};
+use tokio::{sync::RwLock, time::sleep};

 use ::tributary::{
   ReadWrite, ProvidedError, TransactionKind, TransactionTrait, Block, Tributary, TributaryReader,

@@ -143,10 +137,16 @@ pub async fn scan_substrate<D: Db, Pro: Processors>(
 }

 #[allow(clippy::type_complexity)]
-pub async fn scan_tributaries<D: Db, Pro: Processors, P: P2p>(
+pub async fn scan_tributaries<
+  D: Db,
+  Pro: Processors,
+  P: P2p,
+  FRid: Future<Output = Vec<[u8; 32]>>,
+  RID: Clone + Fn(NetworkId, [u8; 32], RecognizedIdType, [u8; 32]) -> FRid,
+>(
   raw_db: D,
   key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-  recognized_id_send: UnboundedSender<(NetworkId, [u8; 32], RecognizedIdType, [u8; 32])>,
+  recognized_id: RID,
   p2p: P,
   processors: Pro,
   serai: Arc<Serai>,
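
Two things about the new signature: the returned future gets its own type parameter (FRid) because a bound like `Fn(...) -> impl Future` isn't expressible in a where-clause on stable Rust, and the callback is `Clone` so the scan loop can hand a fresh clone to the inner scanner on every pass (visible in the next hunk as `recognized_id.clone()`). A minimal sketch of that threading, with invented names (outer_scan, handle_one) standing in for scan_tributaries and handle_new_blocks:

use core::future::Future;

// Stand-in for tributary::scanner::handle_new_blocks: consumes one clone of the callback.
async fn handle_one<FRid, RID>(block: [u8; 32], recognized_id: RID) -> Vec<[u8; 32]>
where
  FRid: Future<Output = Vec<[u8; 32]>>,
  RID: Clone + Fn([u8; 32]) -> FRid,
{
  recognized_id(block).await
}

// Stand-in for scan_tributaries: the real function loops forever, so it can't
// give the callback away; it passes a clone into each inner call instead.
async fn outer_scan<FRid, RID>(blocks: Vec<[u8; 32]>, recognized_id: RID)
where
  FRid: Future<Output = Vec<[u8; 32]>>,
  RID: Clone + Fn([u8; 32]) -> FRid,
{
  for block in blocks {
    let ids = handle_one(block, recognized_id.clone()).await;
    println!("{} recognized ID(s)", ids.len());
  }
}

#[tokio::main]
async fn main() {
  outer_scan(vec![[0; 32], [7; 32]], |block| async move { vec![block] }).await;
}
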
@@ -184,10 +184,10 @@ pub async fn scan_tributaries<D: Db, Pro: Processors, P: P2p>(
     }

     for (spec, reader) in &tributary_readers {
-      tributary::scanner::handle_new_blocks::<_, _, _, _, P>(
+      tributary::scanner::handle_new_blocks::<_, _, _, _, _, _, P>(
         &mut tributary_db,
         &key,
-        &recognized_id_send,
+        recognized_id.clone(),
         &processors,
         |set, tx| {
           let serai = serai.clone();

@@ -537,6 +537,12 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
             Some(Transaction::SubstrateBlock(block))
           }
           coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocess } => {
+            log::info!(
+              "informed of batch (sign ID {}, attempt {}) for block {}",
+              hex::encode(id.id),
+              id.attempt,
+              hex::encode(block),
+            );
             // If this is the first attempt instance, synchronize around the block first
             if id.attempt == 0 {
               // Save the preprocess to disk so we can publish it later

@@ -678,51 +684,42 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
       .await;
   }

-  // Handle new blocks for each Tributary
-  // TODO: This channel is unsafe. The Tributary may send an event, which then is marked handled,
-  // before it actually is. This must be a blocking function.
-  let (recognized_id_send, mut recognized_id_recv) = mpsc::unbounded_channel();
-  {
-    let raw_db = raw_db.clone();
-    tokio::spawn(scan_tributaries(
-      raw_db,
-      key.clone(),
-      recognized_id_send,
-      p2p.clone(),
-      processors.clone(),
-      serai.clone(),
-      tributaries.clone(),
-    ));
-  }
-
-  // When we reach consensus on a new external block, send our BatchPreprocess for it
-  tokio::spawn({
-    let raw_db = raw_db.clone();
-    let key = key.clone();
-    let tributaries = tributaries.clone();
-    async move {
-      loop {
-        if let Some((network, genesis, id_type, id)) = recognized_id_recv.recv().await {
-          let txs = match id_type {
-            RecognizedIdType::Block => {
-              let mut txs = vec![];
-              for id in MainDb::<D>::batches_in_block(&raw_db, network, id) {
-                txs.push(Transaction::BatchPreprocess(SignData {
-                  plan: id,
-                  attempt: 0,
-                  data: MainDb::<D>::first_preprocess(&raw_db, id),
-                  signed: Transaction::empty_signed(),
-                }));
-              }
-              txs
-            }
-
-            RecognizedIdType::Plan => vec![Transaction::SignPreprocess(SignData {
-              plan: id,
-              attempt: 0,
-              data: MainDb::<D>::first_preprocess(&raw_db, id),
-              signed: Transaction::empty_signed(),
-            })],
-          };
-
-          let tributaries = tributaries.read().await;
+  // When we reach synchrony on an event requiring signing, send our preprocess for it
+  let recognized_id = {
+    let raw_db = raw_db.clone();
+    let key = key.clone();
+    let tributaries = tributaries.clone();
+    move |network, genesis, id_type, id| {
+      let raw_db = raw_db.clone();
+      let key = key.clone();
+      let tributaries = tributaries.clone();
+      async move {
+        let (ids, txs) = match id_type {
+          RecognizedIdType::Block => {
+            let block = id;
+
+            let ids = MainDb::<D>::batches_in_block(&raw_db, network, block);
+            let mut txs = vec![];
+            for id in &ids {
+              txs.push(Transaction::BatchPreprocess(SignData {
+                plan: *id,
+                attempt: 0,
+                data: MainDb::<D>::first_preprocess(&raw_db, *id),
+                signed: Transaction::empty_signed(),
+              }));
+            }
+            (ids, txs)
+          }
+
+          RecognizedIdType::Plan => (
+            vec![id],
+            vec![Transaction::SignPreprocess(SignData {
+              plan: id,
+              attempt: 0,
+              data: MainDb::<D>::first_preprocess(&raw_db, id),
+              signed: Transaction::empty_signed(),
+            })],
+          ),
+        };
+
+        let tributaries = tributaries.read().await;
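
The `recognized_id` closure above clones its captures twice: once into the `move` closure itself, and again inside the body so each returned `async move` future owns its own handles. That per-call clone is what keeps the closure `Fn` (callable repeatedly) and `Clone`, rather than `FnOnce`. A small sketch of the same construction, assuming tokio and with an invented Handle type standing in for raw_db/key/tributaries:

use std::sync::Arc;

// Invented stand-in for the handles the real closure captures.
#[derive(Clone)]
struct Handle(Arc<String>);

#[tokio::main]
async fn main() {
  let handle = Handle(Arc::new("db".to_string()));

  // Outer clone: moved into the closure so the original `handle` stays usable.
  let recognized_id = {
    let handle = handle.clone();
    move |id: [u8; 32]| {
      // Per-call clone: each returned future owns its own copy, so calling the
      // closure only needs `&self` (it stays Fn + Clone, not FnOnce).
      let handle = handle.clone();
      async move {
        println!("recognizing {:02x?} against {}", &id[.. 4], handle.0);
        vec![id]
      }
    }
  };

  // The closure, and any clone of it, can be called any number of times.
  let again = recognized_id.clone();
  recognized_id([1; 32]).await;
  again([2; 32]).await;
}

Returning the recognized IDs (Vec<[u8; 32]>) from the future is what lets handle_application_tx walk every batch ID a finalized block produced, which is the batch-ID fix noted in the commit message.
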

@@ -742,13 +739,25 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(

           publish_transaction(&tributary, tx).await;
         }
-      } else {
-        log::warn!("recognized_id_send was dropped. are we shutting down?");
-        break;
-      }
-      }
-    }
-  });
+
+        ids
+      }
+    }
+  };
+
+  // Handle new blocks for each Tributary
+  {
+    let raw_db = raw_db.clone();
+    tokio::spawn(scan_tributaries(
+      raw_db,
+      key.clone(),
+      recognized_id,
+      p2p.clone(),
+      processors.clone(),
+      serai.clone(),
+      tributaries.clone(),
+    ));
+  }

   // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block
   // in a while (presumably because we're behind)

@@ -11,7 +11,7 @@ use frost::Participant;

 use sp_runtime::traits::Verify;

-use tokio::{time::sleep, sync::mpsc};
+use tokio::time::sleep;

 use serai_db::{DbTxn, Db, MemDb};

@@ -83,11 +83,12 @@ async fn dkg_test() {
   ) -> (TributaryDb<MemDb>, MemProcessors) {
     let mut scanner_db = TributaryDb(MemDb::new());
     let processors = MemProcessors::new();
-    // Uses a brand new channel since this channel won't be used within this test
-    handle_new_blocks::<_, _, _, _, LocalP2p>(
+    handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
       &mut scanner_db,
       key,
-      &mpsc::unbounded_channel().0,
+      |_, _, _, _| async {
+        panic!("provided TX caused recognized_id to be called in new_processors")
+      },
       &processors,
       |_, _| async { panic!("test tried to publish a new Serai TX in new_processors") },
       spec,

@@ -108,10 +109,12 @@ async fn dkg_test() {
   sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

   // Verify the scanner emits a KeyGen::Commitments message
-  handle_new_blocks::<_, _, _, _, LocalP2p>(
+  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
     &mut scanner_db,
     &keys[0],
-    &mpsc::unbounded_channel().0,
+    |_, _, _, _| async {
+      panic!("provided TX caused recognized_id to be called after Commitments")
+    },
     &processors,
     |_, _| async { panic!("test tried to publish a new Serai TX after Commitments") },
     &spec,

@@ -186,10 +189,12 @@ async fn dkg_test() {
   }

   // With just 4 sets of shares, nothing should happen yet
-  handle_new_blocks::<_, _, _, _, LocalP2p>(
+  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
     &mut scanner_db,
     &keys[0],
-    &mpsc::unbounded_channel().0,
+    |_, _, _, _| async {
+      panic!("provided TX caused recognized_id to be called after some shares")
+    },
     &processors,
     |_, _| async { panic!("test tried to publish a new Serai TX after some shares") },
     &spec,

@@ -227,10 +232,10 @@ async fn dkg_test() {
   };

   // Any scanner which has handled the prior blocks should only emit the new event
-  handle_new_blocks::<_, _, _, _, LocalP2p>(
+  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
     &mut scanner_db,
     &keys[0],
-    &mpsc::unbounded_channel().0,
+    |_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
     &processors,
     |_, _| async { panic!("test tried to publish a new Serai TX") },
     &spec,

@@ -294,10 +299,12 @@ async fn dkg_test() {
   }

   // The scanner should successfully try to publish a transaction with a validly signed signature
-  handle_new_blocks::<_, _, _, _, LocalP2p>(
+  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
     &mut scanner_db,
     &keys[0],
-    &mpsc::unbounded_channel().0,
+    |_, _, _, _| async {
+      panic!("provided TX caused recognized_id to be called after DKG confirmation")
+    },
     &processors,
     |set, tx| {
       let spec = spec.clone();
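
In the tests, the channel stub (`&mpsc::unbounded_channel().0`) becomes a closure whose future panics if it is ever awaited. That type-checks against `FRid: Future<Output = Vec<[u8; 32]>>` because the async block's body diverges and `!` coerces to the required output type. A standalone check of that claim, with the invented helper expect_unused mirroring the bound used in this commit:

use core::future::Future;

// Same callback shape as handle_new_blocks expects (reduced to one argument).
fn expect_unused<FRid, RID>(_recognized_id: RID)
where
  FRid: Future<Output = Vec<[u8; 32]>>,
  RID: Clone + Fn([u8; 32]) -> FRid,
{
  // A test path that never reaches the relevant transaction never calls the callback.
}

fn main() {
  // Compiles: the async block panics, so its output unifies with Vec<[u8; 32]>
  // via never-type coercion.
  expect_unused(|_| async { panic!("recognized_id should not be called in this test") });
}
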
@@ -15,8 +15,6 @@ use frost::{
 };
 use frost_schnorrkel::Schnorrkel;

-use tokio::sync::mpsc::UnboundedSender;
-
 use serai_client::{
   Signature,
   primitives::NetworkId,

@@ -224,8 +222,10 @@ pub fn generated_key_pair<D: Db>(
 pub async fn handle_application_tx<
   D: Db,
   Pro: Processors,
-  F: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, Encoded) -> F,
+  FPst: Future<Output = ()>,
+  PST: Clone + Fn(ValidatorSet, Encoded) -> FPst,
+  FRid: Future<Output = Vec<[u8; 32]>>,
+  RID: Clone + Fn(NetworkId, [u8; 32], RecognizedIdType, [u8; 32]) -> FRid,
 >(
   tx: Transaction,
   spec: &TributarySpec,

@@ -233,7 +233,7 @@ pub async fn handle_application_tx<
   publish_serai_tx: PST,
   genesis: [u8; 32],
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  recognized_id: &UnboundedSender<(NetworkId, [u8; 32], RecognizedIdType, [u8; 32])>,
+  recognized_id: RID,
   txn: &mut <D as Db>::Transaction<'_>,
 ) {
   // Used to determine if an ID is acceptable

@@ -431,11 +431,10 @@ pub async fn handle_application_tx<
     }

     Transaction::ExternalBlock(block) => {
-      // Because this external block has been finalized, its batch ID should be authorized
-      TributaryDb::<D>::recognize_id(txn, Zone::Batch.label(), genesis, block);
-      recognized_id
-        .send((spec.set().network, genesis, RecognizedIdType::Block, block))
-        .expect("recognized_id_recv was dropped. are we shutting down?");
+      // Because this external block has been finalized, its batch IDs should be authorized
+      for id in recognized_id(spec.set().network, genesis, RecognizedIdType::Block, block).await {
+        TributaryDb::<D>::recognize_id(txn, Zone::Batch.label(), genesis, id);
+      }
     }

     Transaction::SubstrateBlock(block) => {

@@ -446,9 +445,10 @@ pub async fn handle_application_tx<

       for id in plan_ids {
         TributaryDb::<D>::recognize_id(txn, Zone::Sign.label(), genesis, id);
-        recognized_id
-          .send((spec.set().network, genesis, RecognizedIdType::Plan, id))
-          .expect("recognized_id_recv was dropped. are we shutting down?");
+        assert_eq!(
+          recognized_id(spec.set().network, genesis, RecognizedIdType::Plan, id).await,
+          vec![id]
+        );
       }
     }

@@ -8,8 +8,6 @@ use serai_client::{
   primitives::NetworkId, validator_sets::primitives::ValidatorSet, subxt::utils::Encoded,
 };

-use tokio::sync::mpsc::UnboundedSender;
-
 use tributary::{
   Transaction as TributaryTransaction, Block, TributaryReader,
   tendermint::{

@@ -39,13 +37,15 @@ pub enum RecognizedIdType {
 async fn handle_block<
   D: Db,
   Pro: Processors,
-  F: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, Encoded) -> F,
+  FPst: Future<Output = ()>,
+  PST: Clone + Fn(ValidatorSet, Encoded) -> FPst,
+  FRid: Future<Output = Vec<[u8; 32]>>,
+  RID: Clone + Fn(NetworkId, [u8; 32], RecognizedIdType, [u8; 32]) -> FRid,
   P: P2p,
 >(
   db: &mut TributaryDb<D>,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  recognized_id: &UnboundedSender<(NetworkId, [u8; 32], RecognizedIdType, [u8; 32])>,
+  recognized_id: RID,
   processors: &Pro,
   publish_serai_tx: PST,
   spec: &TributarySpec,

@@ -79,14 +79,14 @@ async fn handle_block<
         // TODO: disconnect the node from network/ban from further participation in Tributary
       }
       TributaryTransaction::Application(tx) => {
-        handle_application_tx::<D, _, _, _>(
+        handle_application_tx::<D, _, _, _, _, _>(
           tx,
           spec,
           processors,
           publish_serai_tx.clone(),
           genesis,
           key,
-          recognized_id,
+          recognized_id.clone(),
           &mut txn,
         )
         .await;

@@ -105,13 +105,15 @@
 pub async fn handle_new_blocks<
   D: Db,
   Pro: Processors,
-  F: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, Encoded) -> F,
+  FPst: Future<Output = ()>,
+  PST: Clone + Fn(ValidatorSet, Encoded) -> FPst,
+  FRid: Future<Output = Vec<[u8; 32]>>,
+  RID: Clone + Fn(NetworkId, [u8; 32], RecognizedIdType, [u8; 32]) -> FRid,
   P: P2p,
 >(
   db: &mut TributaryDb<D>,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  recognized_id: &UnboundedSender<(NetworkId, [u8; 32], RecognizedIdType, [u8; 32])>,
+  recognized_id: RID,
   processors: &Pro,
   publish_serai_tx: PST,
   spec: &TributarySpec,

@@ -121,10 +123,10 @@ pub async fn handle_new_blocks<
   let mut last_block = db.last_block(genesis);
   while let Some(next) = tributary.block_after(&last_block) {
     let block = tributary.block(&next).unwrap();
-    handle_block::<_, _, _, _, P>(
+    handle_block::<_, _, _, _, _, _, P>(
       db,
       key,
-      recognized_id,
+      recognized_id.clone(),
       processors,
       publish_serai_tx.clone(),
       spec,