Mirror of https://github.com/serai-dex/serai.git, synced 2025-01-12 13:55:28 +00:00

Remove async-trait from processor/

Part of https://github.com/serai-dex/issues/607.

This commit is contained in:
  parent 2c4c33e632
  commit e78236276a

29 changed files with 1481 additions and 1378 deletions
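The pattern applied throughout this diff replaces `#[async_trait]` trait methods with plain trait methods returning `impl Send + Future<Output = ...>`, moving each method body into an `async move` block. A minimal sketch of the transformation on a made-up trait (not code from this repository), assuming a toolchain with return-position `impl Trait` in traits (Rust 1.75+):

  use core::future::Future;

  // Before (hypothetical): the trait needed the async-trait proc macro, which boxes every call.
  //
  //   #[async_trait::async_trait]
  //   trait Publish {
  //     async fn publish(&self, tx: Vec<u8>) -> Result<(), String>;
  //   }

  // After: the same contract without the macro or the boxing, at the cost of spelling out
  // the returned future, exactly as the hunks below do.
  trait Publish {
    fn publish(&self, tx: Vec<u8>) -> impl Send + Future<Output = Result<(), String>>;
  }

  struct Printer;
  impl Publish for Printer {
    fn publish(&self, tx: Vec<u8>) -> impl Send + Future<Output = Result<(), String>> {
      // The former method body moves into an `async move` block.
      async move {
        println!("publishing {} bytes", tx.len());
        Ok(())
      }
    }
  }

  fn main() {
    // Driving the returned future requires an executor, e.g. with tokio:
    //   Printer.publish(vec![1, 2, 3]).await.unwrap();
  }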
@@ -17,7 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-async-trait = { version = "0.1", default-features = false }
 zeroize = { version = "1", default-features = false, features = ["std"] }

 hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -1,3 +1,4 @@
+use core::future::Future;
 use std::sync::{LazyLock, Arc, Mutex};

 use tokio::sync::mpsc;
@@ -169,59 +170,74 @@ impl Coordinator {
   }
 }

-#[async_trait::async_trait]
 impl signers::Coordinator for CoordinatorSend {
   type EphemeralError = ();

-  async fn send(
+  fn send(
     &mut self,
     msg: messages::sign::ProcessorMessage,
-  ) -> Result<(), Self::EphemeralError> {
-    self.send(&messages::ProcessorMessage::Sign(msg));
-    Ok(())
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
+    async move {
+      self.send(&messages::ProcessorMessage::Sign(msg));
+      Ok(())
+    }
   }

-  async fn publish_cosign(
+  fn publish_cosign(
     &mut self,
     block_number: u64,
     block: [u8; 32],
     signature: Signature,
-  ) -> Result<(), Self::EphemeralError> {
-    self.send(&messages::ProcessorMessage::Coordinator(
-      messages::coordinator::ProcessorMessage::CosignedBlock {
-        block_number,
-        block,
-        signature: signature.encode(),
-      },
-    ));
-    Ok(())
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
+    async move {
+      self.send(&messages::ProcessorMessage::Coordinator(
+        messages::coordinator::ProcessorMessage::CosignedBlock {
+          block_number,
+          block,
+          signature: signature.encode(),
+        },
+      ));
+      Ok(())
+    }
   }

-  async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError> {
-    self.send(&messages::ProcessorMessage::Substrate(
-      messages::substrate::ProcessorMessage::Batch { batch },
-    ));
-    Ok(())
+  fn publish_batch(
+    &mut self,
+    batch: Batch,
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
+    async move {
+      self.send(&messages::ProcessorMessage::Substrate(
+        messages::substrate::ProcessorMessage::Batch { batch },
+      ));
+      Ok(())
+    }
   }

-  async fn publish_signed_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError> {
-    self.send(&messages::ProcessorMessage::Coordinator(
-      messages::coordinator::ProcessorMessage::SignedBatch { batch },
-    ));
-    Ok(())
+  fn publish_signed_batch(
+    &mut self,
+    batch: SignedBatch,
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
+    async move {
+      self.send(&messages::ProcessorMessage::Coordinator(
+        messages::coordinator::ProcessorMessage::SignedBatch { batch },
+      ));
+      Ok(())
+    }
   }

-  async fn publish_slash_report_signature(
+  fn publish_slash_report_signature(
     &mut self,
     session: Session,
     signature: Signature,
-  ) -> Result<(), Self::EphemeralError> {
-    self.send(&messages::ProcessorMessage::Coordinator(
-      messages::coordinator::ProcessorMessage::SignedSlashReport {
-        session,
-        signature: signature.encode(),
-      },
-    ));
-    Ok(())
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
+    async move {
+      self.send(&messages::ProcessorMessage::Coordinator(
+        messages::coordinator::ProcessorMessage::SignedSlashReport {
+          session,
+          signature: signature.encode(),
+        },
+      ));
+      Ok(())
+    }
   }
 }
@@ -17,7 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-async-trait = { version = "0.1", default-features = false }
 rand_core = { version = "0.6", default-features = false }

 hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -96,7 +96,6 @@ use serai_client::{
 */

 /*
-#[async_trait]
 impl TransactionTrait<Bitcoin> for Transaction {
   #[cfg(test)]
   async fn fee(&self, network: &Bitcoin) -> u64 {
@@ -210,7 +209,6 @@ impl Bitcoin {
   }
 }

-#[async_trait]
 impl Network for Bitcoin {
   // 2 inputs should be 2 * 230 = 460 weight units
   // The output should be ~36 bytes, or 144 weight units
@@ -31,7 +31,6 @@ impl<D: Db> fmt::Debug for Block<D> {
   }
 }

-#[async_trait::async_trait]
 impl<D: Db> primitives::Block for Block<D> {
   type Header = BlockHeader;

@@ -1,3 +1,5 @@
+use core::future::Future;
+
 use bitcoin_serai::rpc::{RpcError, Rpc as BRpc};

 use serai_client::primitives::{NetworkId, Coin, Amount};
@@ -18,7 +20,6 @@ pub(crate) struct Rpc<D: Db> {
   pub(crate) rpc: BRpc,
 }

-#[async_trait::async_trait]
 impl<D: Db> ScannerFeed for Rpc<D> {
   const NETWORK: NetworkId = NetworkId::Bitcoin;
   // 6 confirmations is widely accepted as secure and shouldn't occur
@@ -32,71 +33,89 @@ impl<D: Db> ScannerFeed for Rpc<D> {
|
||||||
|
|
||||||
type EphemeralError = RpcError;
|
type EphemeralError = RpcError;
|
||||||
|
|
||||||
async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError> {
|
fn latest_finalized_block_number(
|
||||||
db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError)
|
&self,
|
||||||
|
) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
|
||||||
|
async move { db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) }
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn time_of_block(&self, number: u64) -> Result<u64, Self::EphemeralError> {
|
fn time_of_block(
|
||||||
let number = usize::try_from(number).unwrap();
|
|
||||||
|
|
||||||
/*
|
|
||||||
The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the
|
|
||||||
median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve
|
|
||||||
CLTV). This creates a monotonic median time which we use as the block time.
|
|
||||||
*/
|
|
||||||
// This implements `GetMedianTimePast`
|
|
||||||
let median = {
|
|
||||||
const MEDIAN_TIMESPAN: usize = 11;
|
|
||||||
let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN);
|
|
||||||
for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number {
|
|
||||||
timestamps.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time);
|
|
||||||
}
|
|
||||||
timestamps.sort();
|
|
||||||
timestamps[timestamps.len() / 2]
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
This block's timestamp is guaranteed to be greater than this median:
|
|
||||||
https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9
|
|
||||||
/src/validation.cpp#L4182-L4184
|
|
||||||
|
|
||||||
This does not guarantee the median always increases however. Take the following trivial
|
|
||||||
example, as the window is initially built:
|
|
||||||
|
|
||||||
0 block has time 0 // Prior blocks: []
|
|
||||||
1 block has time 1 // Prior blocks: [0]
|
|
||||||
2 block has time 2 // Prior blocks: [0, 1]
|
|
||||||
3 block has time 2 // Prior blocks: [0, 1, 2]
|
|
||||||
|
|
||||||
These two blocks have the same time (both greater than the median of their prior blocks) and
|
|
||||||
the same median.
|
|
||||||
|
|
||||||
The median will never decrease however. The values pushed onto the window will always be
|
|
||||||
greater than the median. If a value greater than the median is popped, the median will remain
|
|
||||||
the same (due to the counterbalance of the pushed value). If a value less than the median is
|
|
||||||
popped, the median will increase (either to another instance of the same value, yet one
|
|
||||||
closer to the end of the repeating sequence, or to a higher value).
|
|
||||||
*/
|
|
||||||
Ok(median.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn unchecked_block_header_by_number(
|
|
||||||
&self,
|
&self,
|
||||||
number: u64,
|
number: u64,
|
||||||
) -> Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError> {
|
) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
|
||||||
Ok(BlockHeader(
|
async move {
|
||||||
self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?.header,
|
let number = usize::try_from(number).unwrap();
|
||||||
))
|
|
||||||
|
/*
|
||||||
|
The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the
|
||||||
|
median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve
|
||||||
|
CLTV). This creates a monotonic median time which we use as the block time.
|
||||||
|
*/
|
||||||
|
// This implements `GetMedianTimePast`
|
||||||
|
let median = {
|
||||||
|
const MEDIAN_TIMESPAN: usize = 11;
|
||||||
|
let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN);
|
||||||
|
for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number {
|
||||||
|
timestamps
|
||||||
|
.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time);
|
||||||
|
}
|
||||||
|
timestamps.sort();
|
||||||
|
timestamps[timestamps.len() / 2]
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
This block's timestamp is guaranteed to be greater than this median:
|
||||||
|
https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9
|
||||||
|
/src/validation.cpp#L4182-L4184
|
||||||
|
|
||||||
|
This does not guarantee the median always increases however. Take the following trivial
|
||||||
|
example, as the window is initially built:
|
||||||
|
|
||||||
|
0 block has time 0 // Prior blocks: []
|
||||||
|
1 block has time 1 // Prior blocks: [0]
|
||||||
|
2 block has time 2 // Prior blocks: [0, 1]
|
||||||
|
3 block has time 2 // Prior blocks: [0, 1, 2]
|
||||||
|
|
||||||
|
These two blocks have the same time (both greater than the median of their prior blocks) and
|
||||||
|
the same median.
|
||||||
|
|
||||||
|
The median will never decrease however. The values pushed onto the window will always be
|
||||||
|
greater than the median. If a value greater than the median is popped, the median will
|
||||||
|
remain the same (due to the counterbalance of the pushed value). If a value less than the
|
||||||
|
median is popped, the median will increase (either to another instance of the same value,
|
||||||
|
yet one closer to the end of the repeating sequence, or to a higher value).
|
||||||
|
*/
|
||||||
|
Ok(median.into())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn unchecked_block_by_number(
|
fn unchecked_block_header_by_number(
|
||||||
&self,
|
&self,
|
||||||
number: u64,
|
number: u64,
|
||||||
) -> Result<Self::Block, Self::EphemeralError> {
|
) -> impl Send
|
||||||
Ok(Block(
|
+ Future<Output = Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError>>
|
||||||
self.db.clone(),
|
{
|
||||||
self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?,
|
async move {
|
||||||
))
|
Ok(BlockHeader(
|
||||||
|
self
|
||||||
|
.rpc
|
||||||
|
.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?)
|
||||||
|
.await?
|
||||||
|
.header,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn unchecked_block_by_number(
|
||||||
|
&self,
|
||||||
|
number: u64,
|
||||||
|
) -> impl Send + Future<Output = Result<Self::Block, Self::EphemeralError>> {
|
||||||
|
async move {
|
||||||
|
Ok(Block(
|
||||||
|
self.db.clone(),
|
||||||
|
self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?,
|
||||||
|
))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn dust(coin: Coin) -> Amount {
|
fn dust(coin: Coin) -> Amount {
|
||||||
|
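The comments in the hunk above describe Bitcoin's median-time-past rule (BIP-0113): a block's time is taken as the median of the previous eleven blocks' timestamps, which is monotonic even though raw block timestamps are not. A standalone sketch of that computation, using plain integers in place of this crate's RPC calls (the function and its inputs are illustrative, not part of the codebase):

  /// Median time past: the median of the timestamps of up to the last eleven prior blocks.
  fn median_time_past(prior_block_times: &[u64]) -> u64 {
    const MEDIAN_TIMESPAN: usize = 11;
    let start = prior_block_times.len().saturating_sub(MEDIAN_TIMESPAN);
    let mut window = prior_block_times[start ..].to_vec();
    window.sort();
    window[window.len() / 2]
  }

  fn main() {
    // Blocks from the example above have times [0, 1, 2, 2]; each block's time exceeds the
    // median of its predecessors, and the median itself never decreases.
    assert_eq!(median_time_past(&[0, 1, 2]), 1);
    assert_eq!(median_time_past(&[0, 1, 2, 2]), 2);
  }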
@@ -137,22 +156,26 @@ impl<D: Db> ScannerFeed for Rpc<D> {
|
||||||
Amount(10_000)
|
Amount(10_000)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn cost_to_aggregate(
|
fn cost_to_aggregate(
|
||||||
&self,
|
&self,
|
||||||
coin: Coin,
|
coin: Coin,
|
||||||
_reference_block: &Self::Block,
|
_reference_block: &Self::Block,
|
||||||
) -> Result<Amount, Self::EphemeralError> {
|
) -> impl Send + Future<Output = Result<Amount, Self::EphemeralError>> {
|
||||||
assert_eq!(coin, Coin::Bitcoin);
|
async move {
|
||||||
// TODO
|
assert_eq!(coin, Coin::Bitcoin);
|
||||||
Ok(Amount(0))
|
// TODO
|
||||||
|
Ok(Amount(0))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl<D: Db> TransactionPublisher<Transaction> for Rpc<D> {
|
impl<D: Db> TransactionPublisher<Transaction> for Rpc<D> {
|
||||||
type EphemeralError = RpcError;
|
type EphemeralError = RpcError;
|
||||||
|
|
||||||
async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> {
|
fn publish(
|
||||||
self.rpc.send_raw_transaction(&tx.0).await.map(|_| ())
|
&self,
|
||||||
|
tx: Transaction,
|
||||||
|
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
|
||||||
|
async move { self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,18 +1,4 @@
|
||||||
/*
|
use core::future::Future;
|
||||||
We want to be able to return received outputs. We do that by iterating over the inputs to find an
|
|
||||||
address format we recognize, then setting that address as the address to return to.
|
|
||||||
|
|
||||||
Since inputs only contain the script signatures, yet addresses are for script public keys, we
|
|
||||||
need to pull up the output spent by an input and read the script public key from that. While we
|
|
||||||
could use `txindex=1`, and an asynchronous call to the Bitcoin node, we:
|
|
||||||
|
|
||||||
1) Can maintain a much smaller index ourselves
|
|
||||||
2) Don't want the asynchronous call (which would require the flow be async, allowed to
|
|
||||||
potentially error, and more latent)
|
|
||||||
3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet)
|
|
||||||
|
|
||||||
This task builds that index.
|
|
||||||
*/
|
|
||||||
|
|
||||||
use bitcoin_serai::bitcoin::ScriptBuf;
|
use bitcoin_serai::bitcoin::ScriptBuf;
|
||||||
|
|
||||||
|
@@ -35,72 +21,88 @@ pub(crate) fn script_pubkey_for_on_chain_output(
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
We want to be able to return received outputs. We do that by iterating over the inputs to find an
|
||||||
|
address format we recognize, then setting that address as the address to return to.
|
||||||
|
|
||||||
|
Since inputs only contain the script signatures, yet addresses are for script public keys, we
|
||||||
|
need to pull up the output spent by an input and read the script public key from that. While we
|
||||||
|
could use `txindex=1`, and an asynchronous call to the Bitcoin node, we:
|
||||||
|
|
||||||
|
1) Can maintain a much smaller index ourselves
|
||||||
|
2) Don't want the asynchronous call (which would require the flow be async, allowed to
|
||||||
|
potentially error, and more latent)
|
||||||
|
3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet)
|
||||||
|
|
||||||
|
This task builds that index.
|
||||||
|
*/
|
||||||
pub(crate) struct TxIndexTask<D: Db>(pub(crate) Rpc<D>);
|
pub(crate) struct TxIndexTask<D: Db>(pub(crate) Rpc<D>);
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl<D: Db> ContinuallyRan for TxIndexTask<D> {
|
impl<D: Db> ContinuallyRan for TxIndexTask<D> {
|
||||||
async fn run_iteration(&mut self) -> Result<bool, String> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||||
let latest_block_number = self
|
async move {
|
||||||
.0
|
let latest_block_number = self
|
||||||
.rpc
|
|
||||||
.get_latest_block_number()
|
|
||||||
.await
|
|
||||||
.map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?;
|
|
||||||
let latest_block_number = u64::try_from(latest_block_number).unwrap();
|
|
||||||
// `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself)
|
|
||||||
let finalized_block_number =
|
|
||||||
latest_block_number.checked_sub(Rpc::<D>::CONFIRMATIONS - 1).ok_or(format!(
|
|
||||||
"blockchain only just started and doesn't have {} blocks yet",
|
|
||||||
Rpc::<D>::CONFIRMATIONS
|
|
||||||
))?;
|
|
||||||
|
|
||||||
/*
|
|
||||||
`finalized_block_number` is the latest block number minus confirmations. The blockchain may
|
|
||||||
undetectably re-organize though, as while the scanner will maintain an index of finalized
|
|
||||||
blocks and panics on reorganization, this runs prior to the scanner and that index.
|
|
||||||
|
|
||||||
A reorganization of `CONFIRMATIONS` blocks is still an invariant. Even if that occurs, this
|
|
||||||
saves the script public keys *by the transaction hash an output index*. Accordingly, it isn't
|
|
||||||
invalidated on reorganization. The only risk would be if the new chain reorganized to
|
|
||||||
include a transaction to Serai which we didn't index the parents of. If that happens, we'll
|
|
||||||
panic when we scan the transaction, causing the invariant to be detected.
|
|
||||||
*/
|
|
||||||
|
|
||||||
let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db);
|
|
||||||
let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1);
|
|
||||||
|
|
||||||
let mut iterated = false;
|
|
||||||
for b in next_block ..= finalized_block_number {
|
|
||||||
iterated = true;
|
|
||||||
|
|
||||||
// Fetch the block
|
|
||||||
let block_hash = self
|
|
||||||
.0
|
.0
|
||||||
.rpc
|
.rpc
|
||||||
.get_block_hash(b.try_into().unwrap())
|
.get_latest_block_number()
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?;
|
.map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?;
|
||||||
let block = self
|
let latest_block_number = u64::try_from(latest_block_number).unwrap();
|
||||||
.0
|
// `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself)
|
||||||
.rpc
|
let finalized_block_number =
|
||||||
.get_block(&block_hash)
|
latest_block_number.checked_sub(Rpc::<D>::CONFIRMATIONS - 1).ok_or(format!(
|
||||||
.await
|
"blockchain only just started and doesn't have {} blocks yet",
|
||||||
.map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?;
|
Rpc::<D>::CONFIRMATIONS
|
||||||
|
))?;
|
||||||
|
|
||||||
let mut txn = self.0.db.txn();
|
/*
|
||||||
|
`finalized_block_number` is the latest block number minus confirmations. The blockchain may
|
||||||
|
undetectably re-organize though, as while the scanner will maintain an index of finalized
|
||||||
|
blocks and panics on reorganization, this runs prior to the scanner and that index.
|
||||||
|
|
||||||
for tx in &block.txdata {
|
A reorganization of `CONFIRMATIONS` blocks is still an invariant. Even if that occurs, this
|
||||||
let txid = hash_bytes(tx.compute_txid().to_raw_hash());
|
saves the script public keys *by the transaction hash an output index*. Accordingly, it
|
||||||
for (o, output) in tx.output.iter().enumerate() {
|
isn't invalidated on reorganization. The only risk would be if the new chain reorganized to
|
||||||
let o = u32::try_from(o).unwrap();
|
include a transaction to Serai which we didn't index the parents of. If that happens, we'll
|
||||||
// Set the script public key for this transaction
|
panic when we scan the transaction, causing the invariant to be detected.
|
||||||
db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes());
|
*/
|
||||||
|
|
||||||
|
let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db);
|
||||||
|
let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1);
|
||||||
|
|
||||||
|
let mut iterated = false;
|
||||||
|
for b in next_block ..= finalized_block_number {
|
||||||
|
iterated = true;
|
||||||
|
|
||||||
|
// Fetch the block
|
||||||
|
let block_hash = self
|
||||||
|
.0
|
||||||
|
.rpc
|
||||||
|
.get_block_hash(b.try_into().unwrap())
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?;
|
||||||
|
let block = self
|
||||||
|
.0
|
||||||
|
.rpc
|
||||||
|
.get_block(&block_hash)
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?;
|
||||||
|
|
||||||
|
let mut txn = self.0.db.txn();
|
||||||
|
|
||||||
|
for tx in &block.txdata {
|
||||||
|
let txid = hash_bytes(tx.compute_txid().to_raw_hash());
|
||||||
|
for (o, output) in tx.output.iter().enumerate() {
|
||||||
|
let o = u32::try_from(o).unwrap();
|
||||||
|
// Set the script public key for this transaction
|
||||||
|
db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
db::LatestBlockToYieldAsFinalized::set(&mut txn, &b);
|
db::LatestBlockToYieldAsFinalized::set(&mut txn, &b);
|
||||||
txn.commit();
|
txn.commit();
|
||||||
|
}
|
||||||
|
Ok(iterated)
|
||||||
}
|
}
|
||||||
Ok(iterated)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -17,8 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-async-trait = { version = "0.1", default-features = false }
-
 const-hex = { version = "1", default-features = false }
 hex = { version = "0.4", default-features = false, features = ["std"] }
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
@@ -17,7 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-async-trait = { version = "0.1", default-features = false }
 rand_core = { version = "0.6", default-features = false }

 hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -24,7 +24,6 @@ impl primitives::BlockHeader for BlockHeader {
 #[derive(Clone, Debug)]
 pub(crate) struct Block(pub(crate) MBlock, Vec<Transaction>);

-#[async_trait::async_trait]
 impl primitives::Block for Block {
   type Header = BlockHeader;

@@ -1,3 +1,5 @@
+use core::future::Future;
+
 use monero_wallet::rpc::{RpcError, Rpc as RpcTrait};
 use monero_simple_request_rpc::SimpleRequestRpc;

@@ -16,7 +18,6 @@ pub(crate) struct Rpc {
   pub(crate) rpc: SimpleRequestRpc,
 }

-#[async_trait::async_trait]
 impl ScannerFeed for Rpc {
   const NETWORK: NetworkId = NetworkId::Monero;
   // Outputs aren't spendable until 10 blocks later due to the 10-block lock
@@ -32,28 +33,44 @@ impl ScannerFeed for Rpc {
|
||||||
|
|
||||||
type EphemeralError = RpcError;
|
type EphemeralError = RpcError;
|
||||||
|
|
||||||
async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError> {
|
fn latest_finalized_block_number(
|
||||||
Ok(self.rpc.get_height().await?.checked_sub(1).expect("connected to an invalid Monero RPC").try_into().unwrap())
|
&self,
|
||||||
|
) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
|
||||||
|
async move {
|
||||||
|
Ok(
|
||||||
|
self
|
||||||
|
.rpc
|
||||||
|
.get_height()
|
||||||
|
.await?
|
||||||
|
.checked_sub(1)
|
||||||
|
.expect("connected to an invalid Monero RPC")
|
||||||
|
.try_into()
|
||||||
|
.unwrap(),
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn time_of_block(&self, number: u64) -> Result<u64, Self::EphemeralError> {
|
fn time_of_block(
|
||||||
todo!("TODO")
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn unchecked_block_header_by_number(
|
|
||||||
&self,
|
&self,
|
||||||
number: u64,
|
number: u64,
|
||||||
) -> Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError> {
|
) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
|
||||||
Ok(BlockHeader(
|
async move{todo!("TODO")}
|
||||||
self.rpc.get_block_by_number(number.try_into().unwrap()).await?
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn unchecked_block_by_number(
|
fn unchecked_block_header_by_number(
|
||||||
&self,
|
&self,
|
||||||
number: u64,
|
number: u64,
|
||||||
) -> Result<Self::Block, Self::EphemeralError> {
|
) -> impl Send
|
||||||
todo!("TODO")
|
+ Future<Output = Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError>>
|
||||||
|
{
|
||||||
|
async move { Ok(BlockHeader(self.rpc.get_block_by_number(number.try_into().unwrap()).await?)) }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn unchecked_block_by_number(
|
||||||
|
&self,
|
||||||
|
number: u64,
|
||||||
|
) -> impl Send + Future<Output = Result<Self::Block, Self::EphemeralError>> {
|
||||||
|
async move { todo!("TODO") }
|
||||||
}
|
}
|
||||||
|
|
||||||
fn dust(coin: Coin) -> Amount {
|
fn dust(coin: Coin) -> Amount {
|
||||||
|
@@ -62,22 +79,26 @@ impl ScannerFeed for Rpc {
|
||||||
todo!("TODO")
|
todo!("TODO")
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn cost_to_aggregate(
|
fn cost_to_aggregate(
|
||||||
&self,
|
&self,
|
||||||
coin: Coin,
|
coin: Coin,
|
||||||
_reference_block: &Self::Block,
|
_reference_block: &Self::Block,
|
||||||
) -> Result<Amount, Self::EphemeralError> {
|
) -> impl Send + Future<Output = Result<Amount, Self::EphemeralError>> {
|
||||||
assert_eq!(coin, Coin::Bitcoin);
|
async move {
|
||||||
// TODO
|
assert_eq!(coin, Coin::Bitcoin);
|
||||||
Ok(Amount(0))
|
// TODO
|
||||||
|
Ok(Amount(0))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl TransactionPublisher<Transaction> for Rpc {
|
impl TransactionPublisher<Transaction> for Rpc {
|
||||||
type EphemeralError = RpcError;
|
type EphemeralError = RpcError;
|
||||||
|
|
||||||
async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> {
|
fn publish(
|
||||||
self.rpc.publish_transaction(&tx.0).await
|
&self,
|
||||||
|
tx: Transaction,
|
||||||
|
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
|
||||||
|
async move { self.rpc.publish_transaction(&tx.0).await }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -17,8 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-async-trait = { version = "0.1", default-features = false }
-
 group = { version = "0.13", default-features = false }

 serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
@@ -22,7 +22,6 @@ pub trait BlockHeader: Send + Sync + Sized + Clone + Debug {
 /// necessary to literally define it as whatever the external network defines as a block. For
 /// external networks which finalize block(s), this block type should be a representation of all
 /// transactions within a period finalization (whether block or epoch).
-#[async_trait::async_trait]
 pub trait Block: Send + Sync + Sized + Clone + Debug {
   /// The type used for this block's header.
   type Header: BlockHeader;
@@ -1,4 +1,4 @@
-use core::time::Duration;
+use core::{future::Future, time::Duration};
 use std::sync::Arc;

 use tokio::sync::{mpsc, oneshot, Mutex};
@@ -78,8 +78,7 @@ impl TaskHandle {
 }

 /// A task to be continually ran.
-#[async_trait::async_trait]
-pub trait ContinuallyRan: Sized {
+pub trait ContinuallyRan: Sized + Send {
   /// The amount of seconds before this task should be polled again.
   const DELAY_BETWEEN_ITERATIONS: u64 = 5;
   /// The maximum amount of seconds before this task should be run again.
@@ -91,60 +90,66 @@ pub trait ContinuallyRan: Sized {
|
||||||
///
|
///
|
||||||
/// If this returns `true`, all dependents of the task will immediately have a new iteration ran
|
/// If this returns `true`, all dependents of the task will immediately have a new iteration ran
|
||||||
/// (without waiting for whatever timer they were already on).
|
/// (without waiting for whatever timer they were already on).
|
||||||
async fn run_iteration(&mut self) -> Result<bool, String>;
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
|
||||||
|
|
||||||
/// Continually run the task.
|
/// Continually run the task.
|
||||||
async fn continually_run(mut self, mut task: Task, dependents: Vec<TaskHandle>) {
|
fn continually_run(
|
||||||
// The default number of seconds to sleep before running the task again
|
mut self,
|
||||||
let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
|
mut task: Task,
|
||||||
// The current number of seconds to sleep before running the task again
|
dependents: Vec<TaskHandle>,
|
||||||
// We increment this upon errors in order to not flood the logs with errors
|
) -> impl Send + Future<Output = ()> {
|
||||||
let mut current_sleep_before_next_task = default_sleep_before_next_task;
|
async move {
|
||||||
let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
|
// The default number of seconds to sleep before running the task again
|
||||||
let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
|
let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
|
||||||
// Set a limit of sleeping for two minutes
|
// The current number of seconds to sleep before running the task again
|
||||||
*current_sleep_before_next_task = new_sleep.max(Self::MAX_DELAY_BETWEEN_ITERATIONS);
|
// We increment this upon errors in order to not flood the logs with errors
|
||||||
};
|
let mut current_sleep_before_next_task = default_sleep_before_next_task;
|
||||||
|
let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
|
||||||
|
let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
|
||||||
|
// Set a limit of sleeping for two minutes
|
||||||
|
*current_sleep_before_next_task = new_sleep.max(Self::MAX_DELAY_BETWEEN_ITERATIONS);
|
||||||
|
};
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
// If we were told to close/all handles were dropped, drop it
|
// If we were told to close/all handles were dropped, drop it
|
||||||
{
|
{
|
||||||
let should_close = task.close.try_recv();
|
let should_close = task.close.try_recv();
|
||||||
match should_close {
|
match should_close {
|
||||||
Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break,
|
Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break,
|
||||||
Err(mpsc::error::TryRecvError::Empty) => {}
|
Err(mpsc::error::TryRecvError::Empty) => {}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
match self.run_iteration().await {
|
match self.run_iteration().await {
|
||||||
Ok(run_dependents) => {
|
Ok(run_dependents) => {
|
||||||
// Upon a successful (error-free) loop iteration, reset the amount of time we sleep
|
// Upon a successful (error-free) loop iteration, reset the amount of time we sleep
|
||||||
current_sleep_before_next_task = default_sleep_before_next_task;
|
current_sleep_before_next_task = default_sleep_before_next_task;
|
||||||
|
|
||||||
if run_dependents {
|
if run_dependents {
|
||||||
for dependent in &dependents {
|
for dependent in &dependents {
|
||||||
dependent.run_now();
|
dependent.run_now();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
Err(e) => {
|
||||||
Err(e) => {
|
log::warn!("{}", e);
|
||||||
log::warn!("{}", e);
|
increase_sleep_before_next_task(&mut current_sleep_before_next_task);
|
||||||
increase_sleep_before_next_task(&mut current_sleep_before_next_task);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't run the task again for another few seconds UNLESS told to run now
|
|
||||||
tokio::select! {
|
|
||||||
() = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
|
|
||||||
msg = task.run_now.recv() => {
|
|
||||||
// Check if this is firing because the handle was dropped
|
|
||||||
if msg.is_none() {
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
},
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
task.closed.send(()).unwrap();
|
// Don't run the task again for another few seconds UNLESS told to run now
|
||||||
|
tokio::select! {
|
||||||
|
() = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
|
||||||
|
msg = task.run_now.recv() => {
|
||||||
|
// Check if this is firing because the handle was dropped
|
||||||
|
if msg.is_none() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
task.closed.send(()).unwrap();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
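Under the new signature above, a task implements `run_iteration` by returning an `async move` block rather than writing an `async fn` behind the `#[async_trait]` macro. A minimal, self-contained sketch of what an implementor looks like after this change (the `CounterTask` type is illustrative and not from this repository; the trait is reduced to the one method for brevity):

  use core::future::Future;

  // Illustrative stand-in for the trait above, reduced to the iteration method.
  trait ContinuallyRan: Sized + Send {
    fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
  }

  struct CounterTask {
    iterations: u64,
  }

  impl ContinuallyRan for CounterTask {
    fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
      async move {
        self.iterations += 1;
        // Returning `true` asks the driver loop to immediately run dependent tasks.
        Ok(self.iterations % 10 == 0)
      }
    }
  }

The `continually_run` default method in the hunk above then polls `run_iteration`, backing off on errors and waking dependents whenever it returns `Ok(true)`.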
@@ -17,9 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-# Macros
-async-trait = { version = "0.1", default-features = false }
-
 # Encoders
 hex = { version = "0.4", default-features = false, features = ["std"] }
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
@@ -1,4 +1,4 @@
-use core::marker::PhantomData;
+use core::{marker::PhantomData, future::Future};
 use std::collections::{HashSet, HashMap};

 use group::GroupEncoding;
@@ -185,317 +185,323 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTask<D, S, Sch> {
|
impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTask<D, S, Sch> {
|
||||||
async fn run_iteration(&mut self) -> Result<bool, String> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||||
// Fetch the highest acknowledged block
|
async move {
|
||||||
let Some(highest_acknowledged) = ScannerGlobalDb::<S>::highest_acknowledged_block(&self.db)
|
// Fetch the highest acknowledged block
|
||||||
else {
|
let Some(highest_acknowledged) = ScannerGlobalDb::<S>::highest_acknowledged_block(&self.db)
|
||||||
// If we've never acknowledged a block, return
|
else {
|
||||||
return Ok(false);
|
// If we've never acknowledged a block, return
|
||||||
};
|
|
||||||
|
|
||||||
// A boolean of if we've made any progress to return at the end of the function
|
|
||||||
let mut made_progress = false;
|
|
||||||
|
|
||||||
// Start by intaking any Burns we have sitting around
|
|
||||||
// It's important we run this regardless of if we have a new block to handle
|
|
||||||
made_progress |= self.intake_burns().await?;
|
|
||||||
|
|
||||||
/*
|
|
||||||
Eventualities increase upon one of two cases:
|
|
||||||
|
|
||||||
1) We're fulfilling Burns
|
|
||||||
2) We acknowledged a block
|
|
||||||
|
|
||||||
We can't know the processor has intaked all Burns it should have when we process block `b`.
|
|
||||||
We solve this by executing a consensus protocol whenever a resolution for an Eventuality
|
|
||||||
created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on such
|
|
||||||
blocks (and all preceding Burns).
|
|
||||||
|
|
||||||
This means we can only iterate up to the block currently pending acknowledgement.
|
|
||||||
|
|
||||||
We only know blocks will need acknowledgement *for sure* if they were scanned. The only other
|
|
||||||
causes are key activation and retirement (both scheduled outside the scan window). This makes
|
|
||||||
the exclusive upper bound the *next block to scan*.
|
|
||||||
*/
|
|
||||||
let exclusive_upper_bound = {
|
|
||||||
// Fetch the next to scan block
|
|
||||||
let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
|
|
||||||
.expect("EventualityTask run before writing the start block");
|
|
||||||
// If we haven't done any work, return
|
|
||||||
if next_to_scan == 0 {
|
|
||||||
return Ok(false);
|
return Ok(false);
|
||||||
}
|
};
|
||||||
next_to_scan
|
|
||||||
};
|
|
||||||
|
|
||||||
// Fetch the next block to check
|
// A boolean of if we've made any progress to return at the end of the function
|
||||||
let next_to_check = EventualityDb::<S>::next_to_check_for_eventualities_block(&self.db)
|
let mut made_progress = false;
|
||||||
.expect("EventualityTask run before writing the start block");
|
|
||||||
|
|
||||||
// Check all blocks
|
// Start by intaking any Burns we have sitting around
|
||||||
for b in next_to_check .. exclusive_upper_bound {
|
// It's important we run this regardless of if we have a new block to handle
|
||||||
let is_block_notable = ScannerGlobalDb::<S>::is_block_notable(&self.db, b);
|
made_progress |= self.intake_burns().await?;
|
||||||
if is_block_notable {
|
|
||||||
/*
|
|
||||||
If this block is notable *and* not acknowledged, break.
|
|
||||||
|
|
||||||
This is so if Burns queued prior to this block's acknowledgement caused any Eventualities
|
/*
|
||||||
(which may resolve this block), we have them. If it wasn't for that, it'd be so if this
|
Eventualities increase upon one of two cases:
|
||||||
block's acknowledgement caused any Eventualities, we have them, though those would only
|
|
||||||
potentially resolve in the next block (letting us scan this block without delay).
|
1) We're fulfilling Burns
|
||||||
*/
|
2) We acknowledged a block
|
||||||
if b > highest_acknowledged {
|
|
||||||
break;
|
We can't know the processor has intaked all Burns it should have when we process block `b`.
|
||||||
|
We solve this by executing a consensus protocol whenever a resolution for an Eventuality
|
||||||
|
created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on
|
||||||
|
such blocks (and all preceding Burns).
|
||||||
|
|
||||||
|
This means we can only iterate up to the block currently pending acknowledgement.
|
||||||
|
|
||||||
|
We only know blocks will need acknowledgement *for sure* if they were scanned. The only
|
||||||
|
other causes are key activation and retirement (both scheduled outside the scan window).
|
||||||
|
This makes the exclusive upper bound the *next block to scan*.
|
||||||
|
*/
|
||||||
|
let exclusive_upper_bound = {
|
||||||
|
// Fetch the next to scan block
|
||||||
|
let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
|
||||||
|
.expect("EventualityTask run before writing the start block");
|
||||||
|
// If we haven't done any work, return
|
||||||
|
if next_to_scan == 0 {
|
||||||
|
return Ok(false);
|
||||||
}
|
}
|
||||||
|
next_to_scan
|
||||||
|
};
|
||||||
|
|
||||||
// Since this block is notable, ensure we've intaked all the Burns preceding it
|
// Fetch the next block to check
|
||||||
// We can know with certainty that the channel is fully populated at this time since we've
|
let next_to_check = EventualityDb::<S>::next_to_check_for_eventualities_block(&self.db)
|
||||||
// acknowledged a newer block (so we've handled the state up to this point and any new
|
.expect("EventualityTask run before writing the start block");
|
||||||
// state will be for the newer block)
|
|
||||||
#[allow(unused_assignments)]
|
|
||||||
{
|
|
||||||
made_progress |= self.intake_burns().await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Since we're handling this block, we are making progress
|
// Check all blocks
|
||||||
made_progress = true;
|
for b in next_to_check .. exclusive_upper_bound {
|
||||||
|
let is_block_notable = ScannerGlobalDb::<S>::is_block_notable(&self.db, b);
|
||||||
let block = self.feed.block_by_number(&self.db, b).await?;
|
if is_block_notable {
|
||||||
|
|
||||||
log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id()));
|
|
||||||
|
|
||||||
let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b);
|
|
||||||
|
|
||||||
let mut txn = self.db.txn();
|
|
||||||
|
|
||||||
// Fetch the data from the scanner
|
|
||||||
let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b);
|
|
||||||
assert_eq!(scan_data.block_number, b);
|
|
||||||
let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } =
|
|
||||||
scan_data;
|
|
||||||
let mut outputs = received_external_outputs;
|
|
||||||
|
|
||||||
for key in &keys {
|
|
||||||
// If this is the key's activation block, activate it
|
|
||||||
if key.activation_block_number == b {
|
|
||||||
Sch::activate_key(&mut txn, key.key);
|
|
||||||
}
|
|
||||||
|
|
||||||
let completed_eventualities = {
|
|
||||||
let mut eventualities = EventualityDb::<S>::eventualities(&txn, key.key);
|
|
||||||
let completed_eventualities = block.check_for_eventuality_resolutions(&mut eventualities);
|
|
||||||
EventualityDb::<S>::set_eventualities(&mut txn, key.key, &eventualities);
|
|
||||||
completed_eventualities
|
|
||||||
};
|
|
||||||
|
|
||||||
for (tx, eventuality) in &completed_eventualities {
|
|
||||||
log::info!(
|
|
||||||
"eventuality {} resolved by {}",
|
|
||||||
hex::encode(eventuality.id()),
|
|
||||||
hex::encode(tx.as_ref())
|
|
||||||
);
|
|
||||||
CompletedEventualities::send(&mut txn, &key.key, eventuality.id());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch all non-External outputs
|
|
||||||
let mut non_external_outputs = block.scan_for_outputs(key.key);
|
|
||||||
non_external_outputs.retain(|output| output.kind() != OutputType::External);
|
|
||||||
// Drop any outputs less than the dust limit
|
|
||||||
non_external_outputs.retain(|output| {
|
|
||||||
let balance = output.balance();
|
|
||||||
balance.amount.0 >= S::dust(balance.coin).0
|
|
||||||
});
|
|
||||||
|
|
||||||
/*
|
|
||||||
Now that we have all non-External outputs, we filter them to be only the outputs which
|
|
||||||
are from transactions which resolve our own Eventualities *if* the multisig is retiring.
|
|
||||||
This implements step 6 of `spec/processor/Multisig Rotation.md`.
|
|
||||||
|
|
||||||
We may receive a Change output. The only issue with accumulating this would be if it
|
|
||||||
extends the multisig's lifetime (by increasing the amount of outputs yet to be
|
|
||||||
forwarded). By checking it's one we made, either:
|
|
||||||
1) It's a legitimate Change output to be forwarded
|
|
||||||
2) It's a Change output created by a user burning coins (specifying the Change address),
|
|
||||||
which can only be created while the multisig is actively handling `Burn`s (therefore
|
|
||||||
ensuring this multisig cannot be kept alive ad-infinitum)
|
|
||||||
|
|
||||||
The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably get
|
|
||||||
ignored if not usable however.
|
|
||||||
*/
|
|
||||||
if key.stage == LifetimeStage::Finishing {
|
|
||||||
non_external_outputs
|
|
||||||
.retain(|output| completed_eventualities.contains_key(&output.transaction_id()));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Finally, for non-External outputs we didn't make, we check they're worth more than the
|
|
||||||
// cost to aggregate them to avoid some profitable spam attacks by malicious miners
|
|
||||||
{
|
|
||||||
// Fetch and cache the costs to aggregate as this call may be expensive
|
|
||||||
let coins =
|
|
||||||
non_external_outputs.iter().map(|output| output.balance().coin).collect::<HashSet<_>>();
|
|
||||||
let mut costs_to_aggregate = HashMap::new();
|
|
||||||
for coin in coins {
|
|
||||||
costs_to_aggregate.insert(
|
|
||||||
coin,
|
|
||||||
self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| {
|
|
||||||
format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}")
|
|
||||||
})?,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only retain out outputs/outputs sufficiently worthwhile
|
|
||||||
non_external_outputs.retain(|output| {
|
|
||||||
completed_eventualities.contains_key(&output.transaction_id()) || {
|
|
||||||
let balance = output.balance();
|
|
||||||
balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0)
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now, we iterate over all Forwarded outputs and queue their InInstructions
|
|
||||||
for output in
|
|
||||||
non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded)
|
|
||||||
{
|
|
||||||
let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else {
|
|
||||||
// Output sent to the forwarding address yet not one we made
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
let Some(forwarded) = eventuality.singular_spent_output() else {
|
|
||||||
// This was a TX made by us, yet someone burned to the forwarding address as it doesn't
|
|
||||||
// follow the structure of forwarding transactions
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some((return_address, mut in_instruction)) =
|
|
||||||
ScannerGlobalDb::<S>::return_address_and_in_instruction_for_forwarded_output(
|
|
||||||
&txn, &forwarded,
|
|
||||||
)
|
|
||||||
else {
|
|
||||||
// This was a TX made by us, coincidentally with the necessary structure, yet wasn't
|
|
||||||
// forwarding an output
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
// We use the original amount, minus twice the cost to aggregate
|
|
||||||
// If the fees we paid to forward this now (less than the cost to aggregate now, yet not
|
|
||||||
// necessarily the cost to aggregate historically) caused this amount to be less, reduce
|
|
||||||
// it accordingly
|
|
||||||
in_instruction.balance.amount.0 =
|
|
||||||
in_instruction.balance.amount.0.min(output.balance().amount.0);
|
|
||||||
|
|
||||||
queue_output_until_block::<S>(
|
|
||||||
&mut txn,
|
|
||||||
b + S::WINDOW_LENGTH,
|
|
||||||
&OutputWithInInstruction { output: output.clone(), return_address, in_instruction },
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Accumulate all of these outputs
|
|
||||||
outputs.extend(non_external_outputs);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the scheduler
|
|
||||||
{
|
|
||||||
let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns };
|
|
||||||
scheduler_update.outputs.sort_by(sort_outputs);
|
|
||||||
scheduler_update.forwards.sort_by(sort_outputs);
|
|
||||||
scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output));
|
|
||||||
|
|
||||||
let empty = {
|
|
||||||
let a: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.outputs.iter();
|
|
||||||
let b: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.forwards.iter();
|
|
||||||
let c = scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output);
|
|
||||||
let mut all_outputs = a.chain(b).chain(c).peekable();
|
|
||||||
|
|
||||||
// If we received any output, sanity check this block is notable
|
|
||||||
let empty = all_outputs.peek().is_none();
|
|
||||||
if !empty {
|
|
||||||
assert!(is_block_notable, "accumulating output(s) in non-notable block");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanity check we've never accumulated these outputs before
|
|
||||||
for output in all_outputs {
|
|
||||||
assert!(
|
|
||||||
!EventualityDb::<S>::prior_accumulated_output(&txn, &output.id()),
|
|
||||||
"prior accumulated an output with this ID"
|
|
||||||
);
|
|
||||||
EventualityDb::<S>::accumulated_output(&mut txn, &output.id());
|
|
||||||
}
|
|
||||||
|
|
||||||
empty
|
|
||||||
};
|
|
||||||
|
|
||||||
if !empty {
|
|
||||||
// Accumulate the outputs
|
|
||||||
/*
|
/*
|
||||||
This uses the `keys_with_stages` for the current block, yet this block is notable.
|
If this block is notable *and* not acknowledged, break.
|
||||||
Accordingly, all future intaked Burns will use at least this block when determining
|
|
||||||
what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented. If
|
This is so if Burns queued prior to this block's acknowledgement caused any
|
||||||
this block wasn't notable, we'd potentially intake Burns with the LifetimeStage
|
Eventualities (which may resolve this block), we have them. If it wasn't for that, it'd
|
||||||
determined off an earlier block than this (enabling an earlier LifetimeStage to be used
|
be so if this block's acknowledgement caused any Eventualities, we have them, though
|
||||||
after a later one was already used).
|
those would only potentially resolve in the next block (letting us scan this block
|
||||||
|
without delay).
|
||||||
*/
|
*/
|
||||||
let new_eventualities =
|
if b > highest_acknowledged {
|
||||||
Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update);
|
break;
|
||||||
// Intake the new Eventualities
|
|
||||||
for key in new_eventualities.keys() {
|
|
||||||
keys
|
|
||||||
.iter()
|
|
||||||
.find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice())
|
|
||||||
.expect("intaking Eventuality for key which isn't active");
|
|
||||||
}
|
}
|
||||||
intake_eventualities::<S>(&mut txn, new_eventualities);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for key in &keys {
|
          // Since this block is notable, ensure we've intaked all the Burns preceding it
          // We can know with certainty that the channel is fully populated at this time since
          // we've acknowledged a newer block (so we've handled the state up to this point and any
          // new state will be for the newer block)
          #[allow(unused_assignments)]
          {
            made_progress |= self.intake_burns().await?;
          }
        }

        // Since we're handling this block, we are making progress
        made_progress = true;

        let block = self.feed.block_by_number(&self.db, b).await?;

        log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id()));

        let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b);

        let mut txn = self.db.txn();

        // Fetch the data from the scanner
        let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b);
        assert_eq!(scan_data.block_number, b);
        let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } =
          scan_data;
        let mut outputs = received_external_outputs;

        for key in &keys {
          // If this is the key's activation block, activate it
          if key.activation_block_number == b {
            Sch::activate_key(&mut txn, key.key);
          }

          let completed_eventualities = {
            let mut eventualities = EventualityDb::<S>::eventualities(&txn, key.key);
            let completed_eventualities =
              block.check_for_eventuality_resolutions(&mut eventualities);
            EventualityDb::<S>::set_eventualities(&mut txn, key.key, &eventualities);
            completed_eventualities
          };

          for (tx, eventuality) in &completed_eventualities {
            log::info!(
              "eventuality {} resolved by {}",
              hex::encode(eventuality.id()),
              hex::encode(tx.as_ref())
            );
            CompletedEventualities::send(&mut txn, &key.key, eventuality.id());
          }

          // Fetch all non-External outputs
          let mut non_external_outputs = block.scan_for_outputs(key.key);
          non_external_outputs.retain(|output| output.kind() != OutputType::External);
          // Drop any outputs less than the dust limit
          non_external_outputs.retain(|output| {
            let balance = output.balance();
            balance.amount.0 >= S::dust(balance.coin).0
          });

          /*
            Now that we have all non-External outputs, we filter them to be only the outputs which
            are from transactions which resolve our own Eventualities *if* the multisig is retiring.
            This implements step 6 of `spec/processor/Multisig Rotation.md`.

            We may receive a Change output. The only issue with accumulating this would be if it
            extends the multisig's lifetime (by increasing the amount of outputs yet to be
            forwarded). By checking it's one we made, either:
            1) It's a legitimate Change output to be forwarded
            2) It's a Change output created by a user burning coins (specifying the Change address),
               which can only be created while the multisig is actively handling `Burn`s (therefore
               ensuring this multisig cannot be kept alive ad-infinitum)

            The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably
            get ignored if not usable however.
          */
          if key.stage == LifetimeStage::Finishing {
            non_external_outputs
              .retain(|output| completed_eventualities.contains_key(&output.transaction_id()));
          }

          // Finally, for non-External outputs we didn't make, we check they're worth more than the
          // cost to aggregate them to avoid some profitable spam attacks by malicious miners
          {
            // Fetch and cache the costs to aggregate as this call may be expensive
            let coins = non_external_outputs
              .iter()
              .map(|output| output.balance().coin)
              .collect::<HashSet<_>>();
            let mut costs_to_aggregate = HashMap::new();
            for coin in coins {
              costs_to_aggregate.insert(
                coin,
                self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| {
                  format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}")
                })?,
              );
            }

            // Only retain our outputs/outputs sufficiently worthwhile
            non_external_outputs.retain(|output| {
              completed_eventualities.contains_key(&output.transaction_id()) || {
                let balance = output.balance();
                balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0)
              }
            });
          }

          // Now, we iterate over all Forwarded outputs and queue their InInstructions
          for output in
            non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded)
          {
            let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else {
              // Output sent to the forwarding address yet not one we made
              continue;
            };
            let Some(forwarded) = eventuality.singular_spent_output() else {
              // This was a TX made by us, yet someone burned to the forwarding address as it
              // doesn't follow the structure of forwarding transactions
              continue;
            };

            let Some((return_address, mut in_instruction)) =
              ScannerGlobalDb::<S>::return_address_and_in_instruction_for_forwarded_output(
                &txn, &forwarded,
              )
            else {
              // This was a TX made by us, coincidentally with the necessary structure, yet wasn't
              // forwarding an output
              continue;
            };

            // We use the original amount, minus twice the cost to aggregate
            // If the fees we paid to forward this now (less than the cost to aggregate now, yet not
            // necessarily the cost to aggregate historically) caused this amount to be less, reduce
            // it accordingly
            in_instruction.balance.amount.0 =
              in_instruction.balance.amount.0.min(output.balance().amount.0);

            queue_output_until_block::<S>(
              &mut txn,
              b + S::WINDOW_LENGTH,
              &OutputWithInInstruction { output: output.clone(), return_address, in_instruction },
            );
          }

          // Accumulate all of these outputs
          outputs.extend(non_external_outputs);
        }

        // Update the scheduler
        {
          let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns };
          scheduler_update.outputs.sort_by(sort_outputs);
          scheduler_update.forwards.sort_by(sort_outputs);
          scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output));

          let empty = {
            let a: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.outputs.iter();
            let b: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.forwards.iter();
            let c =
              scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output);
            let mut all_outputs = a.chain(b).chain(c).peekable();

            // If we received any output, sanity check this block is notable
            let empty = all_outputs.peek().is_none();
            if !empty {
              assert!(is_block_notable, "accumulating output(s) in non-notable block");
            }

            // Sanity check we've never accumulated these outputs before
            for output in all_outputs {
              assert!(
                !EventualityDb::<S>::prior_accumulated_output(&txn, &output.id()),
                "prior accumulated an output with this ID"
              );
              EventualityDb::<S>::accumulated_output(&mut txn, &output.id());
            }

            empty
          };

          if !empty {
            // Accumulate the outputs
            /*
              This uses the `keys_with_stages` for the current block, yet this block is notable.
              Accordingly, all future intaked Burns will use at least this block when determining
              what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented.
              If this block wasn't notable, we'd potentially intake Burns with the LifetimeStage
              determined off an earlier block than this (enabling an earlier LifetimeStage to be
              used after a later one was already used).
            */
            let new_eventualities =
              Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update);
            // Intake the new Eventualities
            for key in new_eventualities.keys() {
              keys
                .iter()
                .find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice())
                .expect("intaking Eventuality for key which isn't active");
            }
            intake_eventualities::<S>(&mut txn, new_eventualities);
          }
        }

        for key in &keys {
          // If this is the block at which forwarding starts for this key, flush it
          // We do this after we issue the above update for any efficiencies gained by doing so
          if key.block_at_which_forwarding_starts == Some(b) {
            assert!(
              key.key != keys.last().unwrap().key,
              "key which was forwarding was the last key (which has no key after it to forward to)"
            );
            let new_eventualities =
              Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key);
            intake_eventualities::<S>(&mut txn, new_eventualities);
          }

          // Now that we've intaked any Eventualities caused, check if we're retiring any keys
          if key.stage == LifetimeStage::Finishing {
            let eventualities = EventualityDb::<S>::eventualities(&txn, key.key);
            if eventualities.active_eventualities.is_empty() {
              log::info!(
                "key {} has finished and is being retired",
                hex::encode(key.key.to_bytes().as_ref())
              );

              // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never
              // has a malleable view of the keys.
              ScannerGlobalDb::<S>::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key);

              // We tell the scheduler to retire it now as we're done with it, and this fn doesn't
              // require it be called with a canonical order
              Sch::retire_key(&mut txn, key.key);
            }
          }
        }

        // Update the next-to-check block
        EventualityDb::<S>::set_next_to_check_for_eventualities_block(&mut txn, next_to_check);

        // If this block was notable, update the latest-handled notable block
        if is_block_notable {
          EventualityDb::<S>::set_latest_handled_notable_block(&mut txn, b);
        }

        txn.commit();
      }

      // Run dependents if we successfully checked any blocks
      Ok(made_progress)
    }
  }
}

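The retention rule above — keep a non-External output only if it resolves one of our own Eventualities, or if it's worth at least twice its cost to aggregate — can be shown in isolation. A minimal sketch under assumed, simplified types; the `Coin`/`Output` definitions below are illustrative stand-ins, not the processor's actual primitives:

use std::collections::HashMap;

// Illustrative stand-ins for the processor's primitives.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Coin(u8);
struct Output {
  coin: Coin,
  amount: u64,
  // Whether the transaction creating this output resolved one of our own Eventualities
  resolves_our_eventuality: bool,
}

// Keep outputs we know we made, or outputs worth at least twice the cost to aggregate them,
// so sweeping an output can never cost more than the value it adds.
fn retain_worthwhile(outputs: &mut Vec<Output>, costs_to_aggregate: &HashMap<Coin, u64>) {
  outputs.retain(|output| {
    output.resolves_our_eventuality || (output.amount >= 2 * costs_to_aggregate[&output.coin])
  });
}
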
@ -1,5 +1,6 @@
use core::future::Future;

use serai_db::{Get, DbTxn, Db};

use primitives::{task::ContinuallyRan, BlockHeader};

use crate::ScannerFeed;

@ -56,58 +57,59 @@ impl<D: Db, S: ScannerFeed> IndexTask<D, S> {
  }
}

#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed> ContinuallyRan for IndexTask<D, S> {
  async fn run_iteration(&mut self) -> Result<bool, String> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      // Fetch the latest finalized block
      let our_latest_finalized = IndexDb::latest_finalized_block(&self.db)
        .expect("IndexTask run before writing the start block");
      let latest_finalized = match self.feed.latest_finalized_block_number().await {
        Ok(latest_finalized) => latest_finalized,
        Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?,
      };

      if latest_finalized < our_latest_finalized {
        // Explicitly log this as an error as returned ephemeral errors are logged with debug
        // This doesn't panic as the node should sync along our indexed chain, and if it doesn't,
        // we'll panic at that point in time
        log::error!(
          "node is out of sync, latest finalized {} is behind our indexed {}",
          latest_finalized,
          our_latest_finalized
        );
        Err("node is out of sync".to_string())?;
      }

      // Index the hashes of all blocks until the latest finalized block
      for b in (our_latest_finalized + 1) ..= latest_finalized {
        let block = match self.feed.unchecked_block_header_by_number(b).await {
          Ok(block) => block,
          Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
        };

        // Check this descends from our indexed chain
        {
          let expected_parent =
            IndexDb::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block");
          if block.parent() != expected_parent {
            panic!(
              "current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})",
              hex::encode(block.parent()),
              b - 1,
              hex::encode(expected_parent)
            );
          }
        }

        // Update the latest finalized block
        let mut txn = self.db.txn();
        IndexDb::set_block(&mut txn, b, block.id());
        IndexDb::set_latest_finalized_block(&mut txn, b);
        txn.commit();
      }

      // Have dependents run if we updated the latest finalized block
      Ok(our_latest_finalized != latest_finalized)
    }
  }
}

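The IndexTask change above follows the pattern used throughout this commit: instead of `#[async_trait::async_trait]` boxing every method's future, the trait method returns `impl Send + Future` and the implementation wraps its old `async fn` body in an `async move` block. A minimal sketch of that pattern with an illustrative trait (not the actual `ContinuallyRan` definition):

use core::future::Future;

// An illustrative task trait using return-position `impl Trait` in the trait definition
// (stable since Rust 1.75) rather than #[async_trait]'s boxed futures.
trait Task {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
}

struct Counter(u64);

impl Task for Counter {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    // The prior `async fn` body moves into an `async move` block; the compiler names the
    // returned future, so no allocation or dynamic dispatch is introduced.
    async move {
      self.0 += 1;
      Ok(self.0 > 0)
    }
  }
}
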
@ -2,7 +2,7 @@
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::{marker::PhantomData, fmt::Debug};
use core::{marker::PhantomData, future::Future, fmt::Debug};
use std::{io, collections::HashMap};

use group::GroupEncoding;

@ -59,7 +59,6 @@ impl<B: Block> BlockExt for B {
/// A feed usable to scan a blockchain.
///
/// This defines the primitive types used, along with various getters necessary for indexing.
#[async_trait::async_trait]
pub trait ScannerFeed: 'static + Send + Sync + Clone {
  /// The ID of the network being scanned for.
  const NETWORK: NetworkId;

@ -110,38 +109,43 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone {
  ///
  /// The block number is its zero-indexed position within a linear view of the external network's
  /// consensus. The genesis block accordingly has block number 0.
  async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError>;
  fn latest_finalized_block_number(
    &self,
  ) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>>;

  /// Fetch the timestamp of a block (represented in seconds since the epoch).
  ///
  /// This must be monotonically incrementing. Two blocks may share a timestamp.
  async fn time_of_block(&self, number: u64) -> Result<u64, Self::EphemeralError>;
  fn time_of_block(
    &self,
    number: u64,
  ) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>>;

  /// Fetch a block header by its number.
  ///
  /// This does not check the returned BlockHeader is the header for the block we indexed.
  async fn unchecked_block_header_by_number(
  fn unchecked_block_header_by_number(
    &self,
    number: u64,
  ) -> Result<<Self::Block as Block>::Header, Self::EphemeralError>;
  ) -> impl Send + Future<Output = Result<<Self::Block as Block>::Header, Self::EphemeralError>>;

  /// Fetch a block by its number.
  ///
  /// This does not check the returned Block is the block we indexed.
  async fn unchecked_block_by_number(
  fn unchecked_block_by_number(
    &self,
    number: u64,
  ) -> Result<Self::Block, Self::EphemeralError>;
  ) -> impl Send + Future<Output = Result<Self::Block, Self::EphemeralError>>;

  /// Fetch a block by its number.
  ///
  /// Panics if the block requested wasn't indexed.
  async fn block_by_number(
  fn block_by_number(
    &self,
    getter: &(impl Send + Sync + Get),
    number: u64,
  ) -> Result<Self::Block, String> {
  ) -> impl Send + Future<Output = Result<Self::Block, String>> {
    async move {
      let block = match self.unchecked_block_by_number(number).await {
        Ok(block) => block,
        Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?,
      };

@ -159,7 +163,7 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone {
        }
      }

      Ok(block)
    }
  }

  /// The dust threshold for the specified coin.

@ -171,11 +175,11 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone {
  /// The cost to aggregate an input as of the specified block.
  ///
  /// This is defined as the transaction fee for a 2-input, 1-output transaction.
  async fn cost_to_aggregate(
  fn cost_to_aggregate(
    &self,
    coin: Coin,
    reference_block: &Self::Block,
  ) -> Result<Amount, Self::EphemeralError>;
  ) -> impl Send + Future<Output = Result<Amount, Self::EphemeralError>>;
}

/// The key type for this ScannerFeed.

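`block_by_number` above shows the same conversion applied to a provided trait method: the default body is wrapped in `async move` and may still await the other, now future-returning, trait methods. A hedged sketch of that shape with hypothetical names (`Feed` and `fetch` are not part of this crate):

use core::future::Future;

// Hypothetical trait; the Sync supertrait keeps `&Self` Send so the returned future can be Send,
// loosely mirroring ScannerFeed's `'static + Send + Sync + Clone` bound.
trait Feed: Sync {
  fn fetch(&self, number: u64) -> impl Send + Future<Output = Result<Vec<u8>, String>>;

  // A provided method may also return an async block which awaits the required method.
  fn fetch_or_explain(
    &self,
    number: u64,
  ) -> impl Send + Future<Output = Result<Vec<u8>, String>> {
    async move {
      self.fetch(number).await.map_err(|e| format!("couldn't fetch block {number}: {e}"))
    }
  }
}
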
@ -1,4 +1,4 @@
use core::marker::PhantomData;
use core::{marker::PhantomData, future::Future};

use scale::Encode;
use serai_db::{DbTxn, Db};

@ -65,113 +65,119 @@ impl<D: Db, S: ScannerFeed> ReportTask<D, S> {
  }
}

#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
  async fn run_iteration(&mut self) -> Result<bool, String> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      let highest_reportable = {
        // Fetch the next to scan block
        let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
          .expect("ReportTask run before writing the start block");
        // If we haven't done any work, return
        if next_to_scan == 0 {
          return Ok(false);
        }
        // The last scanned block is the block prior to this
        #[allow(clippy::let_and_return)]
        let last_scanned = next_to_scan - 1;
        // The last scanned block is the highest reportable block as we only scan blocks within a
        // window where it's safe to immediately report the block
        // See `eventuality.rs` for more info
        last_scanned
      };

      let next_to_potentially_report = ReportDb::<S>::next_to_potentially_report_block(&self.db)
        .expect("ReportTask run before writing the start block");

      for b in next_to_potentially_report ..= highest_reportable {
        let mut txn = self.db.txn();

        // Receive the InInstructions for this block
        // We always do this as we can't trivially tell if we should recv InInstructions before we
        // do
        let InInstructionData {
          external_key_for_session_to_sign_batch,
          returnable_in_instructions: in_instructions,
        } = ScanToReportDb::<S>::recv_in_instructions(&mut txn, b);
        let notable = ScannerGlobalDb::<S>::is_block_notable(&txn, b);
        if !notable {
          assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions");
        }
        // If this block is notable, create the Batch(s) for it
        if notable {
          let network = S::NETWORK;
          let block_hash = index::block_id(&txn, b);
          let mut batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);

          // start with empty batch
          let mut batches = vec![Batch {
            network,
            id: batch_id,
            block: BlockHash(block_hash),
            instructions: vec![],
          }];
          // We also track the return information for the InInstructions within a Batch in case
          // they error
          let mut return_information = vec![vec![]];

          for Returnable { return_address, in_instruction } in in_instructions {
            let balance = in_instruction.balance;

            let batch = batches.last_mut().unwrap();
            batch.instructions.push(in_instruction);

            // check if batch is over-size
            if batch.encode().len() > MAX_BATCH_SIZE {
              // pop the last instruction so it's back in size
              let in_instruction = batch.instructions.pop().unwrap();

              // bump the id for the new batch
              batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);

              // make a new batch with this instruction included
              batches.push(Batch {
                network,
                id: batch_id,
                block: BlockHash(block_hash),
                instructions: vec![in_instruction],
              });
              // Since we're allocating a new batch, allocate a new set of return addresses for it
              return_information.push(vec![]);
            }

            // For the set of return addresses for the InInstructions for the batch we just pushed
            // onto, push this InInstruction's return addresses
            return_information
              .last_mut()
              .unwrap()
              .push(return_address.map(|address| ReturnInformation { address, balance }));
          }

          // Save the return addresses to the database
          assert_eq!(batches.len(), return_information.len());
          for (batch, return_information) in batches.iter().zip(&return_information) {
            assert_eq!(batch.instructions.len(), return_information.len());
            ReportDb::<S>::save_external_key_for_session_to_sign_batch(
              &mut txn,
              batch.id,
              &external_key_for_session_to_sign_batch,
            );
            ReportDb::<S>::save_return_information(&mut txn, batch.id, return_information);
          }

          for batch in batches {
            Batches::send(&mut txn, &batch);
            BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch);
          }
        }

        // Update the next to potentially report block
        ReportDb::<S>::set_next_to_potentially_report_block(&mut txn, b + 1);

        txn.commit();
      }

      // Run dependents if we decided to report any blocks
      Ok(next_to_potentially_report <= highest_reportable)
    }
  }
}

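The loop above keeps each SCALE-encoded Batch under MAX_BATCH_SIZE by pushing an instruction, re-encoding, and, on overflow, popping it back off and opening a new Batch. A standalone sketch of the same greedy strategy, with plain byte vectors and an assumed limit standing in for the real Batch type and constant:

// Sketch only: byte vectors stand in for encoded InInstructions, and the limit is illustrative.
// Unlike the code above, this tracks a running length instead of re-encoding the whole batch.
const ASSUMED_MAX_BATCH_SIZE: usize = 25_000;

fn split_into_batches(instructions: Vec<Vec<u8>>) -> Vec<Vec<Vec<u8>>> {
  let mut batches = vec![vec![]];
  let mut len = 0;
  for instruction in instructions {
    len += instruction.len();
    batches.last_mut().unwrap().push(instruction);
    // If this instruction pushed the batch over-size, move it into a fresh batch
    if len > ASSUMED_MAX_BATCH_SIZE {
      let overflowing = batches.last_mut().unwrap().pop().unwrap();
      len = overflowing.len();
      batches.push(vec![overflowing]);
    }
  }
  batches
}
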
@ -1,3 +1,4 @@
use core::future::Future;
use std::collections::HashMap;

use scale::Decode;

@ -107,258 +108,262 @@ impl<D: Db, S: ScannerFeed> ScanTask<D, S> {
  }
}

#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanTask<D, S> {
  async fn run_iteration(&mut self) -> Result<bool, String> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      // Fetch the safe to scan block
      let latest_scannable =
        latest_scannable_block::<S>(&self.db).expect("ScanTask run before writing the start block");
      // Fetch the next block to scan
      let next_to_scan = ScanDb::<S>::next_to_scan_for_outputs_block(&self.db)
        .expect("ScanTask run before writing the start block");

      for b in next_to_scan ..= latest_scannable {
        let block = self.feed.block_by_number(&self.db, b).await?;

        log::info!("scanning block: {} ({b})", hex::encode(block.id()));

        let mut txn = self.db.txn();

        assert_eq!(ScanDb::<S>::next_to_scan_for_outputs_block(&txn).unwrap(), b);

        // Tidy the keys, then fetch them
        // We don't have to tidy them here, we just have to somewhere, so why not here?
        ScannerGlobalDb::<S>::tidy_keys(&mut txn);
        let keys = ScannerGlobalDb::<S>::active_keys_as_of_next_to_scan_for_outputs_block(&txn)
          .expect("scanning for a blockchain without any keys set");

        // The scan data for this block
        let mut scan_data = SenderScanData {
          block_number: b,
          received_external_outputs: vec![],
          forwards: vec![],
          returns: vec![],
        };
        // The InInstructions for this block
        let mut in_instructions = vec![];

        // The outputs queued for this block
        let queued_outputs = {
          let mut queued_outputs = ScanDb::<S>::take_queued_outputs(&mut txn, b);
          // Sort the queued outputs in case they weren't queued in a deterministic fashion
          queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output));
          queued_outputs
        };
        for queued_output in queued_outputs {
          in_instructions.push((
            queued_output.output.id(),
            Returnable {
              return_address: queued_output.return_address,
              in_instruction: queued_output.in_instruction,
            },
          ));
          scan_data.received_external_outputs.push(queued_output.output);
        }

        // We subtract the cost to aggregate from some outputs we scan
        // This cost is fetched with an asynchronous function which may be non-trivial
        // We cache the result of this function here to avoid calling it multiple times
        let mut costs_to_aggregate = HashMap::with_capacity(1);

        // Scan for each key
        for key in &keys {
          for output in block.scan_for_outputs(key.key) {
            assert_eq!(output.key(), key.key);

            /*
              The scan task runs ahead of time, obtaining ordering on the external network's blocks
              with relation to events on the Serai network. This is done via publishing a Batch
              which contains the InInstructions from External outputs. Accordingly, the scan
              process only has to yield External outputs.

              It'd appear to make sense to scan for all outputs, and after scanning for all
              outputs, yield all outputs. The issue is we can't identify outputs we created here.
              We can only identify the outputs we receive and their *declared intention*.

              We only want to handle Change/Branch/Forwarded outputs we made ourselves. For
              Forwarded, the reasoning is obvious (retiring multisigs should only downsize, yet
              accepting new outputs solely because they claim to be Forwarded would increase the
              size of the multisig). For Change/Branch, it's because such outputs which aren't ours
              are pointless. They wouldn't hurt to accumulate though.

              The issue is they would hurt to accumulate. We want to filter outputs which are less
              than their cost to aggregate, a variable itself variable to the current blockchain.
              We can filter such outputs here, yet if we drop a Change output, we create an
              insolvency. We'd need to track the loss and offset it later. That means we can't
              filter such outputs, as we expect any Change output we make.

              The issue is the Change outputs we don't make. Someone can create an output declaring
              to be Change, yet not actually Change. If we don't filter it, it'd be queued for
              accumulation, yet it may cost more to accumulate than it's worth.

              The solution is to let the Eventuality task, which does know if we made an output or
              not (or rather, if a transaction is identical to a transaction which should exist
              regarding effects) decide to keep/yield the outputs which we should only keep if we
              made them (as Serai itself should not make worthless outputs, so we can assume
              they're worthwhile, and even if they're not economically, they are technically).

              The alternative, we drop outputs here with a generic filter rule and then report back
              the insolvency created, still doesn't work as we'd only be creating an insolvency if
              the output was actually made by us (and not simply someone else sending in). We can
              have the Eventuality task report the insolvency, yet that requires the scanner be
              responsible for such filter logic. It's more flexible, and has a cleaner API,
              to do so at a higher level.
            */
            if output.kind() != OutputType::External {
              // While we don't report these outputs, we still need consensus on this block and
              // accordingly still need to set it as notable
              let balance = output.balance();
              // We ensure it's over the dust limit to prevent people sending 1 satoshi from
              // causing an invocation of a consensus/signing protocol
              if balance.amount.0 >= S::dust(balance.coin).0 {
                ScannerGlobalDb::<S>::flag_notable_due_to_non_external_output(&mut txn, b);
              }
              continue;
            }

            // Check this isn't dust
            let balance_to_use = {
              let mut balance = output.balance();

              // First, subtract 2 * the cost to aggregate, as detailed in
              // `spec/processor/UTXO Management.md`

              // We cache this, so if it isn't yet cached, insert it into the cache
              if let std::collections::hash_map::Entry::Vacant(e) =
                costs_to_aggregate.entry(balance.coin)
              {
                e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| {
                  format!(
                    "ScanTask couldn't fetch cost to aggregate {:?} at {b}: {e:?}",
                    balance.coin
                  )
                })?);
              }
              let cost_to_aggregate = costs_to_aggregate[&balance.coin];
              balance.amount.0 -= 2 * cost_to_aggregate.0;

              // Now, check it's still past the dust threshold
              if balance.amount.0 < S::dust(balance.coin).0 {
                continue;
              }

              balance
            };

            // Fetch the InInstruction/return addr for this output
            let output_with_in_instruction = match in_instruction_from_output::<S>(&output) {
              (return_address, Some(instruction)) => OutputWithInInstruction {
                output,
                return_address,
                in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use },
              },
              (Some(address), None) => {
                // Since there was no instruction here, return this since we parsed a return
                // address
                if key.stage != LifetimeStage::Finishing {
                  scan_data.returns.push(Return { address, output });
                }
                continue;
              }
              // Since we didn't receive an instruction nor can we return this, queue this for
              // accumulation and move on
              (None, None) => {
                if key.stage != LifetimeStage::Finishing {
                  scan_data.received_external_outputs.push(output);
                }
                continue;
              }
            };

            // Drop External outputs if they're to a multisig which won't report them
            // This means we should report any External output we save to disk here
            #[allow(clippy::match_same_arms)]
            match key.stage {
              // This multisig isn't yet reporting its External outputs to avoid a DoS
              // Queue the output to be reported when this multisig starts reporting
              LifetimeStage::ActiveYetNotReporting => {
                ScanDb::<S>::queue_output_until_block(
                  &mut txn,
                  key.block_at_which_reporting_starts,
                  &output_with_in_instruction,
                );
                continue;
              }
              // We should report External outputs in these cases
              LifetimeStage::Active | LifetimeStage::UsingNewForChange => {}
              // We should report External outputs only once forwarded, where they'll appear as
              // OutputType::Forwarded. We save them now for when they appear
              LifetimeStage::Forwarding => {
                // When the forwarded output appears, we can see which Plan it's associated with
                // and from there recover this output
                scan_data.forwards.push(output_with_in_instruction);
                continue;
              }
              // We should drop these as we should not be handling new External outputs at this
              // time
              LifetimeStage::Finishing => {
                continue;
              }
            }
            // Ensures we didn't miss a `continue` above
            assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange));

            in_instructions.push((
              output_with_in_instruction.output.id(),
              Returnable {
                return_address: output_with_in_instruction.return_address,
                in_instruction: output_with_in_instruction.in_instruction,
              },
            ));
            scan_data.received_external_outputs.push(output_with_in_instruction.output);
          }
        }

        // Sort the InInstructions by the output ID
        in_instructions.sort_by(|(output_id_a, _), (output_id_b, _)| {
          use core::cmp::{Ordering, Ord};
          let res = output_id_a.as_ref().cmp(output_id_b.as_ref());
          assert!(res != Ordering::Equal, "two outputs within a collection had the same ID");
          res
        });
        // Check we haven't prior reported an InInstruction for this output
        // This is a sanity check which is intended to prevent multiple instances of sriXYZ
        // on-chain due to a single output
        for (id, _) in &in_instructions {
          assert!(
            !ScanDb::<S>::prior_reported_in_instruction_for_output(&txn, id),
            "prior reported an InInstruction for an output with this ID"
          );
          ScanDb::<S>::reported_in_instruction_for_output(&mut txn, id);
        }
        // Reformat the InInstructions to just the InInstructions
        let in_instructions = in_instructions
          .into_iter()
          .map(|(_id, in_instruction)| in_instruction)
          .collect::<Vec<_>>();
        // Send the InInstructions to the report task
        // We need to also specify which key is responsible for signing the Batch for these, which
        // will always be the oldest key (as the new key signing the Batch signifies handover
        // acceptance)
        ScanToReportDb::<S>::send_in_instructions(
          &mut txn,
          b,
          &InInstructionData {
            external_key_for_session_to_sign_batch: keys[0].key,
            returnable_in_instructions: in_instructions,
          },
        );

        // Send the scan data to the eventuality task
        ScanToEventualityDb::<S>::send_scan_data(&mut txn, b, &scan_data);
        // Update the next to scan block
        ScanDb::<S>::set_next_to_scan_for_outputs_block(&mut txn, b + 1);
        txn.commit();
      }

      // Run dependents if we successfully scanned any blocks
      Ok(next_to_scan <= latest_scannable)
    }
  }
}

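The dust handling above subtracts twice the cost to aggregate from an External output's balance and only keeps it if the remainder is still at or above the dust threshold. In isolation, and with plain integers standing in for the Amount/Coin types, the rule looks like the following sketch (not the crate's API):

// Returns the reportable amount, or None if the output should be skipped as dust.
// checked_sub is used here so an output smaller than twice the aggregation cost is simply
// dropped rather than underflowing.
fn effective_amount(amount: u64, cost_to_aggregate: u64, dust: u64) -> Option<u64> {
  let effective = amount.checked_sub(2 * cost_to_aggregate)?;
  if effective < dust {
    return None;
  }
  Some(effective)
}
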
@@ -1,4 +1,4 @@
-use core::marker::PhantomData;
+use core::{marker::PhantomData, future::Future};
 
 use serai_db::{DbTxn, Db};

@@ -52,115 +52,121 @@ impl<D: Db, S: ScannerFeed> SubstrateTask<D, S> {
   }
 }
 
-#[async_trait::async_trait]
 impl<D: Db, S: ScannerFeed> ContinuallyRan for SubstrateTask<D, S> {
-  async fn run_iteration(&mut self) -> Result<bool, String> {
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+    async move {
      let mut made_progress = false;
      loop {
        // Fetch the next action to handle
        let mut txn = self.db.txn();
        let Some(action) = SubstrateDb::<S>::next_action(&mut txn) else {
          drop(txn);
          return Ok(made_progress);
        };

        match action {
          Action::AcknowledgeBatch(AcknowledgeBatch {
            batch_id,
            in_instruction_succeededs,
            mut burns,
            key_to_activate,
          }) => {
            // Check if we have the information for this batch
            let Some(block_number) = report::take_block_number_for_batch::<S>(&mut txn, batch_id)
            else {
              // If we don't, drop this txn (restoring the action to the database)
              drop(txn);
              return Ok(made_progress);
            };

            {
              let external_key_for_session_to_sign_batch =
                report::take_external_key_for_session_to_sign_batch::<S>(&mut txn, batch_id)
                  .unwrap();
              AcknowledgedBatches::send(
                &mut txn,
                &external_key_for_session_to_sign_batch,
                batch_id,
              );
            }

            // Mark we made progress and handle this
            made_progress = true;

            assert!(
              ScannerGlobalDb::<S>::is_block_notable(&txn, block_number),
              "acknowledging a block which wasn't notable"
            );
            if let Some(prior_highest_acknowledged_block) =
              ScannerGlobalDb::<S>::highest_acknowledged_block(&txn)
            {
              // If a single block produced multiple Batches, the block number won't increment
              assert!(
                block_number >= prior_highest_acknowledged_block,
                "acknowledging blocks out-of-order"
              );
              for b in (prior_highest_acknowledged_block + 1) .. block_number {
                assert!(
                  !ScannerGlobalDb::<S>::is_block_notable(&txn, b),
                  "skipped acknowledging a block which was notable"
                );
              }
            }

            ScannerGlobalDb::<S>::set_highest_acknowledged_block(&mut txn, block_number);
            if let Some(key_to_activate) = key_to_activate {
              ScannerGlobalDb::<S>::queue_key(
                &mut txn,
                block_number + S::WINDOW_LENGTH,
                key_to_activate,
              );
            }

            // Return the balances for any InInstructions which failed to execute
            {
              let return_information = report::take_return_information::<S>(&mut txn, batch_id)
                .expect("didn't save the return information for Batch we published");
              assert_eq!(
                in_instruction_succeededs.len(),
                return_information.len(),
                "amount of InInstruction succeededs differed from amount of return information saved"
              );

              // We map these into standard Burns
              for (succeeded, return_information) in
                in_instruction_succeededs.into_iter().zip(return_information)
              {
                if succeeded {
                  continue;
                }

                if let Some(report::ReturnInformation { address, balance }) = return_information {
                  burns.push(OutInstructionWithBalance {
                    instruction: OutInstruction { address: address.into(), data: None },
                    balance,
                  });
                }
              }
            }

            // We send these Burns as stemming from this block we just acknowledged
            // This causes them to be acted on after we accumulate the outputs from this block
            SubstrateToEventualityDb::send_burns::<S>(&mut txn, block_number, burns);
          }

          Action::QueueBurns(burns) => {
            // We can instantly handle this so long as we've handled all prior actions
            made_progress = true;

            let queue_as_of = ScannerGlobalDb::<S>::highest_acknowledged_block(&txn)
              .expect("queueing Burns yet never acknowledged a block");

            SubstrateToEventualityDb::send_burns::<S>(&mut txn, queue_as_of, burns);
          }
        }

        txn.commit();
      }
    }
  }
}

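The "drop this txn (restoring the action to the database)" pattern above relies on reads being staged inside the transaction and only consumed on commit. The following is a toy model of that behavior, not the serai_db API; the types and queue are invented purely for illustration.

use std::collections::VecDeque;

// Toy database with a queue of pending actions.
struct Db { queue: VecDeque<String> }
// Toy transaction which tracks how many actions it has peeked.
struct Txn<'a> { db: &'a mut Db, consumed: usize }

impl Db {
  fn txn(&mut self) -> Txn<'_> { Txn { db: self, consumed: 0 } }
}

impl<'a> Txn<'a> {
  // Peek the next queued action; nothing is removed from the queue yet.
  fn next_action(&mut self) -> Option<String> {
    let action = self.db.queue.get(self.consumed).cloned();
    if action.is_some() { self.consumed += 1 }
    action
  }
  // Only committing actually removes the peeked actions.
  fn commit(self) {
    for _ in 0 .. self.consumed { self.db.queue.pop_front(); }
  }
  // Dropping the txn without committing leaves the queue untouched, so the same
  // action is yielded again on the task's next iteration.
}

fn main() {
  let mut db = Db { queue: VecDeque::from([String::from("AcknowledgeBatch")]) };
  let mut txn = db.txn();
  assert_eq!(txn.next_action().as_deref(), Some("AcknowledgeBatch"));
  drop(txn); // not ready to handle it: the action is restored
  let mut txn = db.txn();
  assert_eq!(txn.next_action().as_deref(), Some("AcknowledgeBatch")); // still there
  txn.commit(); // handled: the action is consumed
  assert!(db.queue.is_empty());
}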
@@ -14,13 +14,12 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [package.metadata.cargo-machete]
-ignored = ["borsh", "scale"]
+ignored = ["borsh"]
 
 [lints]
 workspace = true
 
 [dependencies]
-async-trait = { version = "0.1", default-features = false }
-
 rand_core = { version = "0.6", default-features = false }
 zeroize = { version = "1", default-features = false, features = ["std"] }

@@ -1,3 +1,4 @@
+use core::future::Future;
 use std::collections::HashSet;
 
 use ciphersuite::{group::GroupEncoding, Ristretto};

@@ -75,114 +76,115 @@ impl<D: Db, E: GroupEncoding> BatchSignerTask<D, E> {
   }
 }
 
-#[async_trait::async_trait]
 impl<D: Db, E: Send + GroupEncoding> ContinuallyRan for BatchSignerTask<D, E> {
-  async fn run_iteration(&mut self) -> Result<bool, String> {
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+    async move {
      let mut iterated = false;

      // Check for new batches to sign
      loop {
        let mut txn = self.db.txn();
        let Some(batch) = BatchesToSign::try_recv(&mut txn, &self.external_key) else {
          break;
        };
        iterated = true;

        // Save this to the database as a transaction to sign
        self.active_signing_protocols.insert(batch.id);
        ActiveSigningProtocols::set(
          &mut txn,
          self.session,
          &self.active_signing_protocols.iter().copied().collect(),
        );
        Batches::set(&mut txn, batch.id, &batch);

        let mut machines = Vec::with_capacity(self.keys.len());
        for keys in &self.keys {
          machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch)));
        }
        for msg in self.attempt_manager.register(VariantSignId::Batch(batch.id), machines) {
          BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
        }

        txn.commit();
      }

      // Check for acknowledged Batches (meaning we should no longer sign for these Batches)
      loop {
        let mut txn = self.db.txn();
        let Some(id) = AcknowledgedBatches::try_recv(&mut txn, &self.external_key) else {
          break;
        };

        {
          let last_acknowledged = LastAcknowledgedBatch::get(&txn);
          if Some(id) > last_acknowledged {
            LastAcknowledgedBatch::set(&mut txn, &id);
          }
        }

        /*
          We may have yet to register this signing protocol.

          While `BatchesToSign` is populated before `AcknowledgedBatches`, we could theoretically
          have `BatchesToSign` populated with a new batch _while iterating over
          `AcknowledgedBatches`_, and then have `AcknowledgedBatched` populated. In that edge case,
          we will see the acknowledgement notification before we see the transaction.

          In such a case, we break (dropping the txn, re-queueing the acknowledgement notification).
          On the task's next iteration, we'll process the Batch from `BatchesToSign` and be
          able to make progress.
        */
        if !self.active_signing_protocols.remove(&id) {
          break;
        }
        iterated = true;

        // Since it was, remove this as an active signing protocol
        ActiveSigningProtocols::set(
          &mut txn,
          self.session,
          &self.active_signing_protocols.iter().copied().collect(),
        );
        // Clean up the database
        Batches::del(&mut txn, id);
        SignedBatches::del(&mut txn, id);

        // We retire with a txn so we either successfully flag this Batch as acknowledged, and
        // won't re-register it (making this retire safe), or we don't flag it, meaning we will
        // re-register it, yet that's safe as we have yet to retire it
        self.attempt_manager.retire(&mut txn, VariantSignId::Batch(id));

        txn.commit();
      }

      // Handle any messages sent to us
      loop {
        let mut txn = self.db.txn();
        let Some(msg) = CoordinatorToBatchSignerMessages::try_recv(&mut txn, self.session) else {
          break;
        };
        iterated = true;

        match self.attempt_manager.handle(msg) {
          Response::Messages(msgs) => {
            for msg in msgs {
              BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
            }
          }
          Response::Signature { id, signature } => {
            let VariantSignId::Batch(id) = id else { panic!("BatchSignerTask signed a non-Batch") };
            let batch =
              Batches::get(&txn, id).expect("signed a Batch we didn't save to the database");
            let signed_batch = SignedBatch { batch, signature: signature.into() };
            SignedBatches::set(&mut txn, signed_batch.batch.id, &signed_batch);
          }
        }

        txn.commit();
      }

      Ok(iterated)
    }
  }
}

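The `+ Send` bound written into the returned `impl Future` is what keeps these iterations spawnable on a multi-threaded runtime once the `#[async_trait]` attribute is gone. A minimal sketch under that assumption is below; `RunOnce` and `spawn_iteration` are hypothetical names, not items from this repository.

use core::future::Future;

// Hypothetical trait mirroring the shape used in this diff.
trait RunOnce {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
}

// Without the `Send` bound on the returned future, this `tokio::spawn` would not compile,
// since the spawned future must be `Send` to move across worker threads.
fn spawn_iteration<T: RunOnce + Send + 'static>(
  mut task: T,
) -> tokio::task::JoinHandle<Result<bool, String>> {
  tokio::spawn(async move { task.run_iteration().await })
}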
@@ -1,3 +1,5 @@
+use core::future::Future;
+
 use scale::Decode;
 use serai_db::{DbTxn, Db};

@@ -19,149 +21,157 @@ impl<D: Db, C: Coordinator> CoordinatorTask<D, C> {
   }
 }
 
-#[async_trait::async_trait]
 impl<D: Db, C: Coordinator> ContinuallyRan for CoordinatorTask<D, C> {
-  async fn run_iteration(&mut self) -> Result<bool, String> {
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+    async move {
      let mut iterated = false;

      for session in RegisteredKeys::get(&self.db).unwrap_or(vec![]) {
        // Publish the messages generated by this key's signers
        loop {
          let mut txn = self.db.txn();
          let Some(msg) = CosignerToCoordinatorMessages::try_recv(&mut txn, session) else {
            break;
          };
          iterated = true;

          self
            .coordinator
            .send(msg)
            .await
            .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?;

          txn.commit();
        }

        loop {
          let mut txn = self.db.txn();
          let Some(msg) = BatchSignerToCoordinatorMessages::try_recv(&mut txn, session) else {
            break;
          };
          iterated = true;

          self
            .coordinator
            .send(msg)
            .await
            .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?;

          txn.commit();
        }

        loop {
          let mut txn = self.db.txn();
          let Some(msg) = SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session)
          else {
            break;
          };
          iterated = true;

          self
            .coordinator
            .send(msg)
            .await
            .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?;

          txn.commit();
        }

        loop {
          let mut txn = self.db.txn();
          let Some(msg) = TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session)
          else {
            break;
          };
          iterated = true;

          self
            .coordinator
            .send(msg)
            .await
            .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?;

          txn.commit();
        }

        // Publish the cosigns from this session
        {
          let mut txn = self.db.txn();
          while let Some(((block_number, block_id), signature)) =
            Cosign::try_recv(&mut txn, session)
          {
            iterated = true;
            self
              .coordinator
              .publish_cosign(
                block_number,
                block_id,
                <_>::decode(&mut signature.as_slice()).unwrap(),
              )
              .await
              .map_err(|e| format!("couldn't publish Cosign: {e:?}"))?;
          }
          txn.commit();
        }

        // If this session signed its slash report, publish its signature
        {
          let mut txn = self.db.txn();
          if let Some(slash_report_signature) = SlashReportSignature::try_recv(&mut txn, session) {
            iterated = true;

            self
              .coordinator
              .publish_slash_report_signature(
                session,
                <_>::decode(&mut slash_report_signature.as_slice()).unwrap(),
              )
              .await
              .map_err(|e| {
                format!("couldn't send slash report signature to the coordinator: {e:?}")
              })?;

            txn.commit();
          }
        }
      }

      // Publish the Batches
      {
        let mut txn = self.db.txn();
        while let Some(batch) = scanner::Batches::try_recv(&mut txn) {
          iterated = true;
          self
            .coordinator
            .publish_batch(batch)
            .await
            .map_err(|e| format!("couldn't publish Batch: {e:?}"))?;
        }
        txn.commit();
      }

      // Publish the signed Batches
      {
        let mut txn = self.db.txn();
        // The last acknowledged Batch may exceed the last Batch we published if we didn't sign for
        // the prior Batch(es) (and accordingly didn't publish them)
        let last_batch =
          crate::batch::last_acknowledged_batch(&txn).max(db::LastPublishedBatch::get(&txn));
        let mut next_batch = last_batch.map_or(0, |id| id + 1);
        while let Some(batch) = crate::batch::signed_batch(&txn, next_batch) {
          iterated = true;
          db::LastPublishedBatch::set(&mut txn, &batch.batch.id);
          self
            .coordinator
            .publish_signed_batch(batch)
            .await
            .map_err(|e| format!("couldn't publish Batch: {e:?}"))?;
          next_batch += 1;
        }
        txn.commit();
      }

      Ok(iterated)
    }
  }
}

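The resume point for publishing signed Batches is computed as the maximum of the last acknowledged and last published Batch IDs, treating "never" (`None`) as less than any ID. A small standalone sketch of that arithmetic is below; the `u32` ID type and the function name are illustrative, not taken from the crate.

// `Option<u32>` ordering treats `None` as less than any `Some`, so `.max()` picks the
// greater of the two known IDs, and `map_or(0, |id| id + 1)` starts at 0 when neither exists.
fn next_batch_to_publish(last_acknowledged: Option<u32>, last_published: Option<u32>) -> u32 {
  let last_batch = last_acknowledged.max(last_published);
  last_batch.map_or(0, |id| id + 1)
}

fn main() {
  assert_eq!(next_batch_to_publish(None, None), 0);
  assert_eq!(next_batch_to_publish(Some(4), None), 5);
  assert_eq!(next_batch_to_publish(Some(4), Some(7)), 8);
}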
@@ -1,3 +1,5 @@
+use core::future::Future;
+
 use ciphersuite::Ristretto;
 use frost::dkg::ThresholdKeys;

@@ -48,75 +50,76 @@ impl<D: Db> CosignerTask<D> {
   }
 }
 
-#[async_trait::async_trait]
 impl<D: Db> ContinuallyRan for CosignerTask<D> {
-  async fn run_iteration(&mut self) -> Result<bool, String> {
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+    async move {
      let mut iterated = false;

      // Check the cosign to work on
      {
        let mut txn = self.db.txn();
        if let Some(cosign) = ToCosign::get(&txn, self.session) {
          // If this wasn't already signed for...
          if LatestCosigned::get(&txn, self.session) < Some(cosign.0) {
            // If this isn't the cosign we're currently working on, meaning it's fresh
            if self.current_cosign != Some(cosign) {
              // Retire the current cosign
              if let Some(current_cosign) = self.current_cosign {
                assert!(current_cosign.0 < cosign.0);
                self.attempt_manager.retire(&mut txn, VariantSignId::Cosign(current_cosign.0));
              }

              // Set the cosign being worked on
              self.current_cosign = Some(cosign);

              let mut machines = Vec::with_capacity(self.keys.len());
              {
                let message = cosign_block_msg(cosign.0, cosign.1);
                for keys in &self.keys {
                  machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone()));
                }
              }
              for msg in self.attempt_manager.register(VariantSignId::Cosign(cosign.0), machines) {
                CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
              }

              txn.commit();
            }
          }
        }
      }

      // Handle any messages sent to us
      loop {
        let mut txn = self.db.txn();
        let Some(msg) = CoordinatorToCosignerMessages::try_recv(&mut txn, self.session) else {
          break;
        };
        iterated = true;

        match self.attempt_manager.handle(msg) {
          Response::Messages(msgs) => {
            for msg in msgs {
              CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
            }
          }
          Response::Signature { id, signature } => {
            let VariantSignId::Cosign(block_number) = id else {
              panic!("CosignerTask signed a non-Cosign")
            };
            assert_eq!(Some(block_number), self.current_cosign.map(|cosign| cosign.0));

            let cosign = self.current_cosign.take().unwrap();
            LatestCosigned::set(&mut txn, self.session, &cosign.0);
            // Send the cosign
            Cosign::send(&mut txn, self.session, &(cosign, Signature::from(signature).encode()));
          }
        }

        txn.commit();
      }

      Ok(iterated)
    }
  }
}

@@ -2,7 +2,7 @@
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
-use core::{fmt::Debug, marker::PhantomData};
+use core::{future::Future, fmt::Debug, marker::PhantomData};
 use std::collections::HashMap;
 
 use zeroize::Zeroizing;

@@ -43,7 +43,6 @@ mod transaction;
 use transaction::TransactionSignerTask;
 
 /// A connection to the Coordinator which messages can be published with.
-#[async_trait::async_trait]
 pub trait Coordinator: 'static + Send + Sync {
   /// An error encountered when interacting with a coordinator.
   ///

@@ -52,32 +51,38 @@ pub trait Coordinator: 'static + Send + Sync {
   type EphemeralError: Debug;
 
   /// Send a `messages::sign::ProcessorMessage`.
-  async fn send(&mut self, message: ProcessorMessage) -> Result<(), Self::EphemeralError>;
+  fn send(
+    &mut self,
+    message: ProcessorMessage,
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
 
   /// Publish a cosign.
-  async fn publish_cosign(
+  fn publish_cosign(
     &mut self,
     block_number: u64,
     block_id: [u8; 32],
     signature: Signature,
-  ) -> Result<(), Self::EphemeralError>;
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
 
   /// Publish a `Batch`.
-  async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError>;
+  fn publish_batch(&mut self, batch: Batch)
+    -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
 
   /// Publish a `SignedBatch`.
-  async fn publish_signed_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError>;
+  fn publish_signed_batch(
+    &mut self,
+    batch: SignedBatch,
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
 
   /// Publish a slash report's signature.
-  async fn publish_slash_report_signature(
+  fn publish_slash_report_signature(
     &mut self,
     session: Session,
     signature: Signature,
-  ) -> Result<(), Self::EphemeralError>;
+  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
 }
 
 /// An object capable of publishing a transaction.
-#[async_trait::async_trait]
 pub trait TransactionPublisher<T: Transaction>: 'static + Send + Sync + Clone {
   /// An error encountered when publishing a transaction.
   ///

@@ -92,7 +97,7 @@ pub trait TransactionPublisher<T: Transaction>: 'static + Send + Sync + Clone {
   ///
   /// The transaction already being present in the mempool/on-chain MUST NOT be considered an
   /// error.
-  async fn publish(&self, tx: T) -> Result<(), Self::EphemeralError>;
+  fn publish(&self, tx: T) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
 }
 
 struct Tasks {

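Call sites are unaffected by the trait methods now returning `impl Future`: they `.await` the result exactly as before. A minimal caller sketch under that assumption is below; it assumes the crate's `Coordinator` trait and `Batch` type are in scope, and the helper itself is hypothetical.

// Sketch only: forwards a list of Batches through any Coordinator implementation.
// The returned futures are awaited directly, just as with the async-trait version.
async fn forward_batches<C: Coordinator>(
  coordinator: &mut C,
  batches: Vec<Batch>,
) -> Result<(), C::EphemeralError> {
  for batch in batches {
    coordinator.publish_batch(batch).await?;
  }
  Ok(())
}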
@@ -1,4 +1,4 @@
-use core::marker::PhantomData;
+use core::{marker::PhantomData, future::Future};
 
 use ciphersuite::Ristretto;
 use frost::dkg::ThresholdKeys;

@@ -51,70 +51,72 @@ impl<D: Db, S: ScannerFeed> SlashReportSignerTask<D, S> {
   }
 }
 
-#[async_trait::async_trait]
 impl<D: Db, S: ScannerFeed> ContinuallyRan for SlashReportSignerTask<D, S> {
-  async fn run_iteration(&mut self) -> Result<bool, String> {
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+    async move {
      let mut iterated = false;

      // Check for the slash report to sign
      if !self.has_slash_report {
        let mut txn = self.db.txn();
        let Some(slash_report) = SlashReport::try_recv(&mut txn, self.session) else {
          return Ok(false);
        };
        // We only commit this upon successfully signing this slash report
        drop(txn);
        iterated = true;

        self.has_slash_report = true;

        let mut machines = Vec::with_capacity(self.keys.len());
        {
          let message = report_slashes_message(
            &ValidatorSet { network: S::NETWORK, session: self.session },
            &SlashReportStruct(slash_report.try_into().unwrap()),
          );
          for keys in &self.keys {
            machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone()));
          }
        }
        let mut txn = self.db.txn();
        for msg in self.attempt_manager.register(VariantSignId::SlashReport(self.session), machines)
        {
          SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
        }
        txn.commit();
      }

      // Handle any messages sent to us
      loop {
        let mut txn = self.db.txn();
        let Some(msg) = CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, self.session)
        else {
          break;
        };
        iterated = true;

        match self.attempt_manager.handle(msg) {
          Response::Messages(msgs) => {
            for msg in msgs {
              SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
            }
          }
          Response::Signature { id, signature } => {
            let VariantSignId::SlashReport(session) = id else {
              panic!("SlashReportSignerTask signed a non-SlashReport")
            };
            assert_eq!(session, self.session);
            // Drain the channel
            SlashReport::try_recv(&mut txn, self.session).unwrap();
            // Send the signature
            SlashReportSignature::send(&mut txn, session, &Signature::from(signature).encode());
          }
        }

        txn.commit();
      }

      Ok(iterated)
    }
  }
}

@@ -1,3 +1,4 @@
+use core::future::Future;
 use std::{
   collections::HashSet,
   time::{Duration, Instant},

@@ -88,11 +89,10 @@ impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>
   }
 }
 
-#[async_trait::async_trait]
 impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>> ContinuallyRan
   for TransactionSignerTask<D, ST, P>
 {
-  async fn run_iteration(&mut self) -> Result<bool, String> {
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {async{
     let mut iterated = false;
 
     // Check for new transactions to sign

@@ -233,3 +233,4 @@ impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>
     Ok(iterated)
   }
 }
+}

@@ -71,7 +71,7 @@ pub async fn test_scanner<N: Network>(
   let block_id = block.id();
 
   // Verify the Scanner picked them up
-  let verify_event = |mut scanner: ScannerHandle<N, MemDb>| async {
+  let verify_event = |mut scanner: ScannerHandle<N, MemDb>| async move {
     let outputs =
       match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
         ScannerEvent::Block { is_retirement_block, block, outputs } => {
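The `async` to `async move` change in this test makes the closure's future own what it captures rather than borrow it. A minimal standalone illustration of that distinction is below; the names and values are illustrative and the test's exact motivation may involve additional borrow-checker details not shown here.

use core::future::Future;

// Returning `async { data.len() }` without `move` would not compile: the block would borrow
// the local `data`, which is dropped when this function returns. `async move` transfers
// ownership into the future, so it can be awaited later, independently of this scope.
fn make_future() -> impl Future<Output = usize> {
  let data = vec![1, 2, 3];
  async move { data.len() }
}

#[tokio::main]
async fn main() {
  assert_eq!(make_future().await, 3);
}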