Replace acknowledge_block with acknowledge_batch

This commit is contained in:
Luke Parker 2024-08-30 01:33:40 -04:00
parent 76cbe6cf1e
commit f21838e0d5
3 changed files with 53 additions and 15 deletions

View file

@@ -317,21 +317,33 @@ impl<S: ScannerFeed> Scanner<S> {
Self { eventuality_handle, _S: PhantomData }
}
/// Acknowledge a block.
/// Acknowledge a Batch having been published on Serai.
///
/// This means this block was ordered on Serai in relation to `Burn` events, and all validators
/// have achieved synchrony on it.
/// This means the specified Batch was ordered on Serai in relation to Burn events, and all
/// validators have achieved synchrony on it.
///
/// `in_instruction_succeededs` is the result of executing each InInstruction within this batch,
/// true if it succeeded and false if it did not (and did not cause any state changes on Serai).
///
/// `burns` is a list of Burns to queue with the acknowledgement of this Batch for efficiency's
/// sake. Any Burns passed here MUST NOT be passed into any other call of `acknowledge_batch` nor
/// `queue_burns`. Doing so will cause them to be executed multiple times.
///
/// The calls to this function must be ordered with regards to `queue_burns`.
pub fn acknowledge_block(
pub fn acknowledge_batch(
&mut self,
mut txn: impl DbTxn,
block_number: u64,
batch_id: u32,
in_instruction_succeededs: Vec<bool>,
mut burns: Vec<OutInstructionWithBalance>,
key_to_activate: Option<KeyFor<S>>,
) {
log::info!("acknowledging block {block_number}");
log::info!("acknowledging batch {batch_id}");
// TODO: We need to take all of these arguments and send them to a task
// Then, when we do have this block number, we need to execute this function
let block_number = report::take_block_number_for_batch::<S>(&mut txn, batch_id)
.expect("didn't have the block number for a Batch");
assert!(
ScannerGlobalDb::<S>::is_block_notable(&txn, block_number),
@@ -369,7 +381,6 @@ impl<S: ScannerFeed> Scanner<S> {
);
// We map these into standard Burns
let mut returns = vec![];
for (succeeded, return_information) in
in_instruction_succeededs.into_iter().zip(return_information)
{
@@ -378,15 +389,18 @@ impl<S: ScannerFeed> Scanner<S> {
}
if let Some(report::ReturnInformation { address, balance }) = return_information {
returns.push(OutInstructionWithBalance {
burns.push(OutInstructionWithBalance {
instruction: OutInstruction { address: address.into(), data: None },
balance,
});
}
}
// We send them as stemming from this block
// TODO: These should be handled with any Burns from this block
SubstrateToEventualityDb::send_burns(&mut txn, block_number, &returns);
}
if !burns.is_empty() {
// We send these Burns as stemming from this block we just acknowledged
// This causes them to be acted on after we accumulate the outputs from this block
SubstrateToEventualityDb::send_burns(&mut txn, block_number, &burns);
}
// Commit the txn
@@ -402,7 +416,9 @@ impl<S: ScannerFeed> Scanner<S> {
/// The scanner only updates the scheduler with new outputs upon acknowledging a block. The
/// ability to fulfill Burns, and therefore their order, is dependent on the current output
/// state. This immediately sets a bound that this function is ordered with regards to
/// `acknowledge_block`.
/// `acknowledge_batch`.
///
/// The Burns specified here MUST NOT also be passed to `acknowledge_batch`.
/*
The fact Burns can be queued during any Substrate block is problematic. The scanner is allowed
to scan anything within the window set by the Eventuality task. The Eventuality task is allowed
@@ -427,6 +443,10 @@ impl<S: ScannerFeed> Scanner<S> {
unnecessary).
*/
pub fn queue_burns(&mut self, txn: &mut impl DbTxn, burns: &Vec<OutInstructionWithBalance>) {
if burns.is_empty() {
return;
}
let queue_as_of = ScannerGlobalDb::<S>::highest_acknowledged_block(txn)
.expect("queueing Burns yet never acknowledged a block");

View file

@@ -17,6 +17,9 @@ create_db!(
// The next Batch ID to use
NextBatchId: () -> u32,
// The block number which caused a batch
BlockNumberForBatch: (batch: u32) -> u64,
// The return addresses for the InInstructions within a Batch
SerializedReturnAddresses: (batch: u32) -> Vec<u8>,
}
@@ -39,12 +42,19 @@ impl<S: ScannerFeed> ReportDb<S> {
NextToPotentiallyReportBlock::get(getter)
}
pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 {
pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn, block_number: u64) -> u32 {
let id = NextBatchId::get(txn).unwrap_or(0);
NextBatchId::set(txn, &(id + 1));
BlockNumberForBatch::set(txn, id, &block_number);
id
}
pub(crate) fn take_block_number_for_batch(txn: &mut impl DbTxn, id: u32) -> Option<u64> {
let block_number = BlockNumberForBatch::get(txn, id)?;
BlockNumberForBatch::del(txn, id);
Some(block_number)
}
pub(crate) fn save_return_information(
txn: &mut impl DbTxn,
id: u32,
@@ -67,6 +77,7 @@ impl<S: ScannerFeed> ReportDb<S> {
id: u32,
) -> Option<Vec<Option<ReturnInformation<S>>>> {
let buf = SerializedReturnAddresses::get(txn, id)?;
SerializedReturnAddresses::del(txn, id);
let mut buf = buf.as_slice();
let mut res = Vec::with_capacity(buf.len() / (32 + 1 + 8));

View file

@@ -25,6 +25,13 @@ pub(crate) fn take_return_information<S: ScannerFeed>(
ReportDb::<S>::take_return_information(txn, id)
}
pub(crate) fn take_block_number_for_batch<S: ScannerFeed>(
txn: &mut impl DbTxn,
id: u32,
) -> Option<u64> {
ReportDb::<S>::take_block_number_for_batch(txn, id)
}
/*
This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion.
@@ -89,7 +96,7 @@ impl<D: Db, S: ScannerFeed, B: BatchPublisher> ContinuallyRan for ReportTask<D,
if notable {
let network = S::NETWORK;
let block_hash = index::block_id(&txn, b);
let mut batch_id = ReportDb::<S>::acquire_batch_id(&mut txn);
let mut batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);
// start with empty batch
let mut batches =
@@ -110,7 +117,7 @@ impl<D: Db, S: ScannerFeed, B: BatchPublisher> ContinuallyRan for ReportTask<D,
let in_instruction = batch.instructions.pop().unwrap();
// bump the id for the new batch
batch_id = ReportDb::<S>::acquire_batch_id(&mut txn);
batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);
// make a new batch with this instruction included
batches.push(Batch {