Mirror of https://github.com/serai-dex/serai.git (synced 2025-01-03 17:40:34 +00:00)

Have the Scanner assign batch IDs

parent caa695511b
commit d74cbe2cce
5 changed files with 55 additions and 24 deletions

@@ -408,6 +408,7 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(raw_db: D, coin: C, mut coordinato
             .get_mut(&key_vec)
             .expect("key we don't have a scheduler for acknowledged a block")
             .add_outputs(scanner.ack_block(key, block_id).await);
+
           sign_plans(
             &mut main_db,
             &coin,

@@ -459,18 +460,15 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(raw_db: D, coin: C, mut coordinato

       msg = scanner.events.recv() => {
         match msg.unwrap() {
-          ScannerEvent::Block(key, block, time, outputs) => {
+          ScannerEvent::Block { key, block, time, batch, outputs } => {
             let key = key.to_bytes().as_ref().to_vec();

             let mut block_hash = [0; 32];
             block_hash.copy_from_slice(block.as_ref());

-            // TODO
-            let id = 0;
-
             let batch = Batch {
               network: C::NETWORK,
-              id,
+              id: batch,
               block: BlockHash(block_hash),
               instructions: outputs.iter().filter_map(|output| {
                 // If these aren't externally received funds, don't handle it as an instruction

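For orientation, a minimal sketch of the flow this hunk establishes: the batch ID now arrives on the scanner event and is copied into the Batch being published, instead of being hardcoded to 0. The types below are simplified stand-ins for illustration only, not the processor's real types.

// Simplified stand-ins; the real Batch/event types live elsewhere in the codebase.
struct Batch {
  id: u32,
  block: [u8; 32],
}

enum Event {
  Block { block: [u8; 32], batch: u32 },
}

fn handle(event: Event) -> Batch {
  match event {
    // The Scanner-assigned batch ID becomes the Batch's ID
    Event::Block { block, batch } => Batch { id: batch, block },
  }
}
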
@@ -22,7 +22,13 @@ use crate::{
 #[derive(Clone, Debug)]
 pub enum ScannerEvent<C: Coin> {
   // Block scanned
-  Block(<C::Curve as Ciphersuite>::G, <C::Block as Block<C>>::Id, SystemTime, Vec<C::Output>),
+  Block {
+    key: <C::Curve as Ciphersuite>::G,
+    block: <C::Block as Block<C>>::Id,
+    time: SystemTime,
+    batch: u32,
+    outputs: Vec<C::Output>,
+  },
   // Eventuality completion found on-chain
   Completed([u8; 32], <C::Transaction as Transaction<C>>::Id),
 }

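A minimal sketch of consuming the new struct variant, as the updated call sites below do: fields can be bound by name, renamed (key: this_key), or skipped with `..`. Concrete placeholder types stand in for the Coin-generic ones here.

use std::time::SystemTime;

// Placeholder, non-generic mirror of the enum, for illustration only.
enum ScannerEvent {
  Block { key: [u8; 32], block: [u8; 32], time: SystemTime, batch: u32, outputs: Vec<u64> },
  Completed([u8; 32], [u8; 32]),
}

fn batch_of(event: &ScannerEvent) -> Option<u32> {
  match event {
    // Bind only the field of interest; `..` ignores the rest
    ScannerEvent::Block { batch, .. } => Some(*batch),
    ScannerEvent::Completed(..) => None,
  }
}
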
@@ -111,18 +117,17 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
     self.0.get(Self::seen_key(id)).is_some()
   }

+  fn next_batch_key() -> Vec<u8> {
+    Self::scanner_key(b"next_batch", [])
+  }
+  fn batch_key(key: &<C::Curve as Ciphersuite>::G, block: &<C::Block as Block<C>>::Id) -> Vec<u8> {
+    Self::scanner_key(b"batch", [key.to_bytes().as_ref(), block.as_ref()].concat())
+  }
   fn outputs_key(
     key: &<C::Curve as Ciphersuite>::G,
     block: &<C::Block as Block<C>>::Id,
   ) -> Vec<u8> {
-    let key_bytes = key.to_bytes();
-    let key = key_bytes.as_ref();
-    // This should be safe without the bincode serialize. Using bincode lets us not worry/have to
-    // think about this
-    let db_key = bincode::serialize(&(key, block.as_ref())).unwrap();
-    // Assert this is actually length prefixing
-    debug_assert!(db_key.len() >= (1 + key.len() + 1 + block.as_ref().len()));
-    Self::scanner_key(b"outputs", db_key)
+    Self::scanner_key(b"outputs", [key.to_bytes().as_ref(), block.as_ref()].concat())
   }
   fn save_outputs(
     &mut self,

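The rewritten outputs_key drops the bincode-serialized, length-prefix-checked key in favor of plain concatenation, matching the new batch_key; since the group key encoding is fixed-length for a given curve, concatenation is unambiguous here (this reading is an inference from the diff, not stated in it). A minimal sketch of the key layout, with scanner_key as a stand-in for the real domain-separating helper:

// Stand-in for ScannerDb::scanner_key: a domain tag plus an arbitrary byte payload.
fn scanner_key(dst: &[u8], key: impl AsRef<[u8]>) -> Vec<u8> {
  [b"SCANNER".as_ref(), dst, key.as_ref()].concat()
}

// One global counter cell, independent of any key/block.
fn next_batch_key() -> Vec<u8> {
  scanner_key(b"next_batch", [])
}

// One cell per (group key, block), mapping to the batch ID assigned for that block.
fn batch_key(group_key: &[u8; 32], block: &[u8; 32]) -> Vec<u8> {
  scanner_key(b"batch", [group_key.as_ref(), block.as_ref()].concat())
}
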
@@ -130,12 +135,34 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
     key: &<C::Curve as Ciphersuite>::G,
     block: &<C::Block as Block<C>>::Id,
     outputs: &[C::Output],
-  ) {
+  ) -> u32 {
+    let batch_key = Self::batch_key(key, block);
+    if let Some(batch) = txn.get(batch_key) {
+      return u32::from_le_bytes(batch.try_into().unwrap());
+    }
+
     let mut bytes = Vec::with_capacity(outputs.len() * 64);
     for output in outputs {
       output.write(&mut bytes).unwrap();
     }
     txn.put(Self::outputs_key(key, block), bytes);
+
+    // This is a new set of outputs, which are expected to be handled in a perfectly ordered
+    // fashion
+
+    // TODO2: This is not currently how this works
+    // There may be new blocks 0 .. 5, which A will scan, yet then B may be activated at block 4
+    // This would cause
+    // 0a, 1a, 2a, 3a, 4a, 5a, 4b, 5b
+    // when it should be
+    // 0a, 1a, 2a, 3a, 4a, 4b, 5a, 5b
+
+    // Because it's a new set of outputs, allocate a batch ID for it
+    let next_bytes = txn.get(Self::next_batch_key()).unwrap_or(vec![0; 4]).try_into().unwrap();
+    let next = u32::from_le_bytes(next_bytes);
+    txn.put(Self::next_batch_key(), (next + 1).to_le_bytes());
+    txn.put(Self::batch_key(key, block), next_bytes);
+    next
   }
   fn outputs(
     &self,

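The heart of the change is this get-or-allocate pattern: save_outputs first checks whether a batch was already assigned to this (key, block), making re-saves idempotent, and only otherwise bumps a global little-endian u32 counter and records the assignment. A self-contained sketch of the same logic, with a HashMap standing in for the DB transaction:

use std::collections::HashMap;

// Get-or-allocate: return the batch already assigned to `batch_key`, or take the next
// value of a global little-endian u32 counter, bump it, and persist the assignment.
// `HashMap` is a stand-in for the real DB transaction.
fn get_or_allocate_batch(db: &mut HashMap<Vec<u8>, Vec<u8>>, batch_key: Vec<u8>) -> u32 {
  if let Some(batch) = db.get(&batch_key) {
    return u32::from_le_bytes(batch.clone().try_into().unwrap());
  }

  let next_key = b"next_batch".to_vec();
  let next_bytes: [u8; 4] =
    db.get(&next_key).cloned().unwrap_or(vec![0; 4]).try_into().unwrap();
  let next = u32::from_le_bytes(next_bytes);
  db.insert(next_key, (next + 1).to_le_bytes().to_vec());
  db.insert(batch_key, next_bytes.to_vec());
  next
}

fn main() {
  let mut db = HashMap::new();
  assert_eq!(get_or_allocate_batch(&mut db, b"block-a".to_vec()), 0);
  assert_eq!(get_or_allocate_batch(&mut db, b"block-b".to_vec()), 1);
  // Re-saving the same block returns the already-assigned ID
  assert_eq!(get_or_allocate_batch(&mut db, b"block-a".to_vec()), 0);
}
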
@@ -434,7 +461,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {

         // Save the outputs to disk
         let mut txn = scanner.db.0.txn();
-        scanner.db.save_outputs(&mut txn, &key, &block_id, &outputs);
+        let batch = scanner.db.save_outputs(&mut txn, &key, &block_id, &outputs);
         txn.commit();

         const TIME_TOLERANCE: u64 = 15;

@@ -477,7 +504,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {
         }

         // Send all outputs
-        if !scanner.emit(ScannerEvent::Block(key, block_id, time, outputs)) {
+        if !scanner.emit(ScannerEvent::Block { key, block: block_id, time, batch, outputs }) {
           return;
         }
         // Write this number as scanned so we won't re-fire these outputs

@@ -18,6 +18,7 @@ async fn spend<C: Coin, D: Db>(
   coin: &C,
   keys: &HashMap<Participant, ThresholdKeys<C::Curve>>,
   scanner: &mut ScannerHandle<C, D>,
+  batch: u32,
   outputs: Vec<C::Output>,
 ) -> Vec<C::Output> {
   let key = keys[&Participant::new(1).unwrap()].group_key();

@@ -49,8 +50,9 @@ async fn spend<C: Coin, D: Db>(
     coin.mine_block().await;
   }
   match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
-    ScannerEvent::Block(this_key, _, _, outputs) => {
+    ScannerEvent::Block { key: this_key, block: _, time: _, batch: this_batch, outputs } => {
       assert_eq!(this_key, key);
+      assert_eq!(this_batch, batch);
       assert_eq!(outputs.len(), 1);
       // Make sure this is actually a change output
       assert_eq!(outputs[0].kind(), OutputType::Change);

@@ -85,9 +87,10 @@ pub async fn test_addresses<C: Coin>(coin: C) {
   // Verify the Scanner picked them up
   let outputs =
     match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
-      ScannerEvent::Block(this_key, block, _, outputs) => {
+      ScannerEvent::Block { key: this_key, block, time: _, batch, outputs } => {
         assert_eq!(this_key, key);
         assert_eq!(block, block_id);
+        assert_eq!(batch, 0);
         assert_eq!(outputs.len(), 1);
         assert_eq!(outputs[0].kind(), OutputType::Branch);
         outputs

@@ -98,7 +101,7 @@ pub async fn test_addresses<C: Coin>(coin: C) {
     };

   // Spend the branch output, creating a change output and ensuring we actually get change
-  let outputs = spend(&coin, &keys, &mut scanner, outputs).await;
+  let outputs = spend(&coin, &keys, &mut scanner, 1, outputs).await;
   // Also test spending the change output
-  spend(&coin, &keys, &mut scanner, outputs).await;
+  spend(&coin, &keys, &mut scanner, 2, outputs).await;
 }

@@ -48,10 +48,11 @@ pub async fn test_scanner<C: Coin>(coin: C) {
   let verify_event = |mut scanner: ScannerHandle<C, MemDb>| async {
     let outputs =
       match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
-        ScannerEvent::Block(key, block, time, outputs) => {
+        ScannerEvent::Block { key, block, time, batch, outputs } => {
          assert_eq!(key, keys.group_key());
          assert_eq!(block, block_id);
          assert_eq!(time, block_time);
+         assert_eq!(batch, 0);
          assert_eq!(outputs.len(), 1);
          assert_eq!(outputs[0].kind(), OutputType::External);
          outputs

@@ -32,10 +32,11 @@ pub async fn test_wallet<C: Coin>(coin: C) {
   let block_time = block.time();

   match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
-    ScannerEvent::Block(this_key, block, time, outputs) => {
+    ScannerEvent::Block { key: this_key, block, time, batch, outputs } => {
      assert_eq!(this_key, key);
      assert_eq!(block, block_id);
      assert_eq!(time, block_time);
+     assert_eq!(batch, 0);
      assert_eq!(outputs.len(), 1);
      (block_id, outputs)
    }

@@ -102,10 +103,11 @@ pub async fn test_wallet<C: Coin>(coin: C) {
   }

   match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
-    ScannerEvent::Block(this_key, block_id, time, these_outputs) => {
+    ScannerEvent::Block { key: this_key, block: block_id, time, batch, outputs: these_outputs } => {
      assert_eq!(this_key, key);
      assert_eq!(block_id, block.id());
      assert_eq!(time, block.time());
+     assert_eq!(batch, 1);
      assert_eq!(these_outputs, outputs);
    }
    ScannerEvent::Completed(_, _) => {