#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::marker::PhantomData;
use std::collections::HashMap;

use serai_primitives::Coin;

use serai_db::DbTxn;

use primitives::{ReceivedOutput, Payment};
use scanner::{
  LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, SchedulerUpdate,
  Scheduler as SchedulerTrait,
};
use scheduler_primitives::*;

mod db;
use db::Db;

/// A planned transaction.
pub struct PlannedTransaction<S: ScannerFeed, T> {
  /// The signable transaction.
  signable: T,
  /// The outputs we'll receive from this.
  effected_received_outputs: OutputFor<S>,
  /// The Eventuality to watch for.
  eventuality: EventualityFor<S>,
}

/// A scheduler of transactions for networks premised on the UTXO model which support
/// transaction chaining.
pub struct Scheduler<
  S: ScannerFeed,
  T,
  P: TransactionPlanner<S, PlannedTransaction = PlannedTransaction<S, T>>,
>(PhantomData<S>, PhantomData<T>, PhantomData<P>);
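
// Transaction chaining is the ability to spend an output created by a transaction which has
// not yet been signed or confirmed. A minimal sketch of the idea, assuming a hypothetical
// `plan` helper with the shape of `P::plan_transaction_with_fee_amortization`:
//
//   // Aggregate a set of inputs into a single output back to ourselves.
//   let aggregate: PlannedTransaction<S, T> = plan(inputs, vec![payment_to_self], None)?;
//   // Chain off that output immediately, without waiting for `aggregate` to confirm.
//   let next = plan(vec![aggregate.effected_received_outputs], payments, None)?;
//
// This is why `PlannedTransaction` carries the output effected back to us, not solely the
// Eventuality to watch for.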

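// Outputs are indexed in the database by (key, coin). `accumulate_outputs` performs a
// read-modify-write over that index: it reads each touched coin's list once, appends the
// new outputs in memory, and flushes each list back within the same `DbTxn`. A sketch of
// the accessors this assumes (the actual definitions live in `db.rs` and may differ):
//
//   impl<S: ScannerFeed> Db<S> {
//     fn outputs(txn: &impl DbTxn, key: KeyFor<S>, coin: Coin) -> Option<Vec<OutputFor<S>>>;
//     fn set_outputs(txn: &mut impl DbTxn, key: KeyFor<S>, coin: Coin, outputs: &[OutputFor<S>]);
//     fn del_outputs(txn: &mut impl DbTxn, key: KeyFor<S>, coin: Coin);
//   }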
impl<S: ScannerFeed, T, P: TransactionPlanner<S, PlannedTransaction = PlannedTransaction<S, T>>>
  Scheduler<S, T, P>
{
  fn accumulate_outputs(txn: &mut impl DbTxn, key: KeyFor<S>, outputs: &[OutputFor<S>]) {
    // Accumulate them in memory
    let mut outputs_by_coin = HashMap::with_capacity(1);
    for output in outputs.iter().filter(|output| output.key() == key) {
      let coin = output.balance().coin;
      if let std::collections::hash_map::Entry::Vacant(e) = outputs_by_coin.entry(coin) {
        e.insert(Db::<S>::outputs(txn, key, coin).unwrap());
      }
      outputs_by_coin.get_mut(&coin).unwrap().push(output.clone());
    }

    // Flush them to the database
    for (coin, outputs) in outputs_by_coin {
      Db::<S>::set_outputs(txn, key, coin, &outputs);
    }
  }
}

impl<
    S: ScannerFeed,
    T: 'static + Send + Sync,
    P: TransactionPlanner<S, PlannedTransaction = PlannedTransaction<S, T>>,
  > SchedulerTrait<S> for Scheduler<S, T, P>
{
  fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor<S>) {
    for coin in S::NETWORK.coins() {
      Db::<S>::set_outputs(txn, key, *coin, &vec![]);
    }
  }

  fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor<S>, new_key: KeyFor<S>) {
    todo!("TODO")
  }

  fn retire_key(&mut self, txn: &mut impl DbTxn, key: KeyFor<S>) {
    for coin in S::NETWORK.coins() {
      assert!(Db::<S>::outputs(txn, key, *coin).is_none());
      Db::<S>::del_outputs(txn, key, *coin);
    }
  }

  fn update(
    &mut self,
    txn: &mut impl DbTxn,
    active_keys: &[(KeyFor<S>, LifetimeStage)],
    update: SchedulerUpdate<S>,
  ) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
    // Accumulate all the outputs
    for key in active_keys {
      Self::accumulate_outputs(txn, key.0, update.outputs());
    }

    let fee_rates: HashMap<Coin, _> = todo!("TODO");

    // Create the transactions for the forwards/returns
    {
      let mut planned_txs = vec![];
      for forward in update.forwards() {
        let forward_to_key = active_keys.last().unwrap();
        assert_eq!(forward_to_key.1, LifetimeStage::Active);

        let Some(plan) = P::plan_transaction_with_fee_amortization(
          // This uses 0 for the operating costs as we don't incur any here
          &mut 0,
          fee_rates[&forward.balance().coin],
          vec![forward.clone()],
          vec![Payment::new(P::forwarding_address(forward_to_key.0), forward.balance(), None)],
          None,
        ) else {
          continue;
        };
        planned_txs.push(plan);
      }
      for to_return in update.returns() {
        let out_instruction =
          Payment::new(to_return.address().clone(), to_return.output().balance(), None);
        let Some(plan) = P::plan_transaction_with_fee_amortization(
          // This uses 0 for the operating costs as we don't incur any here
          &mut 0,
          fee_rates[&out_instruction.balance().coin],
          vec![to_return.output().clone()],
          vec![out_instruction],
          None,
        ) else {
          continue;
        };
        planned_txs.push(plan);
      }

      // TODO: Send the transactions off for signing
      // TODO: Return the eventualities
      todo!("TODO")
    }
  }

  fn fulfill(
    &mut self,
    txn: &mut impl DbTxn,
    active_keys: &[(KeyFor<S>, LifetimeStage)],
    payments: Vec<Payment<AddressFor<S>>>,
  ) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
    // TODO: Find the key to use for fulfillment
    // TODO: Sort outputs and payments by amount
    // TODO: For as long as we don't have sufficiently aggregated inputs to handle all payments,
    //   aggregate
    // TODO: Create the tree for the payments
    todo!("TODO")
  }
}
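
// A sketch of the input aggregation `fulfill` outlines above, assuming a hypothetical
// per-network `MAX_INPUTS` bound and a `plan` helper as before (the payment tree is
// omitted; none of these names are part of the real planner API):
//
//   outputs.sort_by_key(|output| output.balance().amount.0);
//   payments.sort_by_key(|payment| payment.balance().amount.0);
//   while !sufficient(&outputs, &payments) {
//     // Spend the largest MAX_INPUTS outputs into a single output back to ourselves.
//     let chunk = outputs.split_off(outputs.len().saturating_sub(MAX_INPUTS));
//     let aggregate = plan(chunk, vec![payment_to_self], None)?;
//     // Transaction chaining lets the planned output be treated as immediately spendable.
//     outputs.push(aggregate.effected_received_outputs);
//   }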