Add scheduler-primitives
The main benefit is that, whatever scheduler is in use, we now have a single API to receive TXs to sign (which will be of value to the TX signer crate we'll inevitably build).
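For a sense of that API: the new crate (shown in full below) exposes a TransactionsToSign database channel. A hypothetical signer-side consumer might drain it as sketched here; this is a minimal sketch against the trait and channel added in this commit, not code from the commit, and the helper name is made up.

use group::GroupEncoding;
use serai_db::DbTxn;
use scheduler_primitives::{SignableTransaction, TransactionsToSign};

// Drain every transaction queued for `key`, regardless of which scheduler queued it.
// `drain_transactions_to_sign` is an illustrative name, not part of the commit.
fn drain_transactions_to_sign<T: SignableTransaction>(
  txn: &mut impl DbTxn,
  key: &impl GroupEncoding,
) -> Vec<T> {
  let mut txs = vec![];
  // Each try_recv consumes one queued transaction from the channel.
  while let Some(tx) = TransactionsToSign::<T>::try_recv(txn, key) {
    txs.push(tx);
  }
  txs
}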
parent c88ebe985e
commit fadc88d2ad
12 changed files with 173 additions and 21 deletions
.github/workflows/tests.yml (vendored) | 3
@@ -42,9 +42,10 @@ jobs:
          -p serai-processor-key-gen \
          -p serai-processor-frost-attempt-manager \
          -p serai-processor-primitives \
          -p serai-processor-scanner \
          -p serai-processor-scheduler-primitives \
          -p serai-processor-utxo-scheduler-primitives \
          -p serai-processor-transaction-chaining-scheduler \
          -p serai-processor-scanner \
          -p serai-processor \
          -p tendermint-machine \
          -p tributary-chain \
Cargo.lock (generated) | 12
@@ -8679,6 +8679,16 @@ dependencies = [
 "tokio",
]

[[package]]
name = "serai-processor-scheduler-primitives"
version = "0.1.0"
dependencies = [
 "borsh",
 "group",
 "parity-scale-codec",
 "serai-db",
]

[[package]]
name = "serai-processor-tests"
version = "0.1.0"
@@ -8715,11 +8725,11 @@ dependencies = [
 "borsh",
 "group",
 "parity-scale-codec",
 "serai-coins-primitives",
 "serai-db",
 "serai-primitives",
 "serai-processor-primitives",
 "serai-processor-scanner",
 "serai-processor-scheduler-primitives",
 "serai-processor-utxo-scheduler-primitives",
]
Cargo.toml

@@ -74,9 +74,10 @@ members = [
  "processor/frost-attempt-manager",

  "processor/primitives",
  "processor/scanner",
  "processor/scheduler/primitives",
  "processor/scheduler/utxo/primitives",
  "processor/scheduler/utxo/transaction-chaining",
  "processor/scanner",
  "processor",

  "coordinator/tributary/tendermint",
deny.toml

@@ -49,9 +49,10 @@ exceptions = [
  { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" },
  { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" },

  { allow = ["AGPL-3.0"], name = "serai-processor-utxo-primitives" },
  { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" },
  { allow = ["AGPL-3.0"], name = "serai-processor-scanner" },
  { allow = ["AGPL-3.0"], name = "serai-processor-scheduler-primitives" },
  { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" },
  { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" },
  { allow = ["AGPL-3.0"], name = "serai-processor" },

  { allow = ["AGPL-3.0"], name = "tributary-chain" },
@@ -241,8 +241,12 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
  ///
  /// When a key is activated, the existing multisig should retain its outputs and utility for a
  /// certain time period. With `flush_key`, all outputs should be directed towards fulfilling some
  /// obligation or the `new_key`. Every output MUST be connected to an Eventuality. If a key no
  /// longer has active Eventualities, it MUST be able to be retired.
  /// obligation or the `new_key`. Every output held by the retiring key MUST be connected to an
  /// Eventuality. If a key no longer has active Eventualities, it MUST be able to be retired
  /// without losing any coins.
  ///
  /// If the retiring key has any unfulfilled payments associated with it, those MUST be made
  /// the responsibility of the new key.
  fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor<S>, new_key: KeyFor<S>);

  /// Retire a key as it'll no longer be used.
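As a rough, self-contained illustration of the contract documented above (every type below is a toy stand-in invented for this sketch, not the processor's actual Scheduler or key types): flushing a key hands its queued payments to the new key and directs its outputs at the new key, after which the retiring key can be retired without losing coins.

use std::collections::HashMap;

// Toy stand-ins for the real key/output/payment types.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Key(u8);
struct Output { amount: u64 }
struct Payment { to: String, amount: u64 }

struct ToyScheduler {
  outputs: HashMap<Key, Vec<Output>>,
  queued_payments: HashMap<Key, Vec<Payment>>,
}

impl ToyScheduler {
  fn flush_key(&mut self, retiring_key: Key, new_key: Key) {
    // Unfulfilled payments MUST become the responsibility of the new key.
    let queued = self.queued_payments.remove(&retiring_key).unwrap_or_default();
    self.queued_payments.entry(new_key).or_default().extend(queued);

    // Every output held by the retiring key is directed towards the new key
    // (the real scheduler does this by planning transactions, each connected
    // to an Eventuality the scanner can watch for).
    let outputs = self.outputs.remove(&retiring_key).unwrap_or_default();
    self.outputs.entry(new_key).or_default().extend(outputs);

    // With no outputs and no active Eventualities left, the retiring key can
    // now be retired without losing any coins.
  }
}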
processor/scheduler/primitives/Cargo.toml (new file) | 25
@@ -0,0 +1,25 @@
[package]
name = "serai-processor-scheduler-primitives"
version = "0.1.0"
description = "Primitives for schedulers for the Serai processor"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/primitives"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
group = { version = "0.13", default-features = false }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-db = { path = "../../../common/db" }
processor/scheduler/primitives/LICENSE (new file) | 15
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
processor/scheduler/primitives/README.md (new file) | 3
@@ -0,0 +1,3 @@
# Scheduler Primitives

Primitives for schedulers.
processor/scheduler/primitives/src/lib.rs (new file) | 48
@@ -0,0 +1,48 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::marker::PhantomData;
use std::io;

use group::GroupEncoding;

use serai_db::DbTxn;

/// A signable transaction.
pub trait SignableTransaction: 'static + Sized + Send + Sync {
  /// Read a `SignableTransaction`.
  fn read(reader: &mut impl io::Read) -> io::Result<Self>;
  /// Write a `SignableTransaction`.
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()>;
}

mod db {
  use serai_db::{Get, DbTxn, create_db, db_channel};

  db_channel! {
    SchedulerPrimitives {
      TransactionsToSign: (key: &[u8]) -> Vec<u8>,
    }
  }
}

/// The transactions to sign, as scheduled by a Scheduler.
pub struct TransactionsToSign<T>(PhantomData<T>);
impl<T: SignableTransaction> TransactionsToSign<T> {
  /// Send a transaction to sign.
  pub fn send(txn: &mut impl DbTxn, key: &impl GroupEncoding, tx: &T) {
    let mut buf = Vec::with_capacity(128);
    tx.write(&mut buf).unwrap();
    db::TransactionsToSign::send(txn, key.to_bytes().as_ref(), &buf);
  }

  /// Try to receive a transaction to sign.
  pub fn try_recv(txn: &mut impl DbTxn, key: &impl GroupEncoding) -> Option<T> {
    let tx = db::TransactionsToSign::try_recv(txn, key.to_bytes().as_ref())?;
    let mut tx = tx.as_slice();
    let res = T::read(&mut tx).unwrap();
    assert!(tx.is_empty());
    Some(res)
  }
}
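As a usage sketch of the trait above: a toy SignableTransaction implementor that serializes a length-prefixed byte blob (RawTransaction is invented for this example; real implementors would be the network-specific signable transaction types).

use std::io;

use scheduler_primitives::SignableTransaction;

// A toy transaction: a length-prefixed byte blob.
struct RawTransaction(Vec<u8>);

impl SignableTransaction for RawTransaction {
  fn read(reader: &mut impl io::Read) -> io::Result<Self> {
    // Read the 4-byte little-endian length, then exactly that many bytes.
    let mut len = [0; 4];
    reader.read_exact(&mut len)?;
    let mut buf = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];
    reader.read_exact(&mut buf)?;
    Ok(RawTransaction(buf))
  }
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
    writer.write_all(&u32::try_from(self.0.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.0)
  }
}

A scheduler can then queue one via TransactionsToSign::<RawTransaction>::send(txn, &key, &tx), and the eventual signer can pull it back out with try_recv; the length prefix guarantees read consumes every serialized byte, satisfying the assert in try_recv.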
@@ -26,10 +26,10 @@ scale = { package = "parity-scale-codec", version = "3", default-features = fals
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] }
serai-coins-primitives = { path = "../../../../substrate/coins/primitives", default-features = false, features = ["std"] }

serai-db = { path = "../../../../common/db" }

primitives = { package = "serai-processor-primitives", path = "../../../primitives" }
scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" }
scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" }
utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" }
scanner = { package = "serai-processor-scanner", path = "../../../scanner" }
@@ -2,7 +2,7 @@ use core::marker::PhantomData;

use group::GroupEncoding;

use serai_primitives::Coin;
use serai_primitives::{Coin, Amount};

use serai_db::{Get, DbTxn, create_db};
@@ -11,12 +11,23 @@ use scanner::{ScannerFeed, KeyFor, OutputFor};

create_db! {
  TransactionChainingScheduler {
    OperatingCosts: (coin: Coin) -> Amount,
    SerializedOutputs: (key: &[u8], coin: Coin) -> Vec<u8>,
    // We should be immediately able to schedule the fulfillment of payments, yet this may not be
    // possible if we're in the middle of a multisig rotation (as our output set will be split)
    SerializedQueuedPayments: (key: &[u8]) -> Vec<u8>,
  }
}

pub(crate) struct Db<S: ScannerFeed>(PhantomData<S>);
impl<S: ScannerFeed> Db<S> {
  pub(crate) fn operating_costs(getter: &impl Get, coin: Coin) -> Amount {
    OperatingCosts::get(getter, coin).unwrap_or(Amount(0))
  }
  pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: Coin, amount: Amount) {
    OperatingCosts::set(txn, coin, &amount)
  }

  pub(crate) fn outputs(
    getter: &impl Get,
    key: KeyFor<S>,
@@ -46,4 +57,17 @@ impl<S: ScannerFeed> Db<S> {
  pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor<S>, coin: Coin) {
    SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin);
  }

  pub(crate) fn queued_payments(
    getter: &impl Get,
    key: KeyFor<S>,
  ) -> Option<Vec<Payment<S>>> {
    todo!("TODO")
  }
  pub(crate) fn set_queued_payments(txn: &mut impl DbTxn, key: KeyFor<S>, queued: Vec<Payment<S>>) {
    todo!("TODO")
  }
  pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor<S>) {
    SerializedQueuedPayments::del(txn, key.to_bytes().as_ref());
  }
}
@@ -5,6 +5,8 @@
use core::marker::PhantomData;
use std::collections::HashMap;

use group::GroupEncoding;

use serai_primitives::Coin;

use serai_db::DbTxn;
@@ -15,6 +17,7 @@ use scanner::{
  Scheduler as SchedulerTrait,
};
use scheduler_primitives::*;
use utxo_scheduler_primitives::*;

mod db;
use db::Db;
@@ -25,7 +28,7 @@ pub struct PlannedTransaction<S: ScannerFeed, T> {
  signable: T,
  /// The outputs we'll receive from this.
  effected_received_outputs: OutputFor<S>,
  /// The Evtnuality to watch for.
  /// The Eventuality to watch for.
  eventuality: EventualityFor<S>,
}
@@ -60,13 +63,13 @@ impl<S: ScannerFeed, T, P: TransactionPlanner<S, PlannedTransaction = PlannedTra

impl<
  S: ScannerFeed,
  T: 'static + Send + Sync,
  T: 'static + Send + Sync + SignableTransaction,
  P: TransactionPlanner<S, PlannedTransaction = PlannedTransaction<S, T>>,
> SchedulerTrait<S> for Scheduler<S, T, P>
{
  fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor<S>) {
    for coin in S::NETWORK.coins() {
      Db::<S>::set_outputs(txn, key, *coin, &vec![]);
      Db::<S>::set_outputs(txn, key, *coin, &[]);
    }
  }
@@ -98,22 +101,27 @@ impl<
    {
      let mut planned_txs = vec![];
      for forward in update.forwards() {
        let forward_to_key = active_keys.last().unwrap();
        assert_eq!(forward_to_key.1, LifetimeStage::Active);
        let key = forward.key();

        assert_eq!(active_keys.len(), 2);
        assert_eq!(active_keys[0].1, LifetimeStage::Forwarding);
        assert_eq!(active_keys[1].1, LifetimeStage::Active);
        let forward_to_key = active_keys[1].0;

        let Some(plan) = P::plan_transaction_with_fee_amortization(
          // This uses 0 for the operating costs as we don't incur any here
          &mut 0,
          fee_rates[&forward.balance().coin],
          vec![forward.clone()],
          vec![Payment::new(P::forwarding_address(forward_to_key.0), forward.balance(), None)],
          vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
          None,
        ) else {
          continue;
        };
        planned_txs.push(plan);
        planned_txs.push((key, plan));
      }
      for to_return in update.returns() {
        let key = to_return.output().key();
        let out_instruction =
          Payment::new(to_return.address().clone(), to_return.output().balance(), None);
        let Some(plan) = P::plan_transaction_with_fee_amortization(
@@ -126,12 +134,24 @@ impl<
        ) else {
          continue;
        };
        planned_txs.push(plan);
        planned_txs.push((key, plan));
      }

      // TODO: Send the transactions off for signing
      // TODO: Return the eventualities
      todo!("TODO")
      let mut eventualities = HashMap::new();
      for (key, planned_tx) in planned_txs {
        // Send the transactions off for signing
        TransactionsToSign::<T>::send(txn, &key, &planned_tx.signable);

        // Insert the eventualities into the result
        eventualities
          .entry(key.to_bytes().as_ref().to_vec())
          .or_insert(Vec::with_capacity(1))
          .push(planned_tx.eventuality);
      }

      // TODO: Fulfill any payments we prior couldn't

      eventualities
    }
  }