mirror of
https://github.com/serai-dex/serai.git
synced 2024-12-22 19:49:22 +00:00
Use a single txn for an entire coordinator message
Removes direct DB accesses where possible. Documents the safety of the rest. Does uncover one case of unsafety not previously noted.
This commit is contained in:
parent
7579c71765
commit
fd1bbec134
12 changed files with 370 additions and 284 deletions
|
@ -15,16 +15,24 @@ impl<C: Coin, D: Db> MainDb<C, D> {
|
|||
D::key(b"MAIN", dst, key)
|
||||
}
|
||||
|
||||
fn handled_key(id: u64) -> Vec<u8> {
|
||||
Self::main_key(b"handled", id.to_le_bytes())
|
||||
}
|
||||
pub fn handled_message(&self, id: u64) -> bool {
|
||||
self.0.get(Self::handled_key(id)).is_some()
|
||||
}
|
||||
pub fn handle_message(txn: &mut D::Transaction<'_>, id: u64) {
|
||||
txn.put(Self::handled_key(id), [])
|
||||
}
|
||||
|
||||
fn plan_key(id: &[u8]) -> Vec<u8> {
|
||||
Self::main_key(b"plan", id)
|
||||
}
|
||||
fn signing_key(key: &[u8]) -> Vec<u8> {
|
||||
Self::main_key(b"signing", key)
|
||||
}
|
||||
pub fn save_signing(&mut self, key: &[u8], block_number: u64, plan: &Plan<C>) {
|
||||
pub fn save_signing(txn: &mut D::Transaction<'_>, key: &[u8], block_number: u64, plan: &Plan<C>) {
|
||||
let id = plan.id();
|
||||
// Creating a TXN here is arguably an anti-pattern, yet nothing here expects atomicity
|
||||
let mut txn = self.0.txn();
|
||||
|
||||
{
|
||||
let mut signing = txn.get(Self::signing_key(key)).unwrap_or(vec![]);
|
||||
|
@ -46,8 +54,6 @@ impl<C: Coin, D: Db> MainDb<C, D> {
|
|||
plan.write(&mut buf).unwrap();
|
||||
txn.put(Self::plan_key(&id), &buf);
|
||||
}
|
||||
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
pub fn signing(&self, key: &[u8]) -> Vec<(u64, Plan<C>)> {
|
||||
|
@ -68,7 +74,7 @@ impl<C: Coin, D: Db> MainDb<C, D> {
|
|||
res
|
||||
}
|
||||
|
||||
pub fn finish_signing(&mut self, key: &[u8], id: [u8; 32]) {
|
||||
pub fn finish_signing(&mut self, txn: &mut D::Transaction<'_>, key: &[u8], id: [u8; 32]) {
|
||||
let mut signing = self.0.get(Self::signing_key(key)).unwrap_or(vec![]);
|
||||
assert_eq!(signing.len() % 32, 0);
|
||||
|
||||
|
@ -87,8 +93,6 @@ impl<C: Coin, D: Db> MainDb<C, D> {
|
|||
log::warn!("told to finish signing {} yet wasn't actively signing it", hex::encode(id));
|
||||
}
|
||||
|
||||
let mut txn = self.0.txn();
|
||||
txn.put(Self::signing_key(key), signing);
|
||||
txn.commit();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@ pub struct KeyConfirmed<C: Ciphersuite> {
|
|||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct KeyGenDb<C: Coin, D: Db>(D, PhantomData<C>);
|
||||
struct KeyGenDb<C: Coin, D: Db>(PhantomData<D>, PhantomData<C>);
|
||||
impl<C: Coin, D: Db> KeyGenDb<C, D> {
|
||||
fn key_gen_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
|
||||
D::key(b"KEY_GEN", dst, key)
|
||||
|
@ -40,9 +40,9 @@ impl<C: Coin, D: Db> KeyGenDb<C, D> {
|
|||
fn save_params(txn: &mut D::Transaction<'_>, set: &ValidatorSet, params: &ThresholdParams) {
|
||||
txn.put(Self::params_key(set), bincode::serialize(params).unwrap());
|
||||
}
|
||||
fn params(&self, set: &ValidatorSet) -> ThresholdParams {
|
||||
fn params<G: Get>(getter: &G, set: &ValidatorSet) -> ThresholdParams {
|
||||
// Directly unwraps the .get() as this will only be called after being set
|
||||
bincode::deserialize(&self.0.get(Self::params_key(set)).unwrap()).unwrap()
|
||||
bincode::deserialize(&getter.get(Self::params_key(set)).unwrap()).unwrap()
|
||||
}
|
||||
|
||||
// Not scoped to the set since that'd have latter attempts overwrite former
|
||||
|
@ -58,9 +58,9 @@ impl<C: Coin, D: Db> KeyGenDb<C, D> {
|
|||
) {
|
||||
txn.put(Self::commitments_key(id), bincode::serialize(commitments).unwrap());
|
||||
}
|
||||
fn commitments(&self, id: &KeyGenId) -> HashMap<Participant, Vec<u8>> {
|
||||
fn commitments<G: Get>(getter: &G, id: &KeyGenId) -> HashMap<Participant, Vec<u8>> {
|
||||
bincode::deserialize::<HashMap<Participant, Vec<u8>>>(
|
||||
&self.0.get(Self::commitments_key(id)).unwrap(),
|
||||
&getter.get(Self::commitments_key(id)).unwrap(),
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
@ -102,11 +102,11 @@ impl<C: Coin, D: Db> KeyGenDb<C, D> {
|
|||
txn.put(Self::keys_key(&keys.1.group_key()), keys_vec);
|
||||
keys
|
||||
}
|
||||
fn keys(
|
||||
&self,
|
||||
fn keys<G: Get>(
|
||||
getter: &G,
|
||||
key: &<C::Curve as Ciphersuite>::G,
|
||||
) -> (ThresholdKeys<Ristretto>, ThresholdKeys<C::Curve>) {
|
||||
Self::read_keys(&self.0, &Self::keys_key(key)).1
|
||||
Self::read_keys(getter, &Self::keys_key(key)).1
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -115,7 +115,7 @@ impl<C: Coin, D: Db> KeyGenDb<C, D> {
|
|||
/// 2) It did send its response, and has locally saved enough data to continue
|
||||
#[derive(Debug)]
|
||||
pub struct KeyGen<C: Coin, D: Db> {
|
||||
db: KeyGenDb<C, D>,
|
||||
db: D,
|
||||
entropy: Zeroizing<[u8; 32]>,
|
||||
|
||||
active_commit:
|
||||
|
@ -126,23 +126,23 @@ pub struct KeyGen<C: Coin, D: Db> {
|
|||
impl<C: Coin, D: Db> KeyGen<C, D> {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen<C, D> {
|
||||
KeyGen {
|
||||
db: KeyGenDb(db, PhantomData::<C>),
|
||||
entropy,
|
||||
|
||||
active_commit: HashMap::new(),
|
||||
active_share: HashMap::new(),
|
||||
}
|
||||
KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() }
|
||||
}
|
||||
|
||||
pub fn keys(
|
||||
&self,
|
||||
key: &<C::Curve as Ciphersuite>::G,
|
||||
) -> (ThresholdKeys<Ristretto>, ThresholdKeys<C::Curve>) {
|
||||
self.db.keys(key)
|
||||
// This is safe, despite not having a txn, since it's a static value
|
||||
// At worst, it's not set when it's expected to be set, yet that should be handled contextually
|
||||
KeyGenDb::<C, D>::keys(&self.db, key)
|
||||
}
|
||||
|
||||
pub async fn handle(&mut self, msg: CoordinatorMessage) -> ProcessorMessage {
|
||||
pub async fn handle(
|
||||
&mut self,
|
||||
txn: &mut D::Transaction<'_>,
|
||||
msg: CoordinatorMessage,
|
||||
) -> ProcessorMessage {
|
||||
let context = |id: &KeyGenId| {
|
||||
// TODO2: Also embed the chain ID/genesis block
|
||||
format!(
|
||||
|
@ -177,11 +177,7 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
|
|||
self.active_share.remove(&id.set).is_none()
|
||||
{
|
||||
// If we haven't handled this set before, save the params
|
||||
// This may overwrite previously written params if we rebooted, yet that isn't a
|
||||
// concern
|
||||
let mut txn = self.db.0.txn();
|
||||
KeyGenDb::<C, D>::save_params(&mut txn, &id.set, ¶ms);
|
||||
txn.commit();
|
||||
KeyGenDb::<C, D>::save_params(txn, &id.set, ¶ms);
|
||||
}
|
||||
|
||||
let (machines, commitments) = key_gen_machines(id, params);
|
||||
|
@ -202,7 +198,7 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
|
|||
panic!("commitments when already handled commitments");
|
||||
}
|
||||
|
||||
let params = self.db.params(&id.set);
|
||||
let params = KeyGenDb::<C, D>::params(txn, &id.set);
|
||||
|
||||
// Unwrap the machines, rebuilding them if we didn't have them in our cache
|
||||
// We won't if the processor rebooted
|
||||
|
@ -256,9 +252,7 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
|
|||
share.extend(coin_shares[i].serialize());
|
||||
}
|
||||
|
||||
let mut txn = self.db.0.txn();
|
||||
KeyGenDb::<C, D>::save_commitments(&mut txn, &id, &commitments);
|
||||
txn.commit();
|
||||
KeyGenDb::<C, D>::save_commitments(txn, &id, &commitments);
|
||||
|
||||
ProcessorMessage::Shares { id, shares }
|
||||
}
|
||||
|
@ -266,13 +260,13 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
|
|||
CoordinatorMessage::Shares { id, shares } => {
|
||||
info!("Received shares for {:?}", id);
|
||||
|
||||
let params = self.db.params(&id.set);
|
||||
let params = KeyGenDb::<C, D>::params(txn, &id.set);
|
||||
|
||||
// Same commentary on inconsistency as above exists
|
||||
let machines = self.active_share.remove(&id.set).unwrap_or_else(|| {
|
||||
let machines = key_gen_machines(id, params).0;
|
||||
let mut rng = secret_shares_rng(id);
|
||||
let commitments = self.db.commitments(&id);
|
||||
let commitments = KeyGenDb::<C, D>::commitments(txn, &id);
|
||||
|
||||
let mut commitments_ref: HashMap<Participant, &[u8]> =
|
||||
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
|
||||
|
@ -337,9 +331,7 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
|
|||
let substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref);
|
||||
let coin_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);
|
||||
|
||||
let mut txn = self.db.0.txn();
|
||||
KeyGenDb::<C, D>::save_keys(&mut txn, &id, &substrate_keys, &coin_keys);
|
||||
txn.commit();
|
||||
KeyGenDb::<C, D>::save_keys(txn, &id, &substrate_keys, &coin_keys);
|
||||
|
||||
let mut coin_keys = ThresholdKeys::new(coin_keys);
|
||||
C::tweak_keys(&mut coin_keys);
|
||||
|
@ -354,12 +346,11 @@ impl<C: Coin, D: Db> KeyGen<C, D> {
|
|||
|
||||
pub async fn confirm(
|
||||
&mut self,
|
||||
txn: &mut D::Transaction<'_>,
|
||||
context: SubstrateContext,
|
||||
id: KeyGenId,
|
||||
) -> KeyConfirmed<C::Curve> {
|
||||
let mut txn = self.db.0.txn();
|
||||
let (substrate_keys, coin_keys) = KeyGenDb::<C, D>::confirm_keys(&mut txn, &id);
|
||||
txn.commit();
|
||||
let (substrate_keys, coin_keys) = KeyGenDb::<C, D>::confirm_keys(txn, &id);
|
||||
|
||||
info!(
|
||||
"Confirmed key pair {} {} from {:?}",
|
||||
|
|
|
@ -8,7 +8,7 @@ use zeroize::{Zeroize, Zeroizing};
|
|||
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
use group::GroupEncoding;
|
||||
use frost::curve::Ciphersuite;
|
||||
use frost::{curve::Ciphersuite, ThresholdKeys};
|
||||
|
||||
use log::{info, warn, error};
|
||||
use tokio::time::sleep;
|
||||
|
@ -90,14 +90,13 @@ async fn get_fee<C: Coin>(coin: &C, block_number: usize) -> C::Fee {
|
|||
}
|
||||
}
|
||||
|
||||
async fn prepare_send<C: Coin, D: Db>(
|
||||
async fn prepare_send<C: Coin>(
|
||||
coin: &C,
|
||||
signer: &Signer<C, D>,
|
||||
keys: ThresholdKeys<C::Curve>,
|
||||
block_number: usize,
|
||||
fee: C::Fee,
|
||||
plan: Plan<C>,
|
||||
) -> (Option<(C::SignableTransaction, C::Eventuality)>, Vec<PostFeeBranch>) {
|
||||
let keys = signer.keys().await;
|
||||
loop {
|
||||
match coin.prepare_send(keys.clone(), block_number, plan.clone(), fee).await {
|
||||
Ok(prepared) => {
|
||||
|
@ -173,7 +172,7 @@ struct SubstrateMutable<C: Coin, D: Db> {
|
|||
}
|
||||
|
||||
async fn sign_plans<C: Coin, D: Db>(
|
||||
db: &mut MainDb<C, D>,
|
||||
txn: &mut D::Transaction<'_>,
|
||||
coin: &C,
|
||||
substrate_mutable: &mut SubstrateMutable<C, D>,
|
||||
signers: &mut HashMap<Vec<u8>, Signer<C, D>>,
|
||||
|
@ -197,17 +196,11 @@ async fn sign_plans<C: Coin, D: Db>(
|
|||
info!("preparing plan {}: {:?}", hex::encode(id), plan);
|
||||
|
||||
let key = plan.key.to_bytes();
|
||||
db.save_signing(key.as_ref(), block_number.try_into().unwrap(), &plan);
|
||||
MainDb::<C, D>::save_signing(txn, key.as_ref(), block_number.try_into().unwrap(), &plan);
|
||||
let (tx, branches) =
|
||||
prepare_send(coin, signers.get_mut(key.as_ref()).unwrap(), block_number, fee, plan).await;
|
||||
prepare_send(coin, signers.get_mut(key.as_ref()).unwrap().keys(), block_number, fee, plan)
|
||||
.await;
|
||||
|
||||
// TODO: If we reboot mid-sign_plans, for a DB-backed scheduler, these may be partially
|
||||
// executed
|
||||
// Global TXN object for the entire coordinator message?
|
||||
// Re-ser the scheduler after every sign_plans call?
|
||||
// To clarify, the scheduler is distinct as it mutates itself on new data.
|
||||
// The key_gen/scanner/signer are designed to be deterministic to new data, irrelevant to prior
|
||||
// states.
|
||||
for branch in branches {
|
||||
substrate_mutable
|
||||
.schedulers
|
||||
|
@ -218,19 +211,18 @@ async fn sign_plans<C: Coin, D: Db>(
|
|||
|
||||
if let Some((tx, eventuality)) = tx {
|
||||
substrate_mutable.scanner.register_eventuality(block_number, id, eventuality.clone()).await;
|
||||
signers.get_mut(key.as_ref()).unwrap().sign_transaction(id, tx, eventuality).await;
|
||||
signers.get_mut(key.as_ref()).unwrap().sign_transaction(txn, id, tx, eventuality).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
||||
raw_db: &D,
|
||||
main_db: &mut MainDb<C, D>,
|
||||
txn: &mut D::Transaction<'_>,
|
||||
coin: &C,
|
||||
coordinator: &mut Co,
|
||||
tributary_mutable: &mut TributaryMutable<C, D>,
|
||||
substrate_mutable: &mut SubstrateMutable<C, D>,
|
||||
msg: Message,
|
||||
msg: &Message,
|
||||
) {
|
||||
// If this message expects a higher block number than we have, halt until synced
|
||||
async fn wait<C: Coin, D: Db>(scanner: &ScannerHandle<C, D>, block_hash: &BlockHash) {
|
||||
|
@ -293,15 +285,17 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||
match msg.msg.clone() {
|
||||
CoordinatorMessage::KeyGen(msg) => {
|
||||
// TODO: This may be fired multiple times. What's our plan for that?
|
||||
coordinator.send(ProcessorMessage::KeyGen(tributary_mutable.key_gen.handle(msg).await)).await;
|
||||
coordinator
|
||||
.send(ProcessorMessage::KeyGen(tributary_mutable.key_gen.handle(txn, msg).await))
|
||||
.await;
|
||||
}
|
||||
|
||||
CoordinatorMessage::Sign(msg) => {
|
||||
tributary_mutable.signers.get_mut(msg.key()).unwrap().handle(msg).await;
|
||||
tributary_mutable.signers.get_mut(msg.key()).unwrap().handle(txn, msg).await;
|
||||
}
|
||||
|
||||
CoordinatorMessage::Coordinator(msg) => {
|
||||
tributary_mutable.substrate_signers.get_mut(msg.key()).unwrap().handle(msg).await;
|
||||
tributary_mutable.substrate_signers.get_mut(msg.key()).unwrap().handle(txn, msg).await;
|
||||
}
|
||||
|
||||
CoordinatorMessage::Substrate(msg) => {
|
||||
|
@ -309,10 +303,10 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||
messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, id } => {
|
||||
// See TributaryMutable's struct definition for why this block is safe
|
||||
let KeyConfirmed { activation_block, substrate_keys, coin_keys } =
|
||||
tributary_mutable.key_gen.confirm(context, id).await;
|
||||
tributary_mutable.key_gen.confirm(txn, context, id).await;
|
||||
tributary_mutable.substrate_signers.insert(
|
||||
substrate_keys.group_key().to_bytes().to_vec(),
|
||||
SubstrateSigner::new(raw_db.clone(), substrate_keys),
|
||||
SubstrateSigner::new(substrate_keys),
|
||||
);
|
||||
|
||||
let key = coin_keys.group_key();
|
||||
|
@ -325,15 +319,14 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||
.await
|
||||
.expect("KeyConfirmed from context we haven't synced");
|
||||
|
||||
substrate_mutable.scanner.rotate_key(activation_number, key).await;
|
||||
substrate_mutable.scanner.rotate_key(txn, activation_number, key).await;
|
||||
substrate_mutable
|
||||
.schedulers
|
||||
.insert(key.to_bytes().as_ref().to_vec(), Scheduler::<C>::new(key));
|
||||
|
||||
tributary_mutable.signers.insert(
|
||||
key.to_bytes().as_ref().to_vec(),
|
||||
Signer::new(raw_db.clone(), coin.clone(), coin_keys),
|
||||
);
|
||||
tributary_mutable
|
||||
.signers
|
||||
.insert(key.to_bytes().as_ref().to_vec(), Signer::new(coin.clone(), coin_keys));
|
||||
}
|
||||
|
||||
messages::substrate::CoordinatorMessage::SubstrateBlock {
|
||||
|
@ -347,11 +340,12 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||
let key = <C::Curve as Ciphersuite>::read_G::<&[u8]>(&mut key_vec.as_ref()).unwrap();
|
||||
|
||||
// We now have to acknowledge every block for this key up to the acknowledged block
|
||||
let (blocks, outputs) = substrate_mutable.scanner.ack_up_to_block(key, block_id).await;
|
||||
let (blocks, outputs) =
|
||||
substrate_mutable.scanner.ack_up_to_block(txn, key, block_id).await;
|
||||
// Since this block was acknowledged, we no longer have to sign the batch for it
|
||||
for block in blocks {
|
||||
for (_, signer) in tributary_mutable.substrate_signers.iter_mut() {
|
||||
signer.batch_signed(block);
|
||||
signer.batch_signed(txn, block);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -377,7 +371,7 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||
.schedule(outputs, payments);
|
||||
|
||||
sign_plans(
|
||||
main_db,
|
||||
txn,
|
||||
coin,
|
||||
substrate_mutable,
|
||||
// See commentary in TributaryMutable for why this is safe
|
||||
|
@ -390,12 +384,10 @@ async fn handle_coordinator_msg<D: Db, C: Coin, Co: Coordinator>(
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
coordinator.ack(msg).await;
|
||||
}
|
||||
|
||||
async fn boot<C: Coin, D: Db>(
|
||||
raw_db: &D,
|
||||
raw_db: &mut D,
|
||||
coin: &C,
|
||||
) -> (MainDb<C, D>, TributaryMutable<C, D>, SubstrateMutable<C, D>) {
|
||||
let mut entropy_transcript = {
|
||||
|
@ -443,12 +435,12 @@ async fn boot<C: Coin, D: Db>(
|
|||
let (substrate_keys, coin_keys) = key_gen.keys(key);
|
||||
|
||||
let substrate_key = substrate_keys.group_key();
|
||||
let substrate_signer = SubstrateSigner::new(raw_db.clone(), substrate_keys);
|
||||
let substrate_signer = SubstrateSigner::new(substrate_keys);
|
||||
// We don't have to load any state for this since the Scanner will re-fire any events
|
||||
// necessary
|
||||
substrate_signers.insert(substrate_key.to_bytes().to_vec(), substrate_signer);
|
||||
|
||||
let mut signer = Signer::new(raw_db.clone(), coin.clone(), coin_keys);
|
||||
let mut signer = Signer::new(coin.clone(), coin_keys);
|
||||
|
||||
// Load any TXs being actively signed
|
||||
let key = key.to_bytes();
|
||||
|
@ -461,14 +453,17 @@ async fn boot<C: Coin, D: Db>(
|
|||
info!("reloading plan {}: {:?}", hex::encode(id), plan);
|
||||
|
||||
let (Some((tx, eventuality)), _) =
|
||||
prepare_send(coin, &signer, block_number, fee, plan).await else {
|
||||
prepare_send(coin, signer.keys(), block_number, fee, plan).await else {
|
||||
panic!("previously created transaction is no longer being created")
|
||||
};
|
||||
|
||||
scanner.register_eventuality(block_number, id, eventuality.clone()).await;
|
||||
// TODO: Reconsider if the Signer should have the eventuality, or if just the coin/scanner
|
||||
// should
|
||||
signer.sign_transaction(id, tx, eventuality).await;
|
||||
let mut txn = raw_db.txn();
|
||||
signer.sign_transaction(&mut txn, id, tx, eventuality).await;
|
||||
// This should only have re-writes of existing data
|
||||
drop(txn);
|
||||
}
|
||||
|
||||
signers.insert(key.as_ref().to_vec(), signer);
|
||||
|
@ -481,14 +476,14 @@ async fn boot<C: Coin, D: Db>(
|
|||
)
|
||||
}
|
||||
|
||||
async fn run<C: Coin, D: Db, Co: Coordinator>(raw_db: D, coin: C, mut coordinator: Co) {
|
||||
async fn run<C: Coin, D: Db, Co: Coordinator>(mut raw_db: D, coin: C, mut coordinator: Co) {
|
||||
// We currently expect a contextless bidirectional mapping between these two values
|
||||
// (which is that any value of A can be interpreted as B and vice versa)
|
||||
// While we can write a contextual mapping, we have yet to do so
|
||||
// This check ensures no coin which doesn't have a bidirectional mapping is defined
|
||||
assert_eq!(<C::Block as Block<C>>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len());
|
||||
|
||||
let (mut main_db, mut tributary_mutable, mut substrate_mutable) = boot(&raw_db, &coin).await;
|
||||
let (mut main_db, mut tributary_mutable, mut substrate_mutable) = boot(&mut raw_db, &coin).await;
|
||||
|
||||
// We can't load this from the DB as we can't guarantee atomic increments with the ack function
|
||||
let mut last_coordinator_msg = None;
|
||||
|
@ -505,12 +500,6 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(raw_db: D, coin: C, mut coordinato
|
|||
}
|
||||
|
||||
SignerEvent::SignedTransaction { id, tx } => {
|
||||
// If we die after calling finish_signing, we'll never fire Completed
|
||||
// TODO: Is that acceptable? Do we need to fire Completed before firing finish_signing?
|
||||
main_db.finish_signing(key, id);
|
||||
// This does mutate the Scanner, yet the eventuality protocol is only run to mutate
|
||||
// the signer, which is Tributary mutable (and what's currently being mutated)
|
||||
substrate_mutable.scanner.drop_eventuality(id).await;
|
||||
coordinator
|
||||
.send(ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed {
|
||||
key: key.clone(),
|
||||
|
@ -519,6 +508,13 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(raw_db: D, coin: C, mut coordinato
|
|||
}))
|
||||
.await;
|
||||
|
||||
let mut txn = raw_db.txn();
|
||||
// This does mutate the Scanner, yet the eventuality protocol is only run to mutate
|
||||
// the signer, which is Tributary mutable (and what's currently being mutated)
|
||||
substrate_mutable.scanner.drop_eventuality(id).await;
|
||||
main_db.finish_signing(&mut txn, key, id);
|
||||
txn.commit();
|
||||
|
||||
// TODO
|
||||
// 1) We need to stop signing whenever a peer informs us or the chain has an
|
||||
// eventuality
|
||||
|
@ -559,33 +555,39 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(raw_db: D, coin: C, mut coordinato
|
|||
assert_eq!(msg.id, (last_coordinator_msg.unwrap_or(msg.id - 1) + 1));
|
||||
last_coordinator_msg = Some(msg.id);
|
||||
|
||||
// If we've already handled this message, continue
|
||||
// TODO
|
||||
// Only handle this if we haven't already
|
||||
if !main_db.handled_message(msg.id) {
|
||||
let mut txn = raw_db.txn();
|
||||
MainDb::<C, D>::handle_message(&mut txn, msg.id);
|
||||
|
||||
// This is isolated to better think about how its ordered, or rather, about how the
|
||||
// following cases aren't ordered
|
||||
//
|
||||
// While the coordinator messages are ordered, they're not deterministically ordered
|
||||
// While Tributary-caused messages are deterministically ordered, and Substrate-caused
|
||||
// messages are deterministically-ordered, they're both shoved into this singular queue
|
||||
// The order at which they're shoved in together isn't deterministic
|
||||
//
|
||||
// This should be safe so long as Tributary and Substrate messages don't both expect
|
||||
// mutable references over the same data
|
||||
//
|
||||
// TODO: Better assert/guarantee this
|
||||
handle_coordinator_msg(
|
||||
&raw_db,
|
||||
&mut main_db,
|
||||
&coin,
|
||||
&mut coordinator,
|
||||
&mut tributary_mutable,
|
||||
&mut substrate_mutable,
|
||||
msg,
|
||||
).await;
|
||||
// This is isolated to better think about how its ordered, or rather, about how the other
|
||||
// cases aren't ordered
|
||||
//
|
||||
// While the coordinator messages are ordered, they're not deterministically ordered
|
||||
// Tributary-caused messages are deterministically ordered, and Substrate-caused messages
|
||||
// are deterministically-ordered, yet they're both shoved into a singular queue
|
||||
// The order at which they're shoved in together isn't deterministic
|
||||
//
|
||||
// This is safe so long as Tributary and Substrate messages don't both expect mutable
|
||||
// references over the same data
|
||||
handle_coordinator_msg(
|
||||
&mut txn,
|
||||
&coin,
|
||||
&mut coordinator,
|
||||
&mut tributary_mutable,
|
||||
&mut substrate_mutable,
|
||||
&msg,
|
||||
).await;
|
||||
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
coordinator.ack(msg).await;
|
||||
},
|
||||
|
||||
msg = substrate_mutable.scanner.events.recv() => {
|
||||
let mut txn = raw_db.txn();
|
||||
|
||||
match msg.unwrap() {
|
||||
ScannerEvent::Block { key, block, batch, outputs } => {
|
||||
let key = key.to_bytes().as_ref().to_vec();
|
||||
|
@ -625,16 +627,18 @@ async fn run<C: Coin, D: Db, Co: Coordinator>(raw_db: D, coin: C, mut coordinato
|
|||
};
|
||||
|
||||
// Start signing this batch
|
||||
tributary_mutable.substrate_signers.get_mut(&key).unwrap().sign(batch).await;
|
||||
tributary_mutable.substrate_signers.get_mut(&key).unwrap().sign(&mut txn, batch).await;
|
||||
},
|
||||
|
||||
ScannerEvent::Completed(id, tx) => {
|
||||
// We don't know which signer had this plan, so inform all of them
|
||||
for (_, signer) in tributary_mutable.signers.iter_mut() {
|
||||
signer.eventuality_completion(id, &tx).await;
|
||||
signer.eventuality_completion(&mut txn, id, &tx).await;
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
txn.commit();
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,7 +37,7 @@ pub enum ScannerEvent<C: Coin> {
|
|||
pub type ScannerEventChannel<C> = mpsc::UnboundedReceiver<ScannerEvent<C>>;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ScannerDb<C: Coin, D: Db>(D, PhantomData<C>);
|
||||
struct ScannerDb<C: Coin, D: Db>(PhantomData<C>, PhantomData<D>);
|
||||
impl<C: Coin, D: Db> ScannerDb<C, D> {
|
||||
fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
|
||||
D::key(b"SCANNER", dst, key)
|
||||
|
@ -60,9 +60,8 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
|
|||
res
|
||||
})
|
||||
}
|
||||
fn block_number(&self, id: &<C::Block as Block<C>>::Id) -> Option<usize> {
|
||||
self
|
||||
.0
|
||||
fn block_number<G: Get>(getter: &G, id: &<C::Block as Block<C>>::Id) -> Option<usize> {
|
||||
getter
|
||||
.get(Self::block_number_key(id))
|
||||
.map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap())
|
||||
}
|
||||
|
@ -91,8 +90,8 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
|
|||
keys.extend(key_bytes.as_ref());
|
||||
txn.put(Self::active_keys_key(), keys);
|
||||
}
|
||||
fn active_keys(&self) -> Vec<<C::Curve as Ciphersuite>::G> {
|
||||
let bytes_vec = self.0.get(Self::active_keys_key()).unwrap_or(vec![]);
|
||||
fn active_keys<G: Get>(getter: &G) -> Vec<<C::Curve as Ciphersuite>::G> {
|
||||
let bytes_vec = getter.get(Self::active_keys_key()).unwrap_or(vec![]);
|
||||
let mut bytes: &[u8] = bytes_vec.as_ref();
|
||||
|
||||
// Assumes keys will be 32 bytes when calculating the capacity
|
||||
|
@ -109,8 +108,8 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
|
|||
fn seen_key(id: &<C::Output as Output>::Id) -> Vec<u8> {
|
||||
Self::scanner_key(b"seen", id)
|
||||
}
|
||||
fn seen(&self, id: &<C::Output as Output>::Id) -> bool {
|
||||
self.0.get(Self::seen_key(id)).is_some()
|
||||
fn seen<G: Get>(getter: &G, id: &<C::Output as Output>::Id) -> bool {
|
||||
getter.get(Self::seen_key(id)).is_some()
|
||||
}
|
||||
|
||||
fn next_batch_key() -> Vec<u8> {
|
||||
|
@ -201,9 +200,8 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
|
|||
// Return this block's outputs so they can be pruned from the RAM cache
|
||||
(id, outputs)
|
||||
}
|
||||
fn latest_scanned_block(&self, key: <C::Curve as Ciphersuite>::G) -> usize {
|
||||
let bytes = self
|
||||
.0
|
||||
fn latest_scanned_block<G: Get>(getter: &G, key: <C::Curve as Ciphersuite>::G) -> usize {
|
||||
let bytes = getter
|
||||
.get(Self::scanned_block_key(&key))
|
||||
.expect("asking for latest scanned block of key which wasn't rotated to");
|
||||
u64::from_le_bytes(bytes.try_into().unwrap()).try_into().unwrap()
|
||||
|
@ -216,7 +214,7 @@ impl<C: Coin, D: Db> ScannerDb<C, D> {
|
|||
#[derive(Debug)]
|
||||
pub struct Scanner<C: Coin, D: Db> {
|
||||
coin: C,
|
||||
db: ScannerDb<C, D>,
|
||||
db: D,
|
||||
keys: Vec<<C::Curve as Ciphersuite>::G>,
|
||||
|
||||
eventualities: EventualitiesTracker<C::Eventuality>,
|
||||
|
@ -267,7 +265,12 @@ impl<C: Coin, D: Db> ScannerHandle<C, D> {
|
|||
/// If a key has been prior set, both keys will be scanned for as detailed in the Multisig
|
||||
/// documentation. The old key will eventually stop being scanned for, leaving just the
|
||||
/// updated-to key.
|
||||
pub async fn rotate_key(&mut self, activation_number: usize, key: <C::Curve as Ciphersuite>::G) {
|
||||
pub async fn rotate_key(
|
||||
&mut self,
|
||||
txn: &mut D::Transaction<'_>,
|
||||
activation_number: usize,
|
||||
key: <C::Curve as Ciphersuite>::G,
|
||||
) {
|
||||
let mut scanner = self.scanner.write().await;
|
||||
if !scanner.keys.is_empty() {
|
||||
// Protonet will have a single, static validator set
|
||||
|
@ -276,21 +279,25 @@ impl<C: Coin, D: Db> ScannerHandle<C, D> {
|
|||
}
|
||||
|
||||
info!("Rotating to key {}", hex::encode(key.to_bytes()));
|
||||
let mut txn = scanner.db.0.txn();
|
||||
let (_, outputs) = ScannerDb::<C, D>::save_scanned_block(&mut txn, &key, activation_number);
|
||||
|
||||
let (_, outputs) = ScannerDb::<C, D>::save_scanned_block(txn, &key, activation_number);
|
||||
scanner.ram_scanned.insert(key.to_bytes().as_ref().to_vec(), activation_number);
|
||||
assert!(outputs.is_empty());
|
||||
ScannerDb::<C, D>::add_active_key(&mut txn, key);
|
||||
txn.commit();
|
||||
|
||||
ScannerDb::<C, D>::add_active_key(txn, key);
|
||||
scanner.keys.push(key);
|
||||
}
|
||||
|
||||
pub async fn block_number(&self, id: &<C::Block as Block<C>>::Id) -> Option<usize> {
|
||||
self.scanner.read().await.db.block_number(id)
|
||||
// This is safe, despite not having a txn, since it's a static value
|
||||
// At worst, it's not set when it's expected to be set, yet that should be handled contextually
|
||||
ScannerDb::<C, D>::block_number(&self.scanner.read().await.db, id)
|
||||
}
|
||||
|
||||
/// Acknowledge having handled a block for a key.
|
||||
pub async fn ack_up_to_block(
|
||||
&mut self,
|
||||
txn: &mut D::Transaction<'_>,
|
||||
key: <C::Curve as Ciphersuite>::G,
|
||||
id: <C::Block as Block<C>>::Id,
|
||||
) -> (Vec<BlockHash>, Vec<C::Output>) {
|
||||
|
@ -298,23 +305,20 @@ impl<C: Coin, D: Db> ScannerHandle<C, D> {
|
|||
debug!("Block {} acknowledged", hex::encode(&id));
|
||||
|
||||
// Get the number for this block
|
||||
let number =
|
||||
scanner.db.block_number(&id).expect("main loop trying to operate on data we haven't scanned");
|
||||
let number = ScannerDb::<C, D>::block_number(txn, &id)
|
||||
.expect("main loop trying to operate on data we haven't scanned");
|
||||
// Get the number of the last block we acknowledged
|
||||
let prior = scanner.db.latest_scanned_block(key);
|
||||
let prior = ScannerDb::<C, D>::latest_scanned_block(txn, key);
|
||||
|
||||
let mut blocks = vec![];
|
||||
let mut outputs = vec![];
|
||||
let mut txn = scanner.db.0.txn();
|
||||
for number in (prior + 1) ..= number {
|
||||
let (block, these_outputs) = ScannerDb::<C, D>::save_scanned_block(&mut txn, &key, number);
|
||||
let (block, these_outputs) = ScannerDb::<C, D>::save_scanned_block(txn, &key, number);
|
||||
let block = BlockHash(block.unwrap().as_ref().try_into().unwrap());
|
||||
blocks.push(block);
|
||||
outputs.extend(these_outputs);
|
||||
}
|
||||
assert_eq!(blocks.last().unwrap().as_ref(), id.as_ref());
|
||||
// TODO: This likely needs to be atomic with the scheduler?
|
||||
txn.commit();
|
||||
|
||||
for output in &outputs {
|
||||
assert!(scanner.ram_outputs.remove(output.id().as_ref()));
|
||||
|
@ -329,8 +333,14 @@ impl<C: Coin, D: Db> Scanner<C, D> {
|
|||
pub fn new(coin: C, db: D) -> (ScannerHandle<C, D>, Vec<<C::Curve as Ciphersuite>::G>) {
|
||||
let (events_send, events_recv) = mpsc::unbounded_channel();
|
||||
|
||||
let db = ScannerDb(db, PhantomData);
|
||||
let keys = db.active_keys();
|
||||
let keys = ScannerDb::<C, D>::active_keys(&db);
|
||||
let mut ram_scanned = HashMap::new();
|
||||
for key in keys.clone() {
|
||||
ram_scanned.insert(
|
||||
key.to_bytes().as_ref().to_vec(),
|
||||
ScannerDb::<C, D>::latest_scanned_block(&db, key),
|
||||
);
|
||||
}
|
||||
|
||||
let scanner = Arc::new(RwLock::new(Scanner {
|
||||
coin,
|
||||
|
@ -339,7 +349,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {
|
|||
|
||||
eventualities: EventualitiesTracker::new(),
|
||||
|
||||
ram_scanned: HashMap::new(),
|
||||
ram_scanned,
|
||||
ram_outputs: HashSet::new(),
|
||||
|
||||
events: events_send,
|
||||
|
@ -380,21 +390,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {
|
|||
|
||||
for key in scanner.keys.clone() {
|
||||
let key_vec = key.to_bytes().as_ref().to_vec();
|
||||
let latest_scanned = {
|
||||
// Grab the latest scanned block according to the DB
|
||||
let db_scanned = scanner.db.latest_scanned_block(key);
|
||||
// We may, within this process's lifetime, have scanned more blocks
|
||||
// If they're still being processed, we will not have officially written them to the DB
|
||||
// as scanned yet
|
||||
// That way, if the process terminates, and is rebooted, we'll rescan from a handled
|
||||
// point, re-firing all events along the way, enabling them to be properly processed
|
||||
// In order to not re-fire them within this process's lifetime, check our RAM cache
|
||||
// of what we've scanned
|
||||
// We are allowed to re-fire them within this lifetime. It's just wasteful
|
||||
let ram_scanned = scanner.ram_scanned.get(&key_vec).cloned().unwrap_or(0);
|
||||
// Pick whichever is higher
|
||||
db_scanned.max(ram_scanned)
|
||||
};
|
||||
let latest_scanned = scanner.ram_scanned[&key_vec];
|
||||
|
||||
for i in (latest_scanned + 1) ..= latest {
|
||||
// TODO2: Check for key deprecation
|
||||
|
@ -408,14 +404,15 @@ impl<C: Coin, D: Db> Scanner<C, D> {
|
|||
};
|
||||
let block_id = block.id();
|
||||
|
||||
if let Some(id) = ScannerDb::<C, D>::block(&scanner.db.0, i) {
|
||||
// These block calls are safe, despite not having a txn, since they're static values
|
||||
if let Some(id) = ScannerDb::<C, D>::block(&scanner.db, i) {
|
||||
if id != block_id {
|
||||
panic!("reorg'd from finalized {} to {}", hex::encode(id), hex::encode(block_id));
|
||||
}
|
||||
} else {
|
||||
info!("Found new block: {}", hex::encode(&block_id));
|
||||
|
||||
if let Some(id) = ScannerDb::<C, D>::block(&scanner.db.0, i.saturating_sub(1)) {
|
||||
if let Some(id) = ScannerDb::<C, D>::block(&scanner.db, i.saturating_sub(1)) {
|
||||
if id != block.parent() {
|
||||
panic!(
|
||||
"block {} doesn't build off expected parent {}",
|
||||
|
@ -425,7 +422,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {
|
|||
}
|
||||
}
|
||||
|
||||
let mut txn = scanner.db.0.txn();
|
||||
let mut txn = scanner.db.txn();
|
||||
ScannerDb::<C, D>::save_block(&mut txn, i, &block_id);
|
||||
txn.commit();
|
||||
}
|
||||
|
@ -470,7 +467,35 @@ impl<C: Coin, D: Db> Scanner<C, D> {
|
|||
// On Bitcoin, the output ID should be unique for a given chain
|
||||
// On Monero, it's trivial to make an output sharing an ID with another
|
||||
// We should only scan outputs with valid IDs however, which will be unique
|
||||
let seen = scanner.db.seen(&id);
|
||||
|
||||
/*
|
||||
The safety of this code must satisfy the following conditions:
|
||||
1) seen is not set for the first occurrence
|
||||
2) seen is set for any future occurrence
|
||||
|
||||
seen is only written to after this code completes. Accordingly, it cannot be set
|
||||
before the first occurrence UNLESSS it's set, yet the last scanned block isn't.
|
||||
They are both written in the same database transaction, preventing this.
|
||||
|
||||
As for future occurrences, the RAM entry ensures they're handled properly even if
|
||||
the database has yet to be set.
|
||||
|
||||
On reboot, which will clear the RAM, if seen wasn't set, neither was latest scanned
|
||||
block. Accordingly, this will scan from some prior block, re-populating the RAM.
|
||||
|
||||
If seen was set, then this will be successfully read.
|
||||
|
||||
There's also no concern ram_outputs was pruned, yet seen wasn't set, as pruning
|
||||
from ram_outputs will acquire a write lock (preventing this code from acquiring
|
||||
its own write lock and running), and during its holding of the write lock, it
|
||||
commits the transaction setting seen and the latest scanned block.
|
||||
|
||||
This last case isn't true. Committing seen/latest_scanned_block happens after
|
||||
relinquishing the write lock.
|
||||
|
||||
TODO: Only update ram_outputs after committing the TXN in question.
|
||||
*/
|
||||
let seen = ScannerDb::<C, D>::seen(&scanner.db, &id);
|
||||
let id = id.as_ref().to_vec();
|
||||
if seen || scanner.ram_outputs.contains(&id) {
|
||||
panic!("scanned an output multiple times");
|
||||
|
@ -483,7 +508,7 @@ impl<C: Coin, D: Db> Scanner<C, D> {
|
|||
}
|
||||
|
||||
// Save the outputs to disk
|
||||
let mut txn = scanner.db.0.txn();
|
||||
let mut txn = scanner.db.txn();
|
||||
let batch = ScannerDb::<C, D>::save_outputs(&mut txn, &key, &block_id, &outputs);
|
||||
txn.commit();
|
||||
|
||||
|
|
|
@ -57,8 +57,8 @@ impl<C: Coin, D: Db> SignerDb<C, D> {
|
|||
existing.extend(tx.as_ref());
|
||||
txn.put(Self::completed_key(id), existing);
|
||||
}
|
||||
fn completed(&self, id: [u8; 32]) -> Option<Vec<u8>> {
|
||||
self.0.get(Self::completed_key(id))
|
||||
fn completed<G: Get>(getter: &G, id: [u8; 32]) -> Option<Vec<u8>> {
|
||||
getter.get(Self::completed_key(id))
|
||||
}
|
||||
|
||||
fn eventuality_key(id: [u8; 32]) -> Vec<u8> {
|
||||
|
@ -67,9 +67,9 @@ impl<C: Coin, D: Db> SignerDb<C, D> {
|
|||
fn save_eventuality(txn: &mut D::Transaction<'_>, id: [u8; 32], eventuality: C::Eventuality) {
|
||||
txn.put(Self::eventuality_key(id), eventuality.serialize());
|
||||
}
|
||||
fn eventuality(&self, id: [u8; 32]) -> Option<C::Eventuality> {
|
||||
fn eventuality<G: Get>(getter: &G, id: [u8; 32]) -> Option<C::Eventuality> {
|
||||
Some(
|
||||
C::Eventuality::read::<&[u8]>(&mut self.0.get(Self::eventuality_key(id))?.as_ref()).unwrap(),
|
||||
C::Eventuality::read::<&[u8]>(&mut getter.get(Self::eventuality_key(id))?.as_ref()).unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -79,8 +79,8 @@ impl<C: Coin, D: Db> SignerDb<C, D> {
|
|||
fn attempt(txn: &mut D::Transaction<'_>, id: &SignId) {
|
||||
txn.put(Self::attempt_key(id), []);
|
||||
}
|
||||
fn has_attempt(&mut self, id: &SignId) -> bool {
|
||||
self.0.get(Self::attempt_key(id)).is_some()
|
||||
fn has_attempt<G: Get>(getter: &G, id: &SignId) -> bool {
|
||||
getter.get(Self::attempt_key(id)).is_some()
|
||||
}
|
||||
|
||||
fn save_transaction(txn: &mut D::Transaction<'_>, tx: &C::Transaction) {
|
||||
|
@ -89,8 +89,9 @@ impl<C: Coin, D: Db> SignerDb<C, D> {
|
|||
}
|
||||
|
||||
pub struct Signer<C: Coin, D: Db> {
|
||||
db: PhantomData<D>,
|
||||
|
||||
coin: C,
|
||||
db: SignerDb<C, D>,
|
||||
|
||||
keys: ThresholdKeys<C::Curve>,
|
||||
|
||||
|
@ -120,10 +121,11 @@ impl<C: Coin, D: Db> fmt::Debug for Signer<C, D> {
|
|||
}
|
||||
|
||||
impl<C: Coin, D: Db> Signer<C, D> {
|
||||
pub fn new(db: D, coin: C, keys: ThresholdKeys<C::Curve>) -> Signer<C, D> {
|
||||
pub fn new(coin: C, keys: ThresholdKeys<C::Curve>) -> Signer<C, D> {
|
||||
Signer {
|
||||
db: PhantomData,
|
||||
|
||||
coin,
|
||||
db: SignerDb(db, PhantomData),
|
||||
|
||||
keys,
|
||||
|
||||
|
@ -136,7 +138,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
}
|
||||
}
|
||||
|
||||
pub async fn keys(&self) -> ThresholdKeys<C::Curve> {
|
||||
pub fn keys(&self) -> ThresholdKeys<C::Curve> {
|
||||
self.keys.clone()
|
||||
}
|
||||
|
||||
|
@ -172,10 +174,11 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
|
||||
pub async fn eventuality_completion(
|
||||
&mut self,
|
||||
txn: &mut D::Transaction<'_>,
|
||||
id: [u8; 32],
|
||||
tx_id: &<C::Transaction as Transaction<C>>::Id,
|
||||
) {
|
||||
if let Some(eventuality) = self.db.eventuality(id) {
|
||||
if let Some(eventuality) = SignerDb::<C, D>::eventuality(txn, id) {
|
||||
// Transaction hasn't hit our mempool/was dropped for a different signature
|
||||
// The latter can happen given certain latency conditions/a single malicious signer
|
||||
// In the case of a single malicious signer, they can drag multiple honest
|
||||
|
@ -193,10 +196,8 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
debug!("eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id));
|
||||
|
||||
// Stop trying to sign for this TX
|
||||
let mut txn = self.db.0.txn();
|
||||
SignerDb::<C, D>::save_transaction(&mut txn, &tx);
|
||||
SignerDb::<C, D>::complete(&mut txn, id, tx_id);
|
||||
txn.commit();
|
||||
SignerDb::<C, D>::save_transaction(txn, &tx);
|
||||
SignerDb::<C, D>::complete(txn, id, tx_id);
|
||||
|
||||
self.signable.remove(&id);
|
||||
self.attempt.remove(&id);
|
||||
|
@ -221,8 +222,8 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
}
|
||||
}
|
||||
|
||||
async fn check_completion(&mut self, id: [u8; 32]) -> bool {
|
||||
if let Some(txs) = self.db.completed(id) {
|
||||
async fn check_completion(&mut self, txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool {
|
||||
if let Some(txs) = SignerDb::<C, D>::completed(txn, id) {
|
||||
debug!(
|
||||
"SignTransaction/Reattempt order for {}, which we've already completed signing",
|
||||
hex::encode(id)
|
||||
|
@ -255,8 +256,8 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
}
|
||||
}
|
||||
|
||||
async fn attempt(&mut self, id: [u8; 32], attempt: u32) {
|
||||
if self.check_completion(id).await {
|
||||
async fn attempt(&mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], attempt: u32) {
|
||||
if self.check_completion(txn, id).await {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -304,7 +305,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
// branch again for something we've already attempted
|
||||
//
|
||||
// Only run if this hasn't already been attempted
|
||||
if self.db.has_attempt(&id) {
|
||||
if SignerDb::<C, D>::has_attempt(txn, &id) {
|
||||
warn!(
|
||||
"already attempted {} #{}. this is an error if we didn't reboot",
|
||||
hex::encode(id.id),
|
||||
|
@ -313,9 +314,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
return;
|
||||
}
|
||||
|
||||
let mut txn = self.db.0.txn();
|
||||
SignerDb::<C, D>::attempt(&mut txn, &id);
|
||||
txn.commit();
|
||||
SignerDb::<C, D>::attempt(txn, &id);
|
||||
|
||||
// Attempt to create the TX
|
||||
let machine = match self.coin.attempt_send(tx).await {
|
||||
|
@ -338,23 +337,22 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
|
||||
pub async fn sign_transaction(
|
||||
&mut self,
|
||||
txn: &mut D::Transaction<'_>,
|
||||
id: [u8; 32],
|
||||
tx: C::SignableTransaction,
|
||||
eventuality: C::Eventuality,
|
||||
) {
|
||||
if self.check_completion(id).await {
|
||||
if self.check_completion(txn, id).await {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut txn = self.db.0.txn();
|
||||
SignerDb::<C, D>::save_eventuality(&mut txn, id, eventuality);
|
||||
txn.commit();
|
||||
SignerDb::<C, D>::save_eventuality(txn, id, eventuality);
|
||||
|
||||
self.signable.insert(id, tx);
|
||||
self.attempt(id, 0).await;
|
||||
self.attempt(txn, id, 0).await;
|
||||
}
|
||||
|
||||
pub async fn handle(&mut self, msg: CoordinatorMessage) {
|
||||
pub async fn handle(&mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage) {
|
||||
match msg {
|
||||
CoordinatorMessage::Preprocesses { id, mut preprocesses } => {
|
||||
if self.verify_id(&id).is_err() {
|
||||
|
@ -440,11 +438,9 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
};
|
||||
|
||||
// Save the transaction in case it's needed for recovery
|
||||
let mut txn = self.db.0.txn();
|
||||
SignerDb::<C, D>::save_transaction(&mut txn, &tx);
|
||||
SignerDb::<C, D>::save_transaction(txn, &tx);
|
||||
let tx_id = tx.id();
|
||||
SignerDb::<C, D>::complete(&mut txn, id.id, &tx_id);
|
||||
txn.commit();
|
||||
SignerDb::<C, D>::complete(txn, id.id, &tx_id);
|
||||
|
||||
// Publish it
|
||||
if let Err(e) = self.coin.publish_transaction(&tx).await {
|
||||
|
@ -463,7 +459,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
}
|
||||
|
||||
CoordinatorMessage::Reattempt { id } => {
|
||||
self.attempt(id.id, id.attempt).await;
|
||||
self.attempt(txn, id.id, id.attempt).await;
|
||||
}
|
||||
|
||||
CoordinatorMessage::Completed { key: _, id, tx: mut tx_vec } => {
|
||||
|
@ -479,7 +475,7 @@ impl<C: Coin, D: Db> Signer<C, D> {
|
|||
}
|
||||
tx.as_mut().copy_from_slice(&tx_vec);
|
||||
|
||||
self.eventuality_completion(id, &tx).await;
|
||||
self.eventuality_completion(txn, id, &tx).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
use core::fmt;
|
||||
use core::{marker::PhantomData, fmt};
|
||||
use std::collections::{VecDeque, HashMap};
|
||||
|
||||
use rand_core::OsRng;
|
||||
|
@ -24,7 +24,7 @@ use serai_client::{
|
|||
};
|
||||
|
||||
use messages::{sign::SignId, coordinator::*};
|
||||
use crate::{DbTxn, Db};
|
||||
use crate::{Get, DbTxn, Db};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum SubstrateSignerEvent {
|
||||
|
@ -45,8 +45,8 @@ impl<D: Db> SubstrateSignerDb<D> {
|
|||
fn complete(txn: &mut D::Transaction<'_>, id: [u8; 32]) {
|
||||
txn.put(Self::completed_key(id), [1]);
|
||||
}
|
||||
fn completed(&self, id: [u8; 32]) -> bool {
|
||||
self.0.get(Self::completed_key(id)).is_some()
|
||||
fn completed<G: Get>(getter: &G, id: [u8; 32]) -> bool {
|
||||
getter.get(Self::completed_key(id)).is_some()
|
||||
}
|
||||
|
||||
fn attempt_key(id: &SignId) -> Vec<u8> {
|
||||
|
@ -55,8 +55,8 @@ impl<D: Db> SubstrateSignerDb<D> {
|
|||
fn attempt(txn: &mut D::Transaction<'_>, id: &SignId) {
|
||||
txn.put(Self::attempt_key(id), []);
|
||||
}
|
||||
fn has_attempt(&mut self, id: &SignId) -> bool {
|
||||
self.0.get(Self::attempt_key(id)).is_some()
|
||||
fn has_attempt<G: Get>(getter: &G, id: &SignId) -> bool {
|
||||
getter.get(Self::attempt_key(id)).is_some()
|
||||
}
|
||||
|
||||
fn save_batch(txn: &mut D::Transaction<'_>, batch: &SignedBatch) {
|
||||
|
@ -65,7 +65,7 @@ impl<D: Db> SubstrateSignerDb<D> {
|
|||
}
|
||||
|
||||
pub struct SubstrateSigner<D: Db> {
|
||||
db: SubstrateSignerDb<D>,
|
||||
db: PhantomData<D>,
|
||||
|
||||
keys: ThresholdKeys<Ristretto>,
|
||||
|
||||
|
@ -88,9 +88,9 @@ impl<D: Db> fmt::Debug for SubstrateSigner<D> {
|
|||
}
|
||||
|
||||
impl<D: Db> SubstrateSigner<D> {
|
||||
pub fn new(db: D, keys: ThresholdKeys<Ristretto>) -> SubstrateSigner<D> {
|
||||
pub fn new(keys: ThresholdKeys<Ristretto>) -> SubstrateSigner<D> {
|
||||
SubstrateSigner {
|
||||
db: SubstrateSignerDb(db),
|
||||
db: PhantomData,
|
||||
|
||||
keys,
|
||||
|
||||
|
@ -129,9 +129,9 @@ impl<D: Db> SubstrateSigner<D> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
async fn attempt(&mut self, id: [u8; 32], attempt: u32) {
|
||||
async fn attempt(&mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], attempt: u32) {
|
||||
// See above commentary for why this doesn't emit SignedBatch
|
||||
if self.db.completed(id) {
|
||||
if SubstrateSignerDb::<D>::completed(txn, id) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -176,7 +176,7 @@ impl<D: Db> SubstrateSigner<D> {
|
|||
// branch again for something we've already attempted
|
||||
//
|
||||
// Only run if this hasn't already been attempted
|
||||
if self.db.has_attempt(&id) {
|
||||
if SubstrateSignerDb::<D>::has_attempt(txn, &id) {
|
||||
warn!(
|
||||
"already attempted {} #{}. this is an error if we didn't reboot",
|
||||
hex::encode(id.id),
|
||||
|
@ -185,9 +185,7 @@ impl<D: Db> SubstrateSigner<D> {
|
|||
return;
|
||||
}
|
||||
|
||||
let mut txn = self.db.0.txn();
|
||||
SubstrateSignerDb::<D>::attempt(&mut txn, &id);
|
||||
txn.commit();
|
||||
SubstrateSignerDb::<D>::attempt(txn, &id);
|
||||
|
||||
// b"substrate" is a literal from sp-core
|
||||
let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), self.keys.clone());
|
||||
|
@ -201,8 +199,8 @@ impl<D: Db> SubstrateSigner<D> {
|
|||
));
|
||||
}
|
||||
|
||||
pub async fn sign(&mut self, batch: Batch) {
|
||||
if self.db.completed(batch.block.0) {
|
||||
pub async fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) {
|
||||
if SubstrateSignerDb::<D>::completed(txn, batch.block.0) {
|
||||
debug!("Sign batch order for ID we've already completed signing");
|
||||
// See batch_signed for commentary on why this simply returns
|
||||
return;
|
||||
|
@ -210,10 +208,10 @@ impl<D: Db> SubstrateSigner<D> {
|
|||
|
||||
let id = batch.block.0;
|
||||
self.signable.insert(id, batch);
|
||||
self.attempt(id, 0).await;
|
||||
self.attempt(txn, id, 0).await;
|
||||
}
|
||||
|
||||
pub async fn handle(&mut self, msg: CoordinatorMessage) {
|
||||
pub async fn handle(&mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage) {
|
||||
match msg {
|
||||
CoordinatorMessage::BatchPreprocesses { id, mut preprocesses } => {
|
||||
if self.verify_id(&id).is_err() {
|
||||
|
@ -302,10 +300,8 @@ impl<D: Db> SubstrateSigner<D> {
|
|||
SignedBatch { batch: self.signable.remove(&id.id).unwrap(), signature: sig.into() };
|
||||
|
||||
// Save the batch in case it's needed for recovery
|
||||
let mut txn = self.db.0.txn();
|
||||
SubstrateSignerDb::<D>::save_batch(&mut txn, &batch);
|
||||
SubstrateSignerDb::<D>::complete(&mut txn, id.id);
|
||||
txn.commit();
|
||||
SubstrateSignerDb::<D>::save_batch(txn, &batch);
|
||||
SubstrateSignerDb::<D>::complete(txn, id.id);
|
||||
|
||||
// Stop trying to sign for this batch
|
||||
assert!(self.attempt.remove(&id.id).is_some());
|
||||
|
@ -316,16 +312,14 @@ impl<D: Db> SubstrateSigner<D> {
|
|||
}
|
||||
|
||||
CoordinatorMessage::BatchReattempt { id } => {
|
||||
self.attempt(id.id, id.attempt).await;
|
||||
self.attempt(txn, id.id, id.attempt).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn batch_signed(&mut self, block: BlockHash) {
|
||||
pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, block: BlockHash) {
|
||||
// Stop trying to sign for this batch
|
||||
let mut txn = self.db.0.txn();
|
||||
SubstrateSignerDb::<D>::complete(&mut txn, block.0);
|
||||
txn.commit();
|
||||
SubstrateSignerDb::<D>::complete(txn, block.0);
|
||||
|
||||
self.signable.remove(&block.0);
|
||||
self.attempt.remove(&block.0);
|
||||
|
|
|
@ -7,7 +7,7 @@ use frost::{Participant, ThresholdKeys};
|
|||
|
||||
use tokio::time::timeout;
|
||||
|
||||
use serai_db::MemDb;
|
||||
use serai_db::{DbTxn, MemDb};
|
||||
|
||||
use crate::{
|
||||
Plan, Db,
|
||||
|
@ -78,10 +78,12 @@ pub async fn test_addresses<C: Coin>(coin: C) {
|
|||
coin.mine_block().await;
|
||||
}
|
||||
|
||||
let db = MemDb::new();
|
||||
let mut db = MemDb::new();
|
||||
let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
|
||||
assert!(active_keys.is_empty());
|
||||
scanner.rotate_key(coin.get_latest_block_number().await.unwrap(), key).await;
|
||||
let mut txn = db.txn();
|
||||
scanner.rotate_key(&mut txn, coin.get_latest_block_number().await.unwrap(), key).await;
|
||||
txn.commit();
|
||||
|
||||
// Receive funds to the branch address and make sure it's properly identified
|
||||
let block_id = coin.test_send(C::branch_address(key)).await.id();
|
||||
|
|
|
@ -7,7 +7,7 @@ use rand_core::{RngCore, OsRng};
|
|||
use group::GroupEncoding;
|
||||
use frost::{Participant, ThresholdParams, tests::clone_without};
|
||||
|
||||
use serai_db::MemDb;
|
||||
use serai_db::{DbTxn, Db, MemDb};
|
||||
|
||||
use serai_client::{
|
||||
primitives::{MONERO_NET_ID, BlockHash},
|
||||
|
@ -31,19 +31,24 @@ pub async fn test_key_gen<C: Coin>() {
|
|||
let mut entropy = Zeroizing::new([0; 32]);
|
||||
OsRng.fill_bytes(entropy.as_mut());
|
||||
entropies.insert(i, entropy);
|
||||
dbs.insert(i, MemDb::new());
|
||||
key_gens.insert(i, KeyGen::<C, _>::new(dbs[&i].clone(), entropies[&i].clone()));
|
||||
let db = MemDb::new();
|
||||
dbs.insert(i, db.clone());
|
||||
key_gens.insert(i, KeyGen::<C, MemDb>::new(db, entropies[&i].clone()));
|
||||
}
|
||||
|
||||
let mut all_commitments = HashMap::new();
|
||||
for i in 1 ..= 5 {
|
||||
let key_gen = key_gens.get_mut(&i).unwrap();
|
||||
let mut txn = dbs.get_mut(&i).unwrap().txn();
|
||||
if let ProcessorMessage::Commitments { id, commitments } = key_gen
|
||||
.handle(CoordinatorMessage::GenerateKey {
|
||||
id: ID,
|
||||
params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())
|
||||
.unwrap(),
|
||||
})
|
||||
.handle(
|
||||
&mut txn,
|
||||
CoordinatorMessage::GenerateKey {
|
||||
id: ID,
|
||||
params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())
|
||||
.unwrap(),
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
assert_eq!(id, ID);
|
||||
|
@ -51,27 +56,32 @@ pub async fn test_key_gen<C: Coin>() {
|
|||
} else {
|
||||
panic!("didn't get commitments back");
|
||||
}
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
// 1 is rebuilt on every step
|
||||
// 2 is rebuilt here
|
||||
// 3 ... are rebuilt once, one at each of the following steps
|
||||
let rebuild = |key_gens: &mut HashMap<_, _>, i| {
|
||||
let rebuild = |key_gens: &mut HashMap<_, _>, dbs: &HashMap<_, MemDb>, i| {
|
||||
key_gens.remove(&i);
|
||||
key_gens.insert(i, KeyGen::<C, _>::new(dbs[&i].clone(), entropies[&i].clone()));
|
||||
};
|
||||
rebuild(&mut key_gens, 1);
|
||||
rebuild(&mut key_gens, 2);
|
||||
rebuild(&mut key_gens, &dbs, 1);
|
||||
rebuild(&mut key_gens, &dbs, 2);
|
||||
|
||||
let mut all_shares = HashMap::new();
|
||||
for i in 1 ..= 5 {
|
||||
let key_gen = key_gens.get_mut(&i).unwrap();
|
||||
let mut txn = dbs.get_mut(&i).unwrap().txn();
|
||||
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
|
||||
if let ProcessorMessage::Shares { id, shares } = key_gen
|
||||
.handle(CoordinatorMessage::Commitments {
|
||||
id: ID,
|
||||
commitments: clone_without(&all_commitments, &i),
|
||||
})
|
||||
.handle(
|
||||
&mut txn,
|
||||
CoordinatorMessage::Commitments {
|
||||
id: ID,
|
||||
commitments: clone_without(&all_commitments, &i),
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
assert_eq!(id, ID);
|
||||
|
@ -79,24 +89,29 @@ pub async fn test_key_gen<C: Coin>() {
|
|||
} else {
|
||||
panic!("didn't get shares back");
|
||||
}
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
// Rebuild 1 and 3
|
||||
rebuild(&mut key_gens, 1);
|
||||
rebuild(&mut key_gens, 3);
|
||||
rebuild(&mut key_gens, &dbs, 1);
|
||||
rebuild(&mut key_gens, &dbs, 3);
|
||||
|
||||
let mut res = None;
|
||||
for i in 1 ..= 5 {
|
||||
let key_gen = key_gens.get_mut(&i).unwrap();
|
||||
let mut txn = dbs.get_mut(&i).unwrap().txn();
|
||||
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
|
||||
if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, coin_key } = key_gen
|
||||
.handle(CoordinatorMessage::Shares {
|
||||
id: ID,
|
||||
shares: all_shares
|
||||
.iter()
|
||||
.filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })
|
||||
.collect(),
|
||||
})
|
||||
.handle(
|
||||
&mut txn,
|
||||
CoordinatorMessage::Shares {
|
||||
id: ID,
|
||||
shares: all_shares
|
||||
.iter()
|
||||
.filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })
|
||||
.collect(),
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
assert_eq!(id, ID);
|
||||
|
@ -107,17 +122,25 @@ pub async fn test_key_gen<C: Coin>() {
|
|||
} else {
|
||||
panic!("didn't get key back");
|
||||
}
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
// Rebuild 1 and 4
|
||||
rebuild(&mut key_gens, 1);
|
||||
rebuild(&mut key_gens, 4);
|
||||
rebuild(&mut key_gens, &dbs, 1);
|
||||
rebuild(&mut key_gens, &dbs, 4);
|
||||
|
||||
for i in 1 ..= 5 {
|
||||
let key_gen = key_gens.get_mut(&i).unwrap();
|
||||
let mut txn = dbs.get_mut(&i).unwrap().txn();
|
||||
let KeyConfirmed { activation_block, substrate_keys, coin_keys } = key_gen
|
||||
.confirm(SubstrateContext { coin_latest_finalized_block: BlockHash([0x11; 32]) }, ID)
|
||||
.confirm(
|
||||
&mut txn,
|
||||
SubstrateContext { coin_latest_finalized_block: BlockHash([0x11; 32]) },
|
||||
ID,
|
||||
)
|
||||
.await;
|
||||
txn.commit();
|
||||
|
||||
assert_eq!(activation_block, BlockHash([0x11; 32]));
|
||||
let params =
|
||||
ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap();
|
||||
|
|
|
@ -9,7 +9,7 @@ use tokio::time::timeout;
|
|||
|
||||
use serai_client::primitives::BlockHash;
|
||||
|
||||
use serai_db::MemDb;
|
||||
use serai_db::{DbTxn, Db, MemDb};
|
||||
|
||||
use crate::{
|
||||
coins::{OutputType, Output, Block, Coin},
|
||||
|
@ -20,6 +20,7 @@ pub async fn test_scanner<C: Coin>(coin: C) {
|
|||
let mut keys =
|
||||
frost::tests::key_gen::<_, C::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap();
|
||||
C::tweak_keys(&mut keys);
|
||||
let group_key = keys.group_key();
|
||||
|
||||
// Mine blocks so there's a confirmed block
|
||||
for _ in 0 .. C::CONFIRMATIONS {
|
||||
|
@ -30,11 +31,14 @@ pub async fn test_scanner<C: Coin>(coin: C) {
|
|||
let activation_number = coin.get_latest_block_number().await.unwrap();
|
||||
let db = MemDb::new();
|
||||
let new_scanner = || async {
|
||||
let mut db = db.clone();
|
||||
let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
|
||||
let mut first = first.lock().unwrap();
|
||||
if *first {
|
||||
assert!(active_keys.is_empty());
|
||||
scanner.rotate_key(activation_number, keys.group_key()).await;
|
||||
let mut txn = db.txn();
|
||||
scanner.rotate_key(&mut txn, activation_number, group_key).await;
|
||||
txn.commit();
|
||||
*first = false;
|
||||
} else {
|
||||
assert_eq!(active_keys.len(), 1);
|
||||
|
@ -83,7 +87,14 @@ pub async fn test_scanner<C: Coin>(coin: C) {
|
|||
}
|
||||
curr_block += 1;
|
||||
}
|
||||
assert_eq!(scanner.ack_up_to_block(keys.group_key(), block_id).await, (blocks, outputs));
|
||||
|
||||
let mut cloned_db = db.clone();
|
||||
let mut txn = cloned_db.txn();
|
||||
assert_eq!(
|
||||
scanner.ack_up_to_block(&mut txn, keys.group_key(), block_id).await,
|
||||
(blocks, outputs)
|
||||
);
|
||||
txn.commit();
|
||||
|
||||
// There should be no more events
|
||||
assert!(timeout(Duration::from_secs(30), scanner.events.recv()).await.is_err());
|
||||
|
|
|
@ -8,7 +8,7 @@ use frost::{
|
|||
dkg::tests::{key_gen, clone_without},
|
||||
};
|
||||
|
||||
use serai_db::MemDb;
|
||||
use serai_db::{DbTxn, Db, MemDb};
|
||||
|
||||
use messages::sign::*;
|
||||
use crate::{
|
||||
|
@ -39,19 +39,23 @@ pub async fn sign<C: Coin>(
|
|||
}
|
||||
|
||||
let mut signers = HashMap::new();
|
||||
let mut dbs = HashMap::new();
|
||||
let mut t = 0;
|
||||
for i in 1 ..= keys.len() {
|
||||
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
|
||||
let keys = keys.remove(&i).unwrap();
|
||||
t = keys.params().t();
|
||||
signers.insert(i, Signer::new(MemDb::new(), coin.clone(), keys));
|
||||
signers.insert(i, Signer::<_, MemDb>::new(coin.clone(), keys));
|
||||
dbs.insert(i, MemDb::new());
|
||||
}
|
||||
drop(keys);
|
||||
|
||||
for i in 1 ..= signers.len() {
|
||||
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
|
||||
let (tx, eventuality) = txs.remove(&i).unwrap();
|
||||
signers.get_mut(&i).unwrap().sign_transaction(actual_id.id, tx, eventuality).await;
|
||||
let mut txn = dbs.get_mut(&i).unwrap().txn();
|
||||
signers.get_mut(&i).unwrap().sign_transaction(&mut txn, actual_id.id, tx, eventuality).await;
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
let mut signing_set = vec![];
|
||||
|
@ -84,14 +88,20 @@ pub async fn sign<C: Coin>(
|
|||
|
||||
let mut shares = HashMap::new();
|
||||
for i in &signing_set {
|
||||
let mut txn = dbs.get_mut(i).unwrap().txn();
|
||||
signers
|
||||
.get_mut(i)
|
||||
.unwrap()
|
||||
.handle(CoordinatorMessage::Preprocesses {
|
||||
id: actual_id.clone(),
|
||||
preprocesses: clone_without(&preprocesses, i),
|
||||
})
|
||||
.handle(
|
||||
&mut txn,
|
||||
CoordinatorMessage::Preprocesses {
|
||||
id: actual_id.clone(),
|
||||
preprocesses: clone_without(&preprocesses, i),
|
||||
},
|
||||
)
|
||||
.await;
|
||||
txn.commit();
|
||||
|
||||
if let SignerEvent::ProcessorMessage(ProcessorMessage::Share { id, share }) =
|
||||
signers.get_mut(i).unwrap().events.pop_front().unwrap()
|
||||
{
|
||||
|
@ -104,14 +114,17 @@ pub async fn sign<C: Coin>(
|
|||
|
||||
let mut tx_id = None;
|
||||
for i in &signing_set {
|
||||
let mut txn = dbs.get_mut(i).unwrap().txn();
|
||||
signers
|
||||
.get_mut(i)
|
||||
.unwrap()
|
||||
.handle(CoordinatorMessage::Shares {
|
||||
id: actual_id.clone(),
|
||||
shares: clone_without(&shares, i),
|
||||
})
|
||||
.handle(
|
||||
&mut txn,
|
||||
CoordinatorMessage::Shares { id: actual_id.clone(), shares: clone_without(&shares, i) },
|
||||
)
|
||||
.await;
|
||||
txn.commit();
|
||||
|
||||
if let SignerEvent::SignedTransaction { id, tx } =
|
||||
signers.get_mut(i).unwrap().events.pop_front().unwrap()
|
||||
{
|
||||
|
|
|
@ -12,7 +12,7 @@ use frost::{
|
|||
use scale::Encode;
|
||||
use sp_application_crypto::{RuntimePublic, sr25519::Public};
|
||||
|
||||
use serai_db::MemDb;
|
||||
use serai_db::{DbTxn, Db, MemDb};
|
||||
|
||||
use serai_client::{primitives::*, in_instructions::primitives::*};
|
||||
|
||||
|
@ -49,14 +49,21 @@ async fn test_substrate_signer() {
|
|||
};
|
||||
|
||||
let mut signers = HashMap::new();
|
||||
let mut dbs = HashMap::new();
|
||||
let mut t = 0;
|
||||
for i in 1 ..= keys.len() {
|
||||
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
|
||||
let keys = keys.remove(&i).unwrap();
|
||||
t = keys.params().t();
|
||||
let mut signer = SubstrateSigner::new(MemDb::new(), keys);
|
||||
signer.sign(batch.clone()).await;
|
||||
|
||||
let mut signer = SubstrateSigner::<MemDb>::new(keys);
|
||||
let mut db = MemDb::new();
|
||||
let mut txn = db.txn();
|
||||
signer.sign(&mut txn, batch.clone()).await;
|
||||
txn.commit();
|
||||
|
||||
signers.insert(i, signer);
|
||||
dbs.insert(i, db);
|
||||
}
|
||||
drop(keys);
|
||||
|
||||
|
@ -92,14 +99,20 @@ async fn test_substrate_signer() {
|
|||
|
||||
let mut shares = HashMap::new();
|
||||
for i in &signing_set {
|
||||
let mut txn = dbs.get_mut(i).unwrap().txn();
|
||||
signers
|
||||
.get_mut(i)
|
||||
.unwrap()
|
||||
.handle(CoordinatorMessage::BatchPreprocesses {
|
||||
id: actual_id.clone(),
|
||||
preprocesses: clone_without(&preprocesses, i),
|
||||
})
|
||||
.handle(
|
||||
&mut txn,
|
||||
CoordinatorMessage::BatchPreprocesses {
|
||||
id: actual_id.clone(),
|
||||
preprocesses: clone_without(&preprocesses, i),
|
||||
},
|
||||
)
|
||||
.await;
|
||||
txn.commit();
|
||||
|
||||
if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchShare { id, share }) =
|
||||
signers.get_mut(i).unwrap().events.pop_front().unwrap()
|
||||
{
|
||||
|
@ -111,14 +124,19 @@ async fn test_substrate_signer() {
|
|||
}
|
||||
|
||||
for i in &signing_set {
|
||||
let mut txn = dbs.get_mut(i).unwrap().txn();
|
||||
signers
|
||||
.get_mut(i)
|
||||
.unwrap()
|
||||
.handle(CoordinatorMessage::BatchShares {
|
||||
id: actual_id.clone(),
|
||||
shares: clone_without(&shares, i),
|
||||
})
|
||||
.handle(
|
||||
&mut txn,
|
||||
CoordinatorMessage::BatchShares {
|
||||
id: actual_id.clone(),
|
||||
shares: clone_without(&shares, i),
|
||||
},
|
||||
)
|
||||
.await;
|
||||
txn.commit();
|
||||
|
||||
if let SubstrateSignerEvent::SignedBatch(signed_batch) =
|
||||
signers.get_mut(i).unwrap().events.pop_front().unwrap()
|
||||
|
|
|
@ -6,7 +6,7 @@ use frost::{Participant, dkg::tests::key_gen};
|
|||
|
||||
use tokio::time::timeout;
|
||||
|
||||
use serai_db::MemDb;
|
||||
use serai_db::{DbTxn, Db, MemDb};
|
||||
|
||||
use crate::{
|
||||
Payment, Plan,
|
||||
|
@ -24,10 +24,13 @@ pub async fn test_wallet<C: Coin>(coin: C) {
|
|||
}
|
||||
let key = keys[&Participant::new(1).unwrap()].group_key();
|
||||
|
||||
let (mut scanner, active_keys) = Scanner::new(coin.clone(), MemDb::new());
|
||||
let mut db = MemDb::new();
|
||||
let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
|
||||
assert!(active_keys.is_empty());
|
||||
let (block_id, outputs) = {
|
||||
scanner.rotate_key(coin.get_latest_block_number().await.unwrap(), key).await;
|
||||
let mut txn = db.txn();
|
||||
scanner.rotate_key(&mut txn, coin.get_latest_block_number().await.unwrap(), key).await;
|
||||
txn.commit();
|
||||
|
||||
let block = coin.test_send(C::address(key)).await;
|
||||
let block_id = block.id();
|
||||
|
@ -114,8 +117,10 @@ pub async fn test_wallet<C: Coin>(coin: C) {
|
|||
}
|
||||
|
||||
// Check the Scanner DB can reload the outputs
|
||||
let mut txn = db.txn();
|
||||
assert_eq!(
|
||||
scanner.ack_up_to_block(key, block.id()).await.1,
|
||||
scanner.ack_up_to_block(&mut txn, key, block.id()).await.1,
|
||||
[first_outputs, outputs].concat().to_vec()
|
||||
);
|
||||
txn.commit();
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue