Use a single Substrate signer, per intentions in #277

Removes key from Update as well, since it's no longer variable.
This commit is contained in:
Luke Parker 2023-08-24 20:30:50 -04:00
parent b91bd44476
commit bccdabb53d
No known key found for this signature in database
8 changed files with 33 additions and 47 deletions

View file

@ -569,7 +569,7 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
}
},
ProcessorMessage::Substrate(inner_msg) => match inner_msg {
processor_messages::substrate::ProcessorMessage::Update { key: _, batch } => {
processor_messages::substrate::ProcessorMessage::Update { batch } => {
assert_eq!(
batch.batch.network, msg.network,
"processor sent us a batch for a different network than it was for",

View file

@ -176,7 +176,7 @@ pub mod substrate {
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)]
pub enum ProcessorMessage {
Update { key: [u8; 32], batch: SignedBatch },
Update { batch: SignedBatch },
}
}

View file

@ -157,8 +157,8 @@ struct TributaryMutable<N: Network, D: Db> {
// Substrate may mark tasks as completed, invalidating any existing mutable borrows.
// The safety of this follows as written above.
// TODO: There should only be one SubstrateSigner at a time (see #277)
substrate_signers: HashMap<[u8; 32], SubstrateSigner<D>>,
// There should only be one SubstrateSigner at a time (see #277)
substrate_signer: Option<SubstrateSigner<D>>,
}
// Items which are mutably borrowed by Substrate.
@ -308,7 +308,9 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
}
CoordinatorMessage::Coordinator(msg) => {
tributary_mutable.substrate_signers.get_mut(msg.key()).unwrap().handle(txn, msg).await;
if let Some(substrate_signer) = tributary_mutable.substrate_signer.as_mut() {
substrate_signer.handle(txn, msg).await;
}
}
CoordinatorMessage::Substrate(msg) => {
@ -317,7 +319,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
// This is the first key pair for this network so no block has been finalized yet
let activation_number = if context.network_latest_finalized_block.0 == [0; 32] {
assert!(tributary_mutable.signers.is_empty());
assert!(tributary_mutable.substrate_signers.is_empty());
assert!(tributary_mutable.substrate_signer.is_none());
assert!(substrate_mutable.schedulers.is_empty());
// Wait until a network's block's time exceeds Serai's time
@ -373,10 +375,9 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
// See TributaryMutable's struct definition for why this block is safe
let KeyConfirmed { substrate_keys, network_keys } =
tributary_mutable.key_gen.confirm(txn, set, key_pair).await;
tributary_mutable.substrate_signers.insert(
substrate_keys.group_key().to_bytes(),
SubstrateSigner::new(N::NETWORK, substrate_keys),
);
// TODO2: Don't immediately set this, set it once it's active
tributary_mutable.substrate_signer =
Some(SubstrateSigner::new(N::NETWORK, substrate_keys));
let key = network_keys.group_key();
@ -408,9 +409,9 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
// We now have to acknowledge every block for this key up to the acknowledged block
let outputs = substrate_mutable.scanner.ack_up_to_block(txn, key, block_id).await;
// Since this block was acknowledged, we no longer have to sign the batch for it
for batch_id in batches {
for (_, signer) in tributary_mutable.substrate_signers.iter_mut() {
signer.batch_signed(txn, batch_id);
if let Some(substrate_signer) = tributary_mutable.substrate_signer.as_mut() {
for batch_id in batches {
substrate_signer.batch_signed(txn, batch_id);
}
}
@ -506,7 +507,7 @@ async fn boot<N: Network, D: Db>(
let (mut scanner, active_keys) = Scanner::new(network.clone(), raw_db.clone());
let mut schedulers = HashMap::<Vec<u8>, Scheduler<N>>::new();
let mut substrate_signers = HashMap::new();
let mut substrate_signer = None;
let mut signers = HashMap::new();
let main_db = MainDb::new(raw_db.clone());
@ -516,11 +517,10 @@ async fn boot<N: Network, D: Db>(
let (substrate_keys, network_keys) = key_gen.keys(key);
let substrate_key = substrate_keys.group_key();
let substrate_signer = SubstrateSigner::new(N::NETWORK, substrate_keys);
// We don't have to load any state for this since the Scanner will re-fire any events
// necessary
substrate_signers.insert(substrate_key.to_bytes(), substrate_signer);
// TODO2: This uses most recent as signer, use the active one
substrate_signer = Some(SubstrateSigner::new(N::NETWORK, substrate_keys));
let mut signer = Signer::new(network.clone(), network_keys);
@ -554,7 +554,7 @@ async fn boot<N: Network, D: Db>(
(
main_db,
TributaryMutable { key_gen, substrate_signers, signers },
TributaryMutable { key_gen, substrate_signer, signers },
SubstrateMutable { scanner, schedulers },
)
}
@ -603,7 +603,7 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
}
}
for (key, signer) in tributary_mutable.substrate_signers.iter_mut() {
if let Some(signer) = tributary_mutable.substrate_signer.as_mut() {
while let Some(msg) = signer.events.pop_front() {
match msg {
SubstrateSignerEvent::ProcessorMessage(msg) => {
@ -612,7 +612,6 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
SubstrateSignerEvent::SignedBatch(batch) => {
coordinator
.send(ProcessorMessage::Substrate(messages::substrate::ProcessorMessage::Update {
key: *key,
batch,
}))
.await;
@ -665,7 +664,7 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
let mut txn = raw_db.txn();
match msg.unwrap() {
ScannerEvent::Block { key, block, outputs } => {
ScannerEvent::Block { block, outputs } => {
let mut block_hash = [0; 32];
block_hash.copy_from_slice(block.as_ref());
// TODO: Move this out from Scanner now that the Scanner no longer handles batches
@ -735,13 +734,11 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
for batch in batches {
info!("created batch {} ({} instructions)", batch.id, batch.instructions.len());
// TODO: Don't reload both sets of keys in full just to get the Substrate public key
tributary_mutable
.substrate_signers
.get_mut(tributary_mutable.key_gen.keys(&key).0.group_key().to_bytes().as_slice())
.unwrap()
.sign(&mut txn, batch)
.await;
if let Some(substrate_signer) = tributary_mutable.substrate_signer.as_mut() {
substrate_signer
.sign(&mut txn, batch)
.await;
}
}
},

View file

@ -22,11 +22,7 @@ use crate::{
#[derive(Clone, Debug)]
pub enum ScannerEvent<N: Network> {
// Block scanned
Block {
key: <N::Curve as Ciphersuite>::G,
block: <N::Block as Block<N>>::Id,
outputs: Vec<N::Output>,
},
Block { block: <N::Block as Block<N>>::Id, outputs: Vec<N::Output> },
// Eventuality completion found on-chain
Completed([u8; 32], <N::Transaction as Transaction<N>>::Id),
}
@ -507,7 +503,8 @@ impl<N: Network, D: Db> Scanner<N, D> {
txn.commit();
// Send all outputs
if !scanner.emit(ScannerEvent::Block { key, block: block_id, outputs }) {
// TODO2: Fire this with all outputs for all keys, not for each key
if !scanner.emit(ScannerEvent::Block { block: block_id, outputs }) {
return;
}
// Write this number as scanned so we won't re-fire these outputs

View file

@ -51,8 +51,7 @@ async fn spend<N: Network, D: Db>(
network.mine_block().await;
}
match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
ScannerEvent::Block { key: this_key, block: _, outputs } => {
assert_eq!(this_key, key);
ScannerEvent::Block { block: _, outputs } => {
assert_eq!(outputs.len(), 1);
// Make sure this is actually a change output
assert_eq!(outputs[0].kind(), OutputType::Change);
@ -89,8 +88,7 @@ pub async fn test_addresses<N: Network>(network: N) {
// Verify the Scanner picked them up
let outputs =
match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
ScannerEvent::Block { key: this_key, block, outputs } => {
assert_eq!(this_key, key);
ScannerEvent::Block { block, outputs } => {
assert_eq!(block, block_id);
assert_eq!(outputs.len(), 1);
assert_eq!(outputs[0].kind(), OutputType::Branch);

View file

@ -55,8 +55,7 @@ pub async fn test_scanner<N: Network>(network: N) {
let verify_event = |mut scanner: ScannerHandle<N, MemDb>| async {
let outputs =
match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
ScannerEvent::Block { key, block, outputs } => {
assert_eq!(key, keys.group_key());
ScannerEvent::Block { block, outputs } => {
assert_eq!(block, block_id);
assert_eq!(outputs.len(), 1);
assert_eq!(outputs[0].kind(), OutputType::External);

View file

@ -36,8 +36,7 @@ pub async fn test_wallet<N: Network>(network: N) {
let block_id = block.id();
match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
ScannerEvent::Block { key: this_key, block, outputs } => {
assert_eq!(this_key, key);
ScannerEvent::Block { block, outputs } => {
assert_eq!(block, block_id);
assert_eq!(outputs.len(), 1);
(block_id, outputs)
@ -109,8 +108,7 @@ pub async fn test_wallet<N: Network>(network: N) {
}
match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
ScannerEvent::Block { key: this_key, block: block_id, outputs: these_outputs } => {
assert_eq!(this_key, key);
ScannerEvent::Block { block: block_id, outputs: these_outputs } => {
assert_eq!(block_id, block.id());
assert_eq!(these_outputs, outputs);
}

View file

@ -122,11 +122,8 @@ pub(crate) async fn sign_batch(
if preprocesses.contains_key(&i) {
match coordinator.recv_message().await {
messages::ProcessorMessage::Substrate(messages::substrate::ProcessorMessage::Update {
key,
batch: this_batch,
}) => {
assert_eq!(key.as_slice(), &id.key);
if batch.is_none() {
assert!(PublicKey::from_raw(id.key.clone().try_into().unwrap())
.verify(&batch_message(&this_batch.batch), &this_batch.signature));