Correct a couple years of accumulated typos

This commit is contained in:
Luke Parker 2023-12-17 02:06:51 -05:00
parent 9c3329abeb
commit c2fffb9887
No known key found for this signature in database
40 changed files with 63 additions and 63 deletions

View file

@ -84,7 +84,7 @@ impl Rpc {
for line in res.split('\n') {
// This doesn't check if the arguments are as expected
// This is due to Bitcoin supporting a large amount of optional arguments, which
// occassionally change, with their own mechanism of text documentation, making matching off
// occasionally change, with their own mechanism of text documentation, making matching off
// it a quite involved task
// Instead, once we've confirmed the methods are present, we assume our arguments are aligned
// Else we'll error at time of call

View file

@ -186,7 +186,7 @@ impl SignableTransaction {
// src/policy/policy.cpp#L295-L298
// implements this as expected
// Technically, it takes whatever's greater, the weight or the amount of signature operatons
// Technically, it takes whatever's greater, the weight or the amount of signature operations
// multiplied by DEFAULT_BYTES_PER_SIGOP (20)
// We only use 1 signature per input, and our inputs have a weight exceeding 20
// Accordingly, our inputs' weight will always be greater than the cost of the signature ops

View file

@ -397,7 +397,7 @@ impl Scanner {
}
let subaddress = *subaddress.unwrap();
// If it has torsion, it'll substract the non-torsioned shared key to a torsioned key
// If it has torsion, it'll subtract the non-torsioned shared key to a torsioned key
// We will not have a torsioned key in our HashMap of keys, so we wouldn't identify it as
// ours
// If we did though, it'd enable bypassing the included burning bug protection

View file

@ -22,7 +22,7 @@ pub fn serai_db_key(
///
/// * `db_name` - A database name
/// * `field_name` - An item name
/// * `args` - Comma seperated list of key arguments
/// * `args` - Comma separated list of key arguments
/// * `field_type` - The return type
///
/// # Example

View file

@ -6,7 +6,7 @@ use std::{
use crate::*;
/// An atomic operation for the in-memory databae.
/// An atomic operation for the in-memory database.
#[must_use]
#[derive(PartialEq, Eq, Debug)]
pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);

View file

@ -140,7 +140,7 @@ async fn potentially_cosign_block(
// If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks
// trigger a cosigning protocol covering it
// This means there will be the maximum delay allowed from a block needing cosigning occuring
// This means there will be the maximum delay allowed from a block needing cosigning occurring
// and a cosign for it triggering
let maximally_latent_cosign_block =
skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);

View file

@ -42,7 +42,7 @@ pub fn dkg_confirmation_nonces(
.preprocess()
}
// If there's an error generating a key pair, return any errors which would've occured when
// If there's an error generating a key pair, return any errors which would've occurred when
// executing the DkgConfirmer in order to stay in sync with those who did.
//
// The caller must ensure only error_generating_key_pair or generated_key_pair is called for a

View file

@ -38,7 +38,7 @@
only way to operate on distinct received messages would be if:
1) A logical flaw exists, letting new messages overwrite prior messages
2) A reorganization occured from chain A to chain B, and with it, different messages
2) A reorganization occurred from chain A to chain B, and with it, different messages
Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While
a significant amount of processes may be byzantine, leading to BFT being broken, that still will

View file

@ -74,7 +74,7 @@ fn invalid_block() {
assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());
}
// Mutate tranactions merkle
// Mutate transactions merkle
{
let mut block = block;
block.header.transactions = Blake2s256::digest(block.header.transactions).into();

View file

@ -52,7 +52,7 @@ pub trait Ciphersuite:
/// Group element type.
type G: Group<Scalar = Self::F> + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq;
/// Hash algorithm used with this curve.
// Requires BlockSizeUser so it can be used within Hkdf which requies that.
// Requires BlockSizeUser so it can be used within Hkdf which requires that.
type H: Send + Clone + BlockSizeUser + Digest + HashMarker + SecureDigest;
/// ID for this curve.

View file

@ -222,7 +222,7 @@ impl FieldElement {
FieldElement(reduce(U512::from(value.mul_wide(&value))))
}
/// Perform an exponentation.
/// Perform an exponentiation.
pub fn pow(&self, other: FieldElement) -> FieldElement {
let mut table = [FieldElement::ONE; 16];
table[1] = *self;

View file

@ -118,7 +118,7 @@ fn cipher<C: Ciphersuite>(context: &str, ecdh: &Zeroizing<C::G>) -> ChaCha20 {
zeroize(challenge.as_mut());
// Since the key is single-use, it doesn't matter what we use for the IV
// The isssue is key + IV reuse. If we never reuse the key, we can't have the opportunity to
// The issue is key + IV reuse. If we never reuse the key, we can't have the opportunity to
// reuse a nonce
// Use a static IV in acknowledgement of this
let mut iv = Cc20Iv::default();

View file

@ -20,7 +20,7 @@ pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
let mut res1 = F0::ZERO;
let mut res2 = F1::ZERO;
// Uses the bits API to ensure a consistent endianess
// Uses the bits API to ensure a consistent endianness
let mut bits = scalar.to_le_bits();
scalar.zeroize();
// Convert it to big endian

View file

@ -28,7 +28,7 @@ mod tests;
pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
// From here, there are three ways to get a scalar under the ff/group API
// 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge")))
// 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianess
// 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianness
// and loading it in
// 3: Iterating over each byte and manually doubling/adding. This is simplest

View file

@ -139,7 +139,7 @@ macro_rules! field {
}
impl $FieldName {
/// Perform an exponentation.
/// Perform an exponentiation.
pub fn pow(&self, other: $FieldName) -> $FieldName {
let mut table = [Self(Residue::ONE); 16];
table[1] = *self;

View file

@ -51,7 +51,7 @@ pub trait Algorithm<C: Curve>: Send + Sync + Clone {
/// Read an addendum from a reader.
fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<Self::Addendum>;
/// Proccess the addendum for the specified participant. Guaranteed to be called in order.
/// Process the addendum for the specified participant. Guaranteed to be called in order.
fn process_addendum(
&mut self,
params: &ThresholdView<C>,

View file

@ -43,7 +43,7 @@ pub struct Vectors {
}
// Vectors are expected to be formatted per the IETF proof of concept
// The included vectors are direcly from
// The included vectors are directly from
// https://github.com/cfrg/draft-irtf-cfrg-frost/tree/draft-irtf-cfrg-frost-14/poc
#[cfg(test)]
impl From<serde_json::Value> for Vectors {

View file

@ -1,7 +1,7 @@
[package]
name = "multiexp"
version = "0.4.0"
description = "Multiexponentation algorithms for ff/group"
description = "Multiexponentiation algorithms for ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/multiexp"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]

View file

@ -2,7 +2,7 @@
A multiexp implementation for ff/group implementing Straus and Pippenger. A
batch verification API is also available via the "batch" feature, which enables
secure multiexponentation batch verification given a series of values which
secure multiexponentiation batch verification given a series of values which
should sum to the identity, identifying which doesn't via binary search if they
don't.

View file

@ -173,7 +173,7 @@ fn algorithm(len: usize) -> Algorithm {
}
}
/// Performs a multiexponentation, automatically selecting the optimal algorithm based on the
/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
/// amount of pairs.
pub fn multiexp<G: Group>(pairs: &[(G::Scalar, G)]) -> G
where
@ -188,7 +188,7 @@ where
}
}
/// Performs a multiexponentation in variable time, automatically selecting the optimal algorithm
/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
/// based on the amount of pairs.
pub fn multiexp_vartime<G: Group>(pairs: &[(G::Scalar, G)]) -> G
where

View file

@ -5,7 +5,7 @@ use group::Group;
use crate::prep_bits;
// Pippenger's algorithm for multiexponentation, as published in the SIAM Journal on Computing
// Pippenger's algorithm for multiexponentiation, as published in the SIAM Journal on Computing
// DOI: 10.1137/0209022
pub(crate) fn pippenger<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> G
where

View file

@ -22,7 +22,7 @@ fn prep_tables<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> Vec<Vec<G>> {
tables
}
// Straus's algorithm for multiexponentation, as published in The American Mathematical Monthly
// Straus's algorithm for multiexponentiation, as published in The American Mathematical Monthly
// DOI: 10.2307/2310929
pub(crate) fn straus<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> G
where

View file

@ -83,7 +83,7 @@ impl<C: Ciphersuite> SchnorrSignature<C> {
}
/// Return the series of pairs whose products sum to zero for a valid signature.
/// This is inteded to be used with a multiexp.
/// This is intended to be used with a multiexp.
pub fn batch_statements(&self, public_key: C::G, challenge: C::F) -> [(C::F, C::G); 3] {
// s = r + ca
// sG == R + cA

View file

@ -61,7 +61,7 @@ pub fn ack_challenge(
id: u64,
nonce: <Ristretto as Ciphersuite>::G,
) -> <Ristretto as Ciphersuite>::F {
let mut transcript = RecommendedTranscript::new(b"Serai Message Queue v0.1 Ackowledgement");
let mut transcript = RecommendedTranscript::new(b"Serai Message Queue v0.1 Acknowledgement");
transcript.domain_separate(b"metadata");
transcript.append_message(b"to", borsh::to_vec(&to).unwrap());
transcript.append_message(b"to_key", to_key.to_bytes());

View file

@ -65,8 +65,8 @@ USER coordinator
WORKDIR /home/coordinator
# Copy the Coordinator binary and relevant license
COPY --from=builder --chown=processsor /serai/bin/serai-coordinator /bin/
COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
COPY --from=builder --chown=processor /serai/bin/serai-coordinator /bin/
COPY --from=builder --chown=processor /serai/AGPL-3.0 .
# Run coordinator
CMD ["serai-coordinator"]

View file

@ -8,8 +8,8 @@ USER coordinator
WORKDIR /home/coordinator
# Copy the Coordinator binary and relevant license
COPY --from=builder --chown=processsor /serai/bin/serai-coordinator /bin/
COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
COPY --from=builder --chown=processor /serai/bin/serai-coordinator /bin/
COPY --from=builder --chown=processor /serai/AGPL-3.0 .
# Run coordinator
CMD ["serai-coordinator"]

View file

@ -8,8 +8,8 @@ USER processor
WORKDIR /home/processor
# Copy the Processor binary and relevant license
COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/
COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/
COPY --from=builder --chown=processor /serai/AGPL-3.0 .
# Run processor
CMD ["serai-processor"]

View file

@ -65,8 +65,8 @@ USER processor
WORKDIR /home/processor
# Copy the Processor binary and relevant license
COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/
COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/
COPY --from=builder --chown=processor /serai/AGPL-3.0 .
# Run processor
CMD ["serai-processor"]

View file

@ -65,8 +65,8 @@ USER processor
WORKDIR /home/processor
# Copy the Processor binary and relevant license
COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/
COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/
COPY --from=builder --chown=processor /serai/AGPL-3.0 .
# Run processor
CMD ["serai-processor"]

View file

@ -306,7 +306,7 @@ impl_from!(substrate, ProcessorMessage, Substrate);
// Intent generation code
const COORDINATOR_UID: u8 = 0;
const PROCESSSOR_UID: u8 = 1;
const PROCESSOR_UID: u8 = 1;
const TYPE_KEY_GEN_UID: u8 = 2;
const TYPE_SIGN_UID: u8 = 3;
@ -401,7 +401,7 @@ impl ProcessorMessage {
key_gen::ProcessorMessage::Blame { id, .. } => (5, id),
};
let mut res = vec![PROCESSSOR_UID, TYPE_KEY_GEN_UID, sub];
let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub];
res.extend(&id.encode());
res
}
@ -415,7 +415,7 @@ impl ProcessorMessage {
sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()),
};
let mut res = vec![PROCESSSOR_UID, TYPE_SIGN_UID, sub];
let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub];
res.extend(&id);
res
}
@ -430,7 +430,7 @@ impl ProcessorMessage {
coordinator::ProcessorMessage::CosignedBlock { block, .. } => (5, block.encode()),
};
let mut res = vec![PROCESSSOR_UID, TYPE_COORDINATOR_UID, sub];
let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub];
res.extend(&id);
res
}
@ -443,7 +443,7 @@ impl ProcessorMessage {
}
};
let mut res = vec![PROCESSSOR_UID, TYPE_SUBSTRATE_UID, sub];
let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub];
res.extend(&id);
res
}

View file

@ -572,7 +572,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
.unwrap()
.blame(accuser, accused, network_share, network_blame);
// If thw accused was blamed for either, mark them as at fault
// If the accused was blamed for either, mark them as at fault
if (substrate_blame == accused) || (network_blame == accused) {
return ProcessorMessage::Blame { id, participant: accused };
}

View file

@ -547,7 +547,7 @@ impl<N: Network, D: Db> Scanner<N, D> {
let key_vec = key.to_bytes().as_ref().to_vec();
// TODO: These lines are the ones which will cause a really long-lived lock acquisiton
// TODO: These lines are the ones which will cause a really long-lived lock acquisition
for output in network.get_outputs(&block, key).await {
assert_eq!(output.key(), key);
if output.balance().amount.0 >= N::DUST {

View file

@ -18,7 +18,7 @@ pub struct Scheduler<N: Network> {
key: <N::Curve as Ciphersuite>::G,
coin: Coin,
// Serai, when it has more outputs expected than it can handle in a single tranaction, will
// Serai, when it has more outputs expected than it can handle in a single transaction, will
// schedule the outputs to be handled later. Immediately, it just creates additional outputs
// which will eventually handle those outputs
//
@ -321,7 +321,7 @@ impl<N: Network> Scheduler<N> {
// If we don't have UTXOs available, don't try to continue
if self.utxos.is_empty() {
log::info!("no utxos currently avilable");
log::info!("no utxos currently available");
return plans;
}

View file

@ -507,14 +507,14 @@ impl Network for Bitcoin {
// The output should be ~36 bytes, or 144 weight units
// The overhead should be ~20 bytes at most, or 80 weight units
// 684 weight units, 171 vbytes, round up to 200
// 200 vbytes at 1 sat/weight (our current minumum fee, 4 sat/vbyte) = 800 sat fee for the
// 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the
// aggregation TX
const COST_TO_AGGREGATE: u64 = 800;
// Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT)
// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes
// While our inputs are entirely SegWit, such fine tuning is not necessary and could create
// issues in the future (if the size decreases or we mis-evaluate it)
// issues in the future (if the size decreases or we misevaluate it)
// It also offers a minimal amount of benefit when we are able to logarithmically accumulate
// inputs
// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and

View file

@ -77,7 +77,7 @@ impl<N: Network> Payment<N> {
pub struct Plan<N: Network> {
pub key: <N::Curve as Ciphersuite>::G,
pub inputs: Vec<N::Output>,
/// The payments this Plan is inteded to create.
/// The payments this Plan is intended to create.
///
/// This should only contain payments leaving Serai. While it is acceptable for users to enter
/// Serai's address(es) as the payment address, as that'll be handled by anything which expects
@ -152,7 +152,7 @@ impl<N: Network> Plan<N> {
let change = if let Some(change) = &self.change {
change.clone().try_into().map_err(|_| {
io::Error::other(format!(
"an address we said to use as change couldn't be convered to a Vec<u8>: {}",
"an address we said to use as change couldn't be converted to a Vec<u8>: {}",
change.to_string(),
))
})?

View file

@ -142,7 +142,7 @@ pub async fn test_no_deadlock_in_multisig_completed<N: Network>(network: N) {
}
};
// The ack_block acquisiton shows the Scanner isn't maintaining the lock on its own thread after
// The ack_block acquisition shows the Scanner isn't maintaining the lock on its own thread after
// emitting the Block event
// TODO: This is incomplete. Also test after emitting Completed
let mut txn = db.txn();

View file

@ -108,7 +108,7 @@ impl Coin {
// more liquidity, the only reason we'd have so many coins from a network is if there's no DEX
// on-chain
// There's probably no chain with so many *worthwhile* coins and no on-chain DEX
// This could probably be just 4, yet 8 is a hedge for the unforseen
// This could probably be just 4, yet 8 is a hedge for the unforeseen
// If necessary, this can be increased with a fork
pub const MAX_COINS_PER_NETWORK: u32 = 8;

View file

@ -57,7 +57,7 @@ pub mod pallet {
pub struct RegisteredRetirementSignal<T: Config> {
in_favor_of: [u8; 32],
registrant: T::AccountId,
registed_at: BlockNumberFor<T>,
registered_at: BlockNumberFor<T>,
}
impl<T: Config> RegisteredRetirementSignal<T> {
@ -135,10 +135,10 @@ pub mod pallet {
RetirementSignalLockedIn,
RetirementSignalAlreadyRegistered,
NotRetirementSignalRegistrant,
NonExistantRetirementSignal,
NonExistentRetirementSignal,
ExpiredRetirementSignal,
NotValidator,
RevokingNonExistantFavor,
RevokingNonExistentFavor,
}
// 80% threshold
@ -236,7 +236,7 @@ pub mod pallet {
for_network: NetworkId,
) -> DispatchResult {
if !Favors::<T>::contains_key((signal_id, for_network), account) {
Err::<(), _>(Error::<T>::RevokingNonExistantFavor)?;
Err::<(), _>(Error::<T>::RevokingNonExistentFavor)?;
}
Favors::<T>::remove((signal_id, for_network), account);
Self::deposit_event(Event::<T>::FavorRevoked { signal_id, by: account, for_network });
@ -275,7 +275,7 @@ pub mod pallet {
let signal = RegisteredRetirementSignal {
in_favor_of,
registrant: account,
registed_at: frame_system::Pallet::<T>::block_number(),
registered_at: frame_system::Pallet::<T>::block_number(),
};
let signal_id = signal.id();
@ -301,7 +301,7 @@ pub mod pallet {
let account = ensure_signed(origin)?;
let Some(registered_signal) = RegisteredRetirementSignals::<T>::get(retirement_signal_id)
else {
return Err::<(), _>(Error::<T>::NonExistantRetirementSignal.into());
return Err::<(), _>(Error::<T>::NonExistentRetirementSignal.into());
};
if account != registered_signal.registrant {
Err::<(), _>(Error::<T>::NotRetirementSignalRegistrant)?;
@ -341,7 +341,7 @@ pub mod pallet {
// We don't have to do this for a `Halt` signal as `Halt` doesn't have the registration
// process
let Some(registered_signal) = RegisteredRetirementSignals::<T>::get(signal_id) else {
return Err::<(), _>(Error::<T>::NonExistantRetirementSignal.into());
return Err::<(), _>(Error::<T>::NonExistentRetirementSignal.into());
};
// Check the signal isn't out of date
@ -350,7 +350,7 @@ pub mod pallet {
// The reason to still have it is because locking in a dated runtime may cause a corrupt
// blockchain and lead to a failure in system integrity
// `Halt`, which doesn't have this check, at worst causes temporary downtime
if (registered_signal.registed_at + T::RetirementValidityDuration::get().into()) <
if (registered_signal.registered_at + T::RetirementValidityDuration::get().into()) <
frame_system::Pallet::<T>::block_number()
{
Err::<(), _>(Error::<T>::ExpiredRetirementSignal)?;
@ -448,7 +448,7 @@ pub mod pallet {
// Check this Signal exists (which would've been implied by Favors for it existing)
if let SignalId::Retirement(signal_id) = signal_id {
if RegisteredRetirementSignals::<T>::get(signal_id).is_none() {
Err::<(), _>(Error::<T>::NonExistantRetirementSignal)?;
Err::<(), _>(Error::<T>::NonExistentRetirementSignal)?;
}
}
}

View file

@ -115,7 +115,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {
.await;
// Confirm the key pair
// TODO: Beter document network_latest_finalized_block's genesis state, and error if a set claims
// TODO: Better document network_latest_finalized_block's genesis state, and error if a set claims
// [0; 32] was finalized
let context = SubstrateContext {
serai_time: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(),

View file

@ -272,7 +272,7 @@ fn send_test() {
for (i, coordinator) in coordinators.iter_mut().enumerate() {
if !participating.contains(&i) {
coordinator.publish_transacton(&ops, &tx).await;
// Tell them of it as a completion of the relevant signing nodess
// Tell them of it as a completion of the relevant signing nodes
coordinator
.send_message(messages::sign::CoordinatorMessage::Completed {
session: Session(0),
@ -297,8 +297,8 @@ fn send_test() {
}
// TODO: Test the Eventuality from the blockchain, instead of from the coordinator
// TODO: Test what happenns when Completed is sent with a non-existent TX ID
// TODO: Test what happenns when Completed is sent with a non-completing TX ID
// TODO: Test what happens when Completed is sent with a non-existent TX ID
// TODO: Test what happens when Completed is sent with a non-completing TX ID
});
}
}