#![cfg_attr(not(feature = "std"), no_std)]

#[allow(deprecated, clippy::let_unit_value)] // TODO
#[frame_support::pallet]
pub mod pallet {
  use scale_info::TypeInfo;

  use sp_core::sr25519::{Public, Signature};
  use sp_std::{vec, vec::Vec};
  use sp_application_crypto::RuntimePublic;

  use frame_system::pallet_prelude::*;
  use frame_support::{pallet_prelude::*, StoragePrefixedMap};

  use serai_primitives::*;
  pub use validator_sets_primitives as primitives;
  use primitives::*;

  use coins_pallet::Pallet as Coins;

  #[pallet::config]
  pub trait Config:
    frame_system::Config<AccountId = Public>
    + coins_pallet::Config
    + pallet_session::Config<ValidatorId = Public>
    + TypeInfo
  {
    type RuntimeEvent: IsType<<Self as frame_system::Config>::RuntimeEvent> + From<Event<Self>>;
  }
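
  // For orientation: a runtime wires this pallet in roughly as sketched below. The module path
  // and the `Runtime`/`RuntimeEvent` names are assumptions for illustration, not items defined
  // in this file; the real requirements are the supertraits listed above (frame_system with
  // AccountId = Public, coins_pallet, pallet_session with ValidatorId = Public, TypeInfo).
  //
  //   impl validator_sets::Config for Runtime {
  //     type RuntimeEvent = RuntimeEvent;
  //   }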

  #[pallet::genesis_config]
  #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
  pub struct GenesisConfig<T: Config> {
    /// Networks to spawn Serai with, and the stake requirement per key share.
    ///
    /// This is the stake requirement to join the initial validator sets.
    /// Every participant at genesis will automatically be assumed to have this much stake.
    /// This stake cannot be withdrawn, however, as there's no actual stake behind it.
    pub networks: Vec<(NetworkId, Amount)>,
    /// List of participants to place in the initial validator sets.
    pub participants: Vec<T::AccountId>,
  }

  impl<T: Config> Default for GenesisConfig<T> {
    fn default() -> Self {
      GenesisConfig { networks: Default::default(), participants: Default::default() }
    }
  }

  #[pallet::pallet]
  pub struct Pallet<T>(PhantomData<T>);

  /// The current session for a network.
  ///
  /// This does not store the current session for Serai. pallet_session handles that.
  // Uses Identity for the lookup to avoid a hash of a severely limited fixed key-space.
  #[pallet::storage]
  pub type CurrentSession<T: Config> = StorageMap<_, Identity, NetworkId, Session, OptionQuery>;
  impl<T: Config> Pallet<T> {
    pub fn session(network: NetworkId) -> Option<Session> {
      if network == NetworkId::Serai {
        Some(Session(pallet_session::Pallet::<T>::current_index()))
      } else {
        CurrentSession::<T>::get(network)
      }
    }

    pub fn latest_decided_session(network: NetworkId) -> Option<Session> {
      CurrentSession::<T>::get(network)
    }
  }

  /// The allocation required per key share.
  // Uses Identity for the lookup to avoid a hash of a severely limited fixed key-space.
  #[pallet::storage]
  #[pallet::getter(fn allocation_per_key_share)]
  pub type AllocationPerKeyShare<T: Config> =
    StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;

  /// The validators selected to be in-set.
  #[pallet::storage]
  #[pallet::getter(fn participants)]
  pub type Participants<T: Config> = StorageMap<
    _,
    Identity,
    NetworkId,
    BoundedVec<Public, ConstU32<{ MAX_KEY_SHARES_PER_SET }>>,
    ValueQuery,
  >;

  /// The validators selected to be in-set, yet with the ability to perform a check for presence.
  // Uses Identity so we can call clear_prefix over network, manually inserting a Blake2 hash
  // before the spammable key.
  #[pallet::storage]
  pub type InSet<T: Config> =
    StorageMap<_, Identity, (NetworkId, [u8; 16], Public), (), OptionQuery>;
  impl<T: Config> Pallet<T> {
    fn in_set_key(
      network: NetworkId,
      account: T::AccountId,
    ) -> (NetworkId, [u8; 16], T::AccountId) {
      (network, sp_io::hashing::blake2_128(&(network, account).encode()), account)
    }

    // This exists as InSet, for Serai, is the validator set for the next session, *not* the
    // current set's validators
    #[inline]
    fn in_active_serai_set(account: Public) -> bool {
      // TODO: This is bounded O(n). Can we get O(1) via a storage lookup, like we do with InSet?
      for validator in pallet_session::Pallet::<T>::validators() {
        if validator == account {
          return true;
        }
      }
      false
    }

    /// Returns true if the account is included in an active set.
    pub fn in_active_set(network: NetworkId, account: Public) -> bool {
      if network == NetworkId::Serai {
        Self::in_active_serai_set(account)
      } else {
        InSet::<T>::contains_key(Self::in_set_key(network, account))
      }
    }

    /// Returns true if the account has been definitively included in an active or upcoming set.
    pub fn in_set(network: NetworkId, account: Public) -> bool {
      if InSet::<T>::contains_key(Self::in_set_key(network, account)) {
        return true;
      }

      if network == NetworkId::Serai {
        return Self::in_active_serai_set(account);
      }

      false
    }

    /// Returns true if the account is present in the latest decided set.
    ///
    /// This is useful when working with `allocation` and `total_allocated_stake`, which return the
    /// latest information.
    pub fn in_latest_decided_set(network: NetworkId, account: Public) -> bool {
      InSet::<T>::contains_key(Self::in_set_key(network, account))
    }
  }

  /// The total stake allocated to this network by the active set of validators.
  #[pallet::storage]
  #[pallet::getter(fn total_allocated_stake)]
  pub type TotalAllocatedStake<T: Config> = StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;

  /// The current amount allocated to a validator set by a validator.
  #[pallet::storage]
  #[pallet::getter(fn allocation)]
  pub type Allocations<T: Config> =
    StorageMap<_, Blake2_128Concat, (NetworkId, Public), Amount, OptionQuery>;
  /// A sorted view of the current allocations premised on the underlying DB itself being sorted.
  /*
    This uses Identity so we can take advantage of the DB's lexicographic ordering to iterate over
    the key space from highest-to-lowest allocated.

    This does remove the protection a hash algorithm here offers against spam attacks (by
    flooding the DB with layers, increasing lookup time and merkle proof sizes, not that we use
    merkle proofs as Polkadot does).

    Since amounts are represented with just 8 bytes, only 16 nibbles are present. This caps the
    potential depth caused by spam at 16 layers (as the underlying DB operates on nibbles).

    While there is an entire 32-byte public key after this, a Blake hash of the key is inserted
    after the amount to prevent the key from also being used to cause layer spam.

    There's also a minimum stake requirement, which further reduces the potential for spam.
  */
  #[pallet::storage]
  type SortedAllocations<T: Config> =
    StorageMap<_, Identity, (NetworkId, [u8; 8], [u8; 16], Public), (), OptionQuery>;
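
  // Worked example of the SortedAllocations key layout (an illustrative sketch, not additional
  // logic): after the storage prefix, an entry is keyed by
  //   network ++ !amount.to_be_bytes() ++ blake2_128(network ++ !amount ++ key) ++ key
  // Because the amount's big-endian bytes are bitwise-NOTed, a larger allocation produces a
  // lexicographically smaller key, e.g. (ignoring the other components):
  //   Amount(3) -> !0x0000000000000003 = 0xfffffffffffffffc
  //   Amount(2) -> !0x0000000000000002 = 0xfffffffffffffffd
  // so iterating the DB in its natural ascending key order visits allocations from highest to
  // lowest, which is exactly what SortedAllocationsIter below relies on.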
  impl<T: Config> Pallet<T> {
    /// A function which takes an amount and generates a byte array with a lexicographic order from
    /// high amount to low amount.
    #[inline]
    fn lexicographic_amount(amount: Amount) -> [u8; 8] {
      let mut bytes = amount.0.to_be_bytes();
      for byte in &mut bytes {
        *byte = !*byte;
      }
      bytes
    }
    #[inline]
    fn sorted_allocation_key(
      network: NetworkId,
      key: Public,
      amount: Amount,
    ) -> (NetworkId, [u8; 8], [u8; 16], Public) {
      let amount = Self::lexicographic_amount(amount);
      let hash = sp_io::hashing::blake2_128(&(network, amount, key).encode());
      (network, amount, hash, key)
    }
    fn recover_amount_from_sorted_allocation_key(key: &[u8]) -> Amount {
      let distance_from_end = 8 + 16 + 32;
      let start_pos = key.len() - distance_from_end;
      let mut raw: [u8; 8] = key[start_pos .. (start_pos + 8)].try_into().unwrap();
      for byte in &mut raw {
        *byte = !*byte;
      }
      Amount(u64::from_be_bytes(raw))
    }
    fn recover_key_from_sorted_allocation_key(key: &[u8]) -> Public {
      Public(key[(key.len() - 32) ..].try_into().unwrap())
    }
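
    // Offset recovery, illustrated (sketch only): the raw storage key ends with
    //   [8-byte inverted amount][16-byte blake2_128][32-byte public key]
    // so distance_from_end = 8 + 16 + 32 = 56. For a key of length L, the inverted amount lives
    // at L - 56 .. L - 48 and the public key at L - 32 .. L. Re-inverting the 8 bytes recovers
    // the original u64, e.g. 0xfffffffffffffffd -> 0x0000000000000002 -> Amount(2).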

    // Returns if this validator already had an allocation set.
    fn set_allocation(network: NetworkId, key: Public, amount: Amount) -> bool {
      let prior = Allocations::<T>::take((network, key));
      if let Some(amount) = prior {
        SortedAllocations::<T>::remove(Self::sorted_allocation_key(network, key, amount));
      }
      if amount.0 != 0 {
        Allocations::<T>::set((network, key), Some(amount));
        SortedAllocations::<T>::set(Self::sorted_allocation_key(network, key, amount), Some(()));
      }
      prior.is_some()
    }
  }

  struct SortedAllocationsIter<T: Config> {
    _t: PhantomData<T>,
    prefix: Vec<u8>,
    last: Vec<u8>,
  }
  impl<T: Config> SortedAllocationsIter<T> {
    fn new(network: NetworkId) -> Self {
      let mut prefix = SortedAllocations::<T>::final_prefix().to_vec();
      prefix.extend(&network.encode());
      Self { _t: PhantomData, prefix: prefix.clone(), last: prefix }
    }
  }
  impl<T: Config> Iterator for SortedAllocationsIter<T> {
    type Item = (Public, Amount);
    fn next(&mut self) -> Option<Self::Item> {
      let next = sp_io::storage::next_key(&self.last)?;
      if !next.starts_with(&self.prefix) {
        return None;
      }
      let key = Pallet::<T>::recover_key_from_sorted_allocation_key(&next);
      let amount = Pallet::<T>::recover_amount_from_sorted_allocation_key(&next);
      self.last = next;
      Some((key, amount))
    }
  }

  /// Pending deallocations, keyed by the Session they become unlocked on.
  #[pallet::storage]
  type PendingDeallocations<T: Config> =
    StorageMap<_, Blake2_128Concat, (NetworkId, Session, Public), Amount, OptionQuery>;

  /// The MuSig key for a validator set.
  #[pallet::storage]
  #[pallet::getter(fn musig_key)]
  pub type MuSigKeys<T: Config> = StorageMap<_, Twox64Concat, ValidatorSet, Public, OptionQuery>;

  /// The generated key pair for a given validator set instance.
  #[pallet::storage]
  #[pallet::getter(fn keys)]
  pub type Keys<T: Config> = StorageMap<_, Twox64Concat, ValidatorSet, KeyPair, OptionQuery>;

  #[pallet::event]
  #[pallet::generate_deposit(pub(super) fn deposit_event)]
  pub enum Event<T: Config> {
    NewSet {
      set: ValidatorSet,
    },
    KeyGen {
      set: ValidatorSet,
      key_pair: KeyPair,
    },
    AllocationIncreased {
      validator: T::AccountId,
      network: NetworkId,
      amount: Amount,
    },
    AllocationDecreased {
      validator: T::AccountId,
      network: NetworkId,
      amount: Amount,
      delayed_until: Option<Session>,
    },
    DeallocationClaimed {
      validator: T::AccountId,
      network: NetworkId,
      session: Session,
    },
    SetRetired {
      set: ValidatorSet,
    },
  }

  impl<T: Config> Pallet<T> {
    fn new_set(network: NetworkId) {
      // Update CurrentSession
      let session = if network != NetworkId::Serai {
        let new_session = CurrentSession::<T>::get(network)
          .map(|session| Session(session.0 + 1))
          .unwrap_or(Session(0));
        CurrentSession::<T>::set(network, Some(new_session));
        new_session
      } else {
        Self::session(network).unwrap_or(Session(0))
      };

      // Clear the current InSet
      {
        let mut in_set_key = InSet::<T>::final_prefix().to_vec();
        in_set_key.extend(network.encode());
        assert!(matches!(
          sp_io::storage::clear_prefix(&in_set_key, Some(MAX_KEY_SHARES_PER_SET)),
          sp_io::KillStorageResult::AllRemoved(_)
        ));
      }

      let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0;

      let mut iter = SortedAllocationsIter::<T>::new(network);
      let mut participants = vec![];
      let mut key_shares = 0;
      let mut total_stake = 0;
      while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) {
        let Some((key, amount)) = iter.next() else { break };

        InSet::<T>::set(Self::in_set_key(network, key), Some(()));
        participants.push(key);

        // This can technically set key_shares to a value exceeding MAX_KEY_SHARES_PER_SET
        // Off-chain, the key shares per validator will be accordingly adjusted
        key_shares += amount.0 / allocation_per_key_share;
        total_stake += amount.0;
      }
      TotalAllocatedStake::<T>::set(network, Some(Amount(total_stake)));

      let set = ValidatorSet { network, session };
      Pallet::<T>::deposit_event(Event::NewSet { set });
      if network != NetworkId::Serai {
        MuSigKeys::<T>::set(set, Some(musig_key(set, &participants)));
      }
      Participants::<T>::set(network, participants.try_into().unwrap());
    }
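
    // Selection sketch (illustrative numbers only): with allocation_per_key_share = 100 and
    // sorted allocations of 350, 250, and 120, the loop admits all three validators with
    // 3 + 2 + 1 = 6 key shares and total_stake = 720. The loop only stops once key_shares
    // reaches MAX_KEY_SHARES_PER_SET, so the final validator admitted may push the sum past the
    // cap; the off-chain amortization mentioned above later scales key shares back down.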
  }

  #[pallet::error]
  pub enum Error<T> {
    /// Validator Set doesn't exist.
    NonExistentValidatorSet,
    /// Not enough allocation to obtain a key share in the set.
    InsufficientAllocation,
    /// Trying to deallocate more than allocated.
    NotEnoughAllocated,
    /// Allocation would cause the validator set to no longer achieve fault tolerance.
    AllocationWouldRemoveFaultTolerance,
    /// Allocation would cause the validator set to never be able to achieve fault tolerance.
    AllocationWouldPreventFaultTolerance,
    /// Deallocation would remove the participant from the set, despite the validator not
    /// specifying so.
    DeallocationWouldRemoveParticipant,
    /// Deallocation would cause the validator set to no longer achieve fault tolerance.
    DeallocationWouldRemoveFaultTolerance,
    /// Deallocation to be claimed doesn't exist.
    NonExistentDeallocation,
    /// Validator Set already generated keys.
    AlreadyGeneratedKeys,
    /// An invalid MuSig signature was provided.
    BadSignature,
    /// Validator wasn't registered or active.
    NonExistentValidator,
  }

  #[pallet::genesis_build]
  impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
    fn build(&self) {
      for (id, stake) in self.networks.clone() {
        AllocationPerKeyShare::<T>::set(id, Some(stake));
        for participant in self.participants.clone() {
          if Pallet::<T>::set_allocation(id, participant, stake) {
            panic!("participants contained duplicates");
          }
        }
        Pallet::<T>::new_set(id);
      }
    }
  }

  impl<T: Config> Pallet<T> {
    fn verify_signature(
      set: ValidatorSet,
      key_pair: &KeyPair,
      signature: &Signature,
    ) -> Result<(), Error<T>> {
      // Confirm a key hasn't been set for this set instance
      if Keys::<T>::get(set).is_some() {
        Err(Error::AlreadyGeneratedKeys)?
      }

      let Some(musig_key) = MuSigKeys::<T>::get(set) else { Err(Error::NonExistentValidatorSet)? };
      if !musig_key.verify(&set_keys_message(&set, key_pair), signature) {
        Err(Error::BadSignature)?;
      }

      Ok(())
    }
  }

  impl<T: Config> Pallet<T> {
    fn account() -> T::AccountId {
      system_address(b"ValidatorSets").into()
    }

    // is_bft returns if the network is able to survive any single node becoming byzantine.
    fn is_bft(network: NetworkId) -> bool {
      let allocation_per_key_share = AllocationPerKeyShare::<T>::get(network).unwrap().0;

      let mut validators_len = 0;
      let mut top = None;
      let mut key_shares = 0;
      for (_, amount) in SortedAllocationsIter::<T>::new(network) {
        validators_len += 1;

        key_shares += amount.0 / allocation_per_key_share;
        if top.is_none() {
          top = Some(key_shares);
        }

        if key_shares > u64::from(MAX_KEY_SHARES_PER_SET) {
          break;
        }
      }

      let Some(top) = top else { return false };

      // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause an off-chain reduction of
      // each validator's key shares until their sum is MAX_KEY_SHARES_PER_SET
      // post_amortization_key_shares_for_top_validator yields what the top validator's key shares
      // would be after such a reduction, letting us evaluate this correctly
      let top = post_amortization_key_shares_for_top_validator(validators_len, top, key_shares);
      (top * 3) < key_shares.min(MAX_KEY_SHARES_PER_SET.into())
    }
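
    // Worked check (illustrative numbers only): four validators, each with exactly one key
    // share, give top = 1 and key_shares = 4, so (1 * 3) < 4 holds and the network is BFT.
    // With three single-share validators, (1 * 3) < 3 fails: any one of them holds a third of
    // the shares, enough to break the > 2/3 honest assumption, so is_bft returns false.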

    fn increase_allocation(
      network: NetworkId,
      account: T::AccountId,
      amount: Amount,
    ) -> DispatchResult {
      let old_allocation = Self::allocation((network, account)).unwrap_or(Amount(0)).0;
      let new_allocation = old_allocation + amount.0;
      let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0;
      if new_allocation < allocation_per_key_share {
        Err(Error::<T>::InsufficientAllocation)?;
      }

      let increased_key_shares =
        (old_allocation / allocation_per_key_share) < (new_allocation / allocation_per_key_share);

      // Check if the net exhibited the ability to handle any single node becoming byzantine
      let mut was_bft = None;
      if increased_key_shares {
        was_bft = Some(Self::is_bft(network));
      }

      // Increase the allocation now
      Self::set_allocation(network, account, Amount(new_allocation));
      Self::deposit_event(Event::AllocationIncreased { validator: account, network, amount });

      // Error if the net no longer can handle any single node becoming byzantine
      if let Some(was_bft) = was_bft {
        if was_bft && (!Self::is_bft(network)) {
          Err(Error::<T>::AllocationWouldRemoveFaultTolerance)?;
        }
      }

      // The above is_bft calls are only used to check a BFT net doesn't become non-BFT
      // Check here if this call would prevent a non-BFT net from *ever* becoming BFT
      if (new_allocation / allocation_per_key_share) >= (MAX_KEY_SHARES_PER_SET / 3).into() {
        Err(Error::<T>::AllocationWouldPreventFaultTolerance)?;
      }

      if InSet::<T>::contains_key(Self::in_set_key(network, account)) {
        TotalAllocatedStake::<T>::set(
          network,
          Some(Amount(TotalAllocatedStake::<T>::get(network).unwrap_or(Amount(0)).0 + amount.0)),
        );
      }

      Ok(())
    }

    /// Decreases a validator's allocation to a set.
    ///
    /// Errors if the capacity provided by this allocation is in use.
    ///
    /// Errors if a partial decrease of allocation puts the remaining allocation below the
    /// minimum requirement.
    ///
    /// The capacity previously provided by the allocation is immediately removed, in order to
    /// ensure it doesn't become used (preventing deallocation).
    ///
    /// Returns if the amount is immediately eligible for deallocation.
    fn decrease_allocation(
      network: NetworkId,
      account: T::AccountId,
      amount: Amount,
    ) -> Result<bool, DispatchError> {
      // TODO: Check it's safe to decrease this set's stake by this amount

      let old_allocation =
        Self::allocation((network, account)).ok_or(Error::<T>::NonExistentValidator)?.0;
      let new_allocation =
        old_allocation.checked_sub(amount.0).ok_or(Error::<T>::NotEnoughAllocated)?;

      // If we're not removing the entire allocation, yet the allocation is no longer at or above
      // the threshold for a key share, error
      let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0;
      if (new_allocation != 0) && (new_allocation < allocation_per_key_share) {
        Err(Error::<T>::DeallocationWouldRemoveParticipant)?;
      }

      let decreased_key_shares =
        (old_allocation / allocation_per_key_share) > (new_allocation / allocation_per_key_share);

      // If this decreases the validator's key shares, error if the new set is unable to handle
      // byzantine faults
      let mut was_bft = None;
      if decreased_key_shares {
        was_bft = Some(Self::is_bft(network));
      }

      // Decrease the allocation now
      // Since we don't also update TotalAllocatedStake here, TotalAllocatedStake may be greater
      // than the sum of all allocations, according to the Allocations StorageMap
      // This is intentional as this allocation has only been queued for deallocation at this time
      Self::set_allocation(network, account, Amount(new_allocation));

      if let Some(was_bft) = was_bft {
        if was_bft && (!Self::is_bft(network)) {
          Err(Error::<T>::DeallocationWouldRemoveFaultTolerance)?;
        }
      }

      // If we're not in-set, or this doesn't decrease our key shares, allow immediate deallocation
      let active = Self::in_set(network, account);
      if (!active) || (!decreased_key_shares) {
        if active {
          // Since it's being immediately deallocated, decrease TotalAllocatedStake
          TotalAllocatedStake::<T>::set(
            network,
            Some(Amount(TotalAllocatedStake::<T>::get(network).unwrap_or(Amount(0)).0 - amount.0)),
          );
        }
        Self::deposit_event(Event::AllocationDecreased {
          validator: account,
          network,
          amount,
          delayed_until: None,
        });
        return Ok(true);
      }

      // Set it to PendingDeallocations, letting it be released upon a future session
      // This unwrap should be fine as this account is active, meaning a session has occurred
      let mut to_unlock_on = Self::session(network).unwrap();
      if network == NetworkId::Serai {
        // Since the next Serai set will already have been decided, we can only deallocate once the
        // next set ends
        to_unlock_on.0 += 2;
      } else {
        to_unlock_on.0 += 1;
      }
      // Increase the session by one, creating a cooldown period
      to_unlock_on.0 += 1;
      let existing =
        PendingDeallocations::<T>::get((network, to_unlock_on, account)).unwrap_or(Amount(0));
      PendingDeallocations::<T>::set(
        (network, to_unlock_on, account),
        Some(Amount(existing.0 + amount.0)),
      );

      Self::deposit_event(Event::AllocationDecreased {
        validator: account,
        network,
        amount,
        delayed_until: Some(to_unlock_on),
      });

      Ok(false)
    }
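
    // Timing sketch (illustrative): if an external-network validator deallocates a key share
    // during Session(5), to_unlock_on starts at 5, gains +1 (the next set), then +1 more as a
    // cooldown, so the stake is claimable once Session(7) has completed its handover. For the
    // Serai network the first step is +2 (the next set is already decided), yielding Session(8).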

    // Checks if this session has completed the handover from the prior session.
    fn handover_completed(network: NetworkId, session: Session) -> bool {
      let Some(current_session) = Self::session(network) else { return false };
      // No handover occurs on genesis
      if current_session.0 == 0 {
        return true;
      }
      // If the session we've been queried about has yet to start, it can't have completed its
      // handover
      if current_session.0 < session.0 {
        return false;
      }
      if current_session.0 == session.0 {
        // Handover is automatically complete for Serai as it doesn't have a handover protocol
        // If not Serai, check the prior session had its keys cleared, which happens once it's
        // retired
        return (network == NetworkId::Serai) ||
          (!Keys::<T>::contains_key(ValidatorSet {
            network,
            session: Session(current_session.0 - 1),
          }));
      }
      // We're currently in a future session, meaning this session definitely performed its
      // handover
      true
    }
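
    // Example (hypothetical values): for an external network with current_session = 5, a query
    // for Session(5) is complete only once the Session(4) keys have been removed by retire_set;
    // a query for Session(6) returns false (not yet started), while Session(4) or earlier
    // returns true. For NetworkId::Serai the same-session case is always considered complete.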
|
|
|
|
|
2023-10-22 07:59:21 +00:00
|
|
|
fn new_session() {
|
2023-10-13 03:59:21 +00:00
|
|
|
for network in serai_primitives::NETWORKS {
|
2023-10-22 00:06:53 +00:00
|
|
|
// If this network hasn't started sessions yet, don't start one now
|
|
|
|
let Some(current_session) = Self::session(network) else { continue };
|
2023-10-10 10:53:24 +00:00
|
|
|
// Only spawn a NewSet if the current set was actually established with a completed
|
|
|
|
// handover protocol
|
2023-10-12 03:42:15 +00:00
|
|
|
if Self::handover_completed(network, current_session) {
|
2023-10-10 10:53:24 +00:00
|
|
|
Pallet::<T>::new_set(network);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-10-14 20:47:25 +00:00
|
|
|
pub fn retire_set(set: ValidatorSet) {
|
2023-10-11 03:55:59 +00:00
|
|
|
MuSigKeys::<T>::remove(set);
|
|
|
|
Keys::<T>::remove(set);
|
2023-10-14 20:47:25 +00:00
|
|
|
Pallet::<T>::deposit_event(Event::SetRetired { set });
|
2023-10-11 03:55:59 +00:00
|
|
|
}
|
2023-10-12 03:42:15 +00:00
|
|
|
|
|
|
|
/// Take the amount deallocatable.
|
|
|
|
///
|
|
|
|
/// `session` refers to the Session the stake becomes deallocatable on.
|
2023-10-22 07:59:21 +00:00
|
|
|
fn take_deallocatable_amount(
|
2023-10-12 03:42:15 +00:00
|
|
|
network: NetworkId,
|
|
|
|
session: Session,
|
|
|
|
key: Public,
|
|
|
|
) -> Option<Amount> {
|
|
|
|
// Check this Session has properly started, completing the handover from the prior session.
|
|
|
|
if !Self::handover_completed(network, session) {
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
PendingDeallocations::<T>::take((network, session, key))
|
|
|
|
}
|
2023-10-10 10:53:24 +00:00
|
|
|
}
|
2023-10-22 07:59:21 +00:00
|
|
|
|
|
|
|
#[pallet::call]
|
|
|
|
impl<T: Config> Pallet<T> {
|
|
|
|
    #[pallet::call_index(0)]
    #[pallet::weight(0)] // TODO
    pub fn set_keys(
      origin: OriginFor<T>,
      network: NetworkId,
      key_pair: KeyPair,
      signature: Signature,
    ) -> DispatchResult {
      ensure_none(origin)?;

      // signature isn't checked as this is an unsigned transaction, and validate_unsigned
      // (called by pre_dispatch) checks it
      let _ = signature;

      let session = Session(pallet_session::Pallet::<T>::current_index());

      let set = ValidatorSet { session, network };

      Keys::<T>::set(set, Some(key_pair.clone()));
      Self::deposit_event(Event::KeyGen { set, key_pair });

      Ok(())
    }

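    /// Allocate `amount` of Serai coins towards validating `network`, transferring them from the
    /// caller to this pallet's account and increasing the caller's allocation for that network.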
    #[pallet::call_index(1)]
    #[pallet::weight(0)] // TODO
    pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
      let validator = ensure_signed(origin)?;
      Coins::<T>::transfer_internal(
        validator,
        Self::account(),
        Balance { coin: Coin::Serai, amount },
      )?;
      Self::increase_allocation(network, validator, amount)
    }

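    /// Deallocate `amount` from the caller's allocation towards `network`.
    ///
    /// If the deallocation can take effect immediately, the coins are returned now. Otherwise,
    /// the coins remain with the pallet and a pending deallocation is recorded, claimable via
    /// `claim_deallocation` once it unlocks.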
    #[pallet::call_index(2)]
    #[pallet::weight(0)] // TODO
    pub fn deallocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
      let account = ensure_signed(origin)?;

      let can_immediately_deallocate = Self::decrease_allocation(network, account, amount)?;
      if can_immediately_deallocate {
        Coins::<T>::transfer_internal(
          Self::account(),
          account,
          Balance { coin: Coin::Serai, amount },
        )?;
      }

      Ok(())
    }

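    /// Claim a deallocation which became unlocked as of `session`, returning the coins to the
    /// caller.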
    #[pallet::call_index(3)]
    #[pallet::weight((0, DispatchClass::Operational))] // TODO
    pub fn claim_deallocation(
      origin: OriginFor<T>,
      network: NetworkId,
      session: Session,
    ) -> DispatchResult {
      let account = ensure_signed(origin)?;
      let Some(amount) = Self::take_deallocatable_amount(network, session, account) else {
        Err(Error::<T>::NonExistentDeallocation)?
      };
      Coins::<T>::transfer_internal(
        Self::account(),
        account,
        Balance { coin: Coin::Serai, amount },
      )?;
      Self::deposit_event(Event::DeallocationClaimed { validator: account, network, session });
      Ok(())
    }
  }

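  // Only `set_keys` is valid as an unsigned transaction, and only when accompanied by a signature
  // which `verify_signature` accepts for the current validator set.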
  #[pallet::validate_unsigned]
  impl<T: Config> ValidateUnsigned for Pallet<T> {
    type Call = Call<T>;

    fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity {
      // Match to be exhaustive
      let (network, key_pair, signature) = match call {
        Call::set_keys { network, ref key_pair, ref signature } => (network, key_pair, signature),
        Call::allocate { .. } | Call::deallocate { .. } | Call::claim_deallocation { .. } => {
          Err(InvalidTransaction::Call)?
        }
        Call::__Ignore(_, _) => unreachable!(),
      };

      let session = Session(pallet_session::Pallet::<T>::current_index());

      let set = ValidatorSet { session, network: *network };
      match Self::verify_signature(set, key_pair, signature) {
        Err(Error::AlreadyGeneratedKeys) => Err(InvalidTransaction::Stale)?,
        Err(Error::NonExistentValidatorSet) |
        Err(Error::InsufficientAllocation) |
        Err(Error::NotEnoughAllocated) |
        Err(Error::AllocationWouldRemoveFaultTolerance) |
        Err(Error::AllocationWouldPreventFaultTolerance) |
        Err(Error::DeallocationWouldRemoveParticipant) |
        Err(Error::DeallocationWouldRemoveFaultTolerance) |
        Err(Error::NonExistentDeallocation) |
        Err(Error::NonExistentValidator) |
        Err(Error::BadSignature) => Err(InvalidTransaction::BadProof)?,
        Err(Error::__Ignore(_, _)) => unreachable!(),
        Ok(()) => (),
      }

      ValidTransaction::with_tag_prefix("validator-sets")
        .and_provides(set)
        // Set a 10-block longevity, though this should be included in the next block
        .longevity(10)
        .propagate(true)
        .build()
    }

    // Explicitly provide a pre_dispatch which calls validate_unsigned
    fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
      Self::validate_unsigned(TransactionSource::InBlock, call).map(|_| ()).map_err(Into::into)
    }
  }

  // Call order is end_session(i - 1) -> start_session(i) -> new_session(i + 1)
  // new_session(i + 1) is called immediately after start_session(i)
  // We then wait until the session ends, at which point end_session(i) is called, and so on.
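  //
  // For example, around session i = 5: end_session(4) -> start_session(5) -> new_session(6),
  // with end_session(5) following once session 5 ends.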
  impl<T: Config> pallet_session::SessionManager<T::ValidatorId> for Pallet<T> {
    fn new_session(_new_index: u32) -> Option<Vec<T::ValidatorId>> {
      Self::new_session();
      // TODO: Where do we return their stake?
      Some(Self::participants(NetworkId::Serai).into())
    }

    fn new_session_genesis(_: u32) -> Option<Vec<T::ValidatorId>> {
      // TODO: Because we don't call new_session here, we don't emit NewSet { Serai, session: 1 }
      Some(Self::participants(NetworkId::Serai).into())
    }

    fn end_session(end_index: u32) {
      Self::retire_set(ValidatorSet { network: NetworkId::Serai, session: Session(end_index) })
    }

    fn start_session(_start_index: u32) {}
  }
}

pub use pallet::*;