Add a Sessions abstraction for validator-sets storage

Luke Parker 2025-03-07 04:02:11 -05:00
parent 3fc00830de
commit 02afed13b4
3 changed files with 455 additions and 156 deletions
substrate
primitives/src/validator_sets
validator-sets/src


@@ -16,10 +16,18 @@ pub use slashes::*;
/// The type used to identify a specific session of validators.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct Session(pub u32);
/// The type used to identify a specific set of validators for an external network.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct ExternalValidatorSet {
/// The network this set of validators are for.
pub network: ExternalNetworkId,
@@ -29,6 +37,10 @@ pub struct ExternalValidatorSet {
/// The type used to identify a specific set of validators.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct ValidatorSet {
/// The network this set of validators are for.
pub network: NetworkId,
@@ -84,27 +96,11 @@ impl ExternalValidatorSet {
///
/// Reduction occurs by reducing each validator in a reverse round-robin. This means the worst
/// validators lose their key shares first.
pub fn amortize_excess_key_shares(validators: &mut [(Public, u64)]) {
let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::<u64>();
pub fn amortize_excess_key_shares(validators: &mut [(sp_core::sr25519::Public, u64)]) {
let total_key_shares = validators.iter().map(|(_key, shares)| shares).sum::<u64>();
for i in 0 .. usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET)))
.unwrap()
{
validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;
}
}
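As a worked illustration of the reverse round-robin (a standalone sketch, using a hypothetical maximum of 5 key shares in place of MAX_KEY_SHARES_PER_SET and plain strings in place of public keys):

// Standalone sketch: the same loop as above, with the maximum passed as a parameter.
fn amortize(validators: &mut [(&str, u64)], max_key_shares: u64) {
  let total_key_shares = validators.iter().map(|(_key, shares)| shares).sum::<u64>();
  for i in 0 .. usize::try_from(total_key_shares.saturating_sub(max_key_shares)).unwrap() {
    validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;
  }
}

fn main() {
  // 3 + 2 + 2 = 7 key shares, 2 over the hypothetical maximum of 5
  let mut validators = [("alice", 3), ("bob", 2), ("carol", 2)];
  amortize(&mut validators, 5);
  // The worst validators lose a key share first: carol, then bob
  assert_eq!(validators, [("alice", 3), ("bob", 1), ("carol", 1)]);
}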
/// Returns the post-amortization key shares for the top validator.
///
/// May panic when `validators == 0` or
/// `(top_validator_key_shares * validators) < total_key_shares`.
pub fn post_amortization_key_shares_for_top_validator(
validators: usize,
top_validator_key_shares: u64,
total_key_shares: u64,
) -> u64 {
let excess = total_key_shares.saturating_sub(MAX_KEY_SHARES_PER_SET.into());
// Since the top validator is amortized last, the question is how many complete iterations of
// the round robin occur
let round_robin_iterations = excess / u64::try_from(validators).unwrap();
top_validator_key_shares - round_robin_iterations
}


@@ -1,10 +1,70 @@
use core::marker::PhantomData;
use sp_core::{Encode, sr25519::Public};
use serai_primitives::{constants::MAX_KEY_SHARES_PER_SET, network_id::NetworkId, balance::Amount};
use frame_support::storage::{StorageMap, StoragePrefixedMap as Spm};
use frame_support::storage::{StorageMap, StoragePrefixedMap};
/// The key to use for the allocations map.
type AllocationsKey = (NetworkId, Public);
/// The key to use for the sorted allocations map.
type SortedAllocationsKey = (NetworkId, [u8; 8], [u8; 16], Public);
/// The storage underlying `Allocations`.
///
/// This storage is expected to be owned by the `Allocations` interface and not directly read from
/// or written to.
pub(crate) trait AllocationsStorage {
/// An opaque map storing allocations.
type Allocations: StorageMap<AllocationsKey, Amount, Query = Option<Amount>>;
/// An opaque map storing allocations in a sorted manner.
///
/// This MUST be instantiated with a map using `Identity` for its hasher.
/*
This is premised on the underlying trie iterating from keys with low-bytes to keys with
high-bytes.
We use Identity so we don't have a hasher add pseudorandom bytes to the start of the keys. This
does remove the protection using a hash algorithm here offers against spam attacks (by flooding
the DB with layers, increasing lookup time and Merkle proof sizes, not that we use Merkle
proofs as Polkadot does).
Since amounts are represented with just 8 bytes, only 16 nibbles are present. This caps the
potential depth caused by spam at 16 layers (as the underlying DB operates on nibbles). While
there is an entire 32-byte public key after this, a Blake hash of the key is inserted after the
amount to prevent the key from also being used to cause layer spam. We use a `[u8; 16]` to
represent this, and not an explicit `Blake2_128Concat` hasher, to ensure all prior keys are part
of the hash. A Substrate hasher would only hash the immediately following key.
There's also a minimum stake requirement, which further reduces the potential for spam.
*/
type SortedAllocations: StorageMap<SortedAllocationsKey, (), Query = Option<()>>
+ StoragePrefixedMap<()>;
}
/// An interface for managing validators' allocations.
pub(crate) trait Allocations {
/// Set an allocation.
///
/// Returns the validator's prior allocation.
fn set_allocation(network: NetworkId, key: Public, amount: Amount) -> Option<Amount>;
/// Get an allocation.
fn get_allocation(network: NetworkId, key: Public) -> Option<Amount>;
/// Iterate over allocations for a network, yielding the highest-valued allocations.
///
/// This will yield all validators present whose allocation is greater than or equal to the
/// specified minimum.
///
/// If two validators share an allocation, the order is deterministic yet otherwise undefined.
fn iter_allocations(
network: NetworkId,
minimum_allocation: Amount,
) -> impl Iterator<Item = (Public, Amount)>;
/// Calculate the expected key shares for a network, per the current allocations.
fn expected_key_shares(network: NetworkId, allocation_per_key_share: Amount) -> u64;
}
/// Reverses the lexicographic order of a given byte array.
///
@@ -17,159 +77,92 @@ fn reverse_lexicographic_order<const N: usize>(bytes: [u8; N]) -> [u8; N] {
res
}
/// The key to use for the allocations map.
type AllocationsKey = (NetworkId, Public);
/// The key to use for the sorted allocations map.
type SortedAllocationsKey = (NetworkId, [u8; 8], [u8; 16], Public);
/// The storage key to use with the sorted allocations map.
#[inline]
fn sorted_allocation_storage_key(
network: NetworkId,
key: Public,
amount: Amount,
) -> (NetworkId, [u8; 8], [u8; 16], Public) {
// We want the accounts with the highest allocations to be first. Since the DB iterates from
// low to high, we take the BE bytes of the amount (meaning the lowest-value allocations have
// the lowest lexicographic order and will be first), then reverse their order.
let amount = reverse_lexicographic_order(amount.0.to_be_bytes());
// Hash all of the keys to best defend against layer-spam attacks
let hash = sp_io::hashing::blake2_128(&(network, amount, key).encode());
(network, amount, hash, key)
}
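The diff elides the body of `reverse_lexicographic_order`; the sketch below assumes it simply complements each byte, which is enough to show why larger allocations end up earliest under a low-to-high iteration:

// Assumed implementation: complementing each byte inverts lexicographic order.
fn reverse_lexicographic_order<const N: usize>(bytes: [u8; N]) -> [u8; N] {
  let mut res = [0; N];
  for (i, byte) in bytes.into_iter().enumerate() {
    res[i] = !byte;
  }
  res
}

fn main() {
  let smaller_allocation = reverse_lexicographic_order(100u64.to_be_bytes());
  let larger_allocation = reverse_lexicographic_order(5_000u64.to_be_bytes());
  // The larger allocation now has the lexicographically smaller key component, so a
  // low-to-high iteration over the sorted allocations map yields it first
  assert!(larger_allocation < smaller_allocation);
}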
/// An interface for managing validators' allocations.
///
/// `SortedAllocationsMap` MUST be instantiated with a map using `Identity` for its hasher.
/*
This is premised on the underlying trie iterating from keys with low-bytes to keys with
high-bytes.
// Recover the user's public key from a storage key.
fn recover_key_from_sorted_allocation_storage_key(key: &[u8]) -> Public {
<Public as From<[u8; 32]>>::from(key[(key.len() - 32) ..].try_into().unwrap())
}
We use Identity so we don't have a hasher add pseudorandom bytes to the start of the keys. This
does remove the protection using a hash algorithm here offers against spam attacks (by flooding
the DB with layers, increasing lookup time and Merkle proof sizes, not that we use Merkle proofs
proofs as Polkadot does).
// Recover the amount allocated from a storage key.
fn recover_amount_from_sorted_allocation_storage_key(key: &[u8]) -> Amount {
// We read the amount from the end of the key as everything after the amount is fixed-length
let distance_from_end = 8 + 16 + 32;
let start_pos = key.len() - distance_from_end;
let raw: [u8; 8] = key[start_pos .. (start_pos + 8)].try_into().unwrap();
// Take advantage of how this is a bijective mapping
let raw = reverse_lexicographic_order(raw);
Amount(u64::from_be_bytes(raw))
}
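A hypothetical round-trip over these helpers (a sketch only; it assumes the same imports as the `test_allocations` test below, and relies on the SCALE encoding of the key tuple ending with the same fixed-length amount, hash, and public key these helpers read back):

#[test]
fn sorted_allocation_storage_key_round_trip() {
  // Hypothetical values; any network, key, and amount would do
  let network = NetworkId::deserialize_reader(&mut [1].as_slice()).unwrap();
  let key = Public::from([0xaa; 32]);
  let amount = Amount(12_345);
  // The recovery helpers only read the trailing fixed-length components, so the raw
  // encoding of the key tuple is sufficient here
  let storage_key = sorted_allocation_storage_key(network, key, amount).encode();
  assert_eq!(recover_key_from_sorted_allocation_storage_key(&storage_key), key);
  assert_eq!(recover_amount_from_sorted_allocation_storage_key(&storage_key), amount);
}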
Since amounts are represented with just 8 bytes, only 16 nibbles are present. This caps the
potential depth caused by spam at 16 layers (as the underlying DB operates on nibbles). While
there is an entire 32-byte public key after this, a Blake hash of the key is inserted after the
amount to prevent the key from also being used to cause layer spam. We use a `[u8; 16]` to
represent this, and not a explicit `Blake2_128Concat` hasher, to ensure all prior keys are part
part of the hash. A Substrate-hasher would only hash the immediately following key.
There's also a minimum stake requirement, which further reduces the potential for spam.
*/
pub(crate) struct Allocations<
AllocationsMap: StorageMap<AllocationsKey, Amount, Query = Option<Amount>>,
SortedAllocationsMap: StorageMap<SortedAllocationsKey, (), Query = Option<()>> + Spm<()>,
>(PhantomData<(AllocationsMap, SortedAllocationsMap)>);
impl<
AllocationsMap: StorageMap<AllocationsKey, Amount, Query = Option<Amount>>,
SortedAllocationsMap: StorageMap<SortedAllocationsKey, (), Query = Option<()>> + Spm<()>,
> Allocations<AllocationsMap, SortedAllocationsMap>
{
/// The storage key to use with the sorted allocations map.
#[inline]
fn sorted_allocation_storage_key(
network: NetworkId,
key: Public,
amount: Amount,
) -> (NetworkId, [u8; 8], [u8; 16], Public) {
// We want the accounts with the highest allocations to be first. Since the DB iterates from
// low to high, we take the BE bytes of the amount (meaning the lowest-value allocations have
// the lowest lexicographic order and will be first), then reverse their order.
let amount = reverse_lexicographic_order(amount.0.to_be_bytes());
// Hash all of the keys to best defend against layer-spam attacks
let hash = sp_io::hashing::blake2_128(&(network, amount, key).encode());
(network, amount, hash, key)
}
// Recover the user's public key from a storage key.
fn recover_key_from_sorted_allocation_storage_key(key: &[u8]) -> Public {
<Public as From<[u8; 32]>>::from(key[(key.len() - 32) ..].try_into().unwrap())
}
// Recover the amount allocated from a storage key.
fn recover_amount_from_sorted_allocation_storage_key(key: &[u8]) -> Amount {
// We read the amount from the end of the key as everything after the amount is fixed-length
let distance_from_end = 8 + 16 + 32;
let start_pos = key.len() - distance_from_end;
let raw: [u8; 8] = key[start_pos .. (start_pos + 8)].try_into().unwrap();
// Take advantage of how this is a bijective mapping
let raw = reverse_lexicographic_order(raw);
Amount(u64::from_be_bytes(raw))
}
/// Set an allocation.
///
/// Returns the validator's prior allocation.
pub(crate) fn set(network: NetworkId, key: Public, amount: Amount) -> Option<Amount> {
let prior = AllocationsMap::take((network, key));
impl<Storage: AllocationsStorage> Allocations for Storage {
fn set_allocation(network: NetworkId, key: Public, amount: Amount) -> Option<Amount> {
// Remove their existing allocation, if one exists
let prior = Storage::Allocations::take((network, key));
if let Some(amount) = prior {
SortedAllocationsMap::remove(Self::sorted_allocation_storage_key(network, key, amount));
Storage::SortedAllocations::remove(sorted_allocation_storage_key(network, key, amount));
}
// If we're setting a non-zero allocation, add it back to the maps
if amount.0 != 0 {
AllocationsMap::set((network, key), Some(amount));
SortedAllocationsMap::set(
Self::sorted_allocation_storage_key(network, key, amount),
Storage::Allocations::set((network, key), Some(amount));
Storage::SortedAllocations::set(
sorted_allocation_storage_key(network, key, amount),
Some(()),
);
}
prior
}
/// Get an allocation.
pub(crate) fn get(network: NetworkId, key: Public) -> Option<Amount> {
AllocationsMap::get((network, key))
fn get_allocation(network: NetworkId, key: Public) -> Option<Amount> {
Storage::Allocations::get((network, key))
}
/// Iterate over allocations for a network, yielding the highest-valued allocations.
///
/// This will yield all validators present whose allocation is greater than or equal to the
/// specified minimum.
///
/// If two validators share an allocation, the order is deterministic yet otherwise undefined.
pub(crate) fn iter(
fn iter_allocations(
network: NetworkId,
minimum_allocation: Amount,
) -> impl Iterator<Item = (Public, Amount)> {
let mut prefix = SortedAllocationsMap::final_prefix().to_vec();
// Iterate over the sorted allocations for this network
let mut prefix = Storage::SortedAllocations::final_prefix().to_vec();
prefix.extend(&network.encode());
// Decode the read keys into (key, amount) tuples
frame_support::storage::PrefixIterator::<_, ()>::new(prefix.clone(), prefix, |key, _value| {
Ok((
Self::recover_key_from_sorted_allocation_storage_key(key),
Self::recover_amount_from_sorted_allocation_storage_key(key),
recover_key_from_sorted_allocation_storage_key(key),
recover_amount_from_sorted_allocation_storage_key(key),
))
})
// Filter by the specified minimum allocation
.filter(move |(_key, allocation)| *allocation >= minimum_allocation)
}
/// Check if a fresh sample will be BFT for f > 0.
pub(crate) fn will_be_bft_for_any_nonzero_f(
network: NetworkId,
allocation_per_key_share: Amount,
) -> bool {
fn expected_key_shares(network: NetworkId, allocation_per_key_share: Amount) -> u64 {
let mut validators_len = 0;
let mut top_validator_key_shares = None;
let mut total_key_shares = 0;
for (_, amount) in Self::iter(network, allocation_per_key_share) {
for (_, amount) in Self::iter_allocations(network, allocation_per_key_share) {
validators_len += 1;
let key_shares = amount.0 / allocation_per_key_share.0;
total_key_shares += key_shares;
// If this is the first validator, they're the top validator, due to this being sorted
if top_validator_key_shares.is_none() {
top_validator_key_shares = Some(key_shares);
}
if total_key_shares > u64::from(MAX_KEY_SHARES_PER_SET) {
if total_key_shares >= u64::from(MAX_KEY_SHARES_PER_SET) {
break;
}
}
let Some(top_validator_key_shares) = top_validator_key_shares else {
// This network has n = 0 so f = 0
return false;
};
// `total_key_shares` may exceed `MAX_KEY_SHARES_PER_SET`, which will cause a round robin
// reduction of each validator's key shares until their sum is `MAX_KEY_SHARES_PER_SET`.
// `post_amortization_key_shares_for_top_validator` yields what the top validator's key shares
// would be after such a reduction, letting us evaluate this correctly
let top_validator_key_shares =
serai_primitives::validator_sets::post_amortization_key_shares_for_top_validator(
validators_len,
top_validator_key_shares,
total_key_shares,
);
let total_key_shares = total_key_shares.min(MAX_KEY_SHARES_PER_SET.into());
// We achieve BFT under n=3f+1. Accordingly, for the top validator's key shares to be `f`, and
// still have `3f < n`, we tolerate the top validator being faulty
(top_validator_key_shares * 3) < total_key_shares
total_key_shares
}
}
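For a concrete feel of `expected_key_shares` (a sketch of the arithmetic only, with a hypothetical allocation-per-key-share of 100 and allocations small enough that the MAX_KEY_SHARES_PER_SET cap never triggers):

fn main() {
  // Hypothetical allocations for three validators, in the same units as Amount
  let allocation_per_key_share = 100u64;
  let allocations = [350u64, 220, 90];
  let expected_key_shares: u64 = allocations
    .iter()
    // Validators below the allocation-per-key-share aren't yielded by iter_allocations
    .filter(|allocation| **allocation >= allocation_per_key_share)
    // Each yielded validator contributes their whole key shares
    .map(|allocation| allocation / allocation_per_key_share)
    .sum();
  // 3 + 2 key shares; the 90 allocation doesn't reach a single key share
  assert_eq!(expected_key_shares, 5);
}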
@@ -250,7 +243,7 @@ fn test_allocations() {
"Allocations"
}
const STORAGE_PREFIX: &'static str = "AllocationsMap";
const STORAGE_PREFIX: &'static str = "Storage::Allocations";
}
type AllocationsMap =
StorageMap<Storage, Blake2_128Concat, AllocationsKey, Amount, OptionQuery>;
@@ -261,11 +254,17 @@ fn test_allocations() {
"Allocations"
}
const STORAGE_PREFIX: &'static str = "SortedAllocationsMap";
const STORAGE_PREFIX: &'static str = "Storage::SortedAllocations";
}
type SortedAllocationsMap =
StorageMap<StorageSorted, Identity, SortedAllocationsKey, (), OptionQuery>;
struct Allocations;
impl AllocationsStorage for Allocations {
type Allocations = AllocationsMap;
type SortedAllocations = SortedAllocationsMap;
}
let before = NetworkId::deserialize_reader(&mut [0].as_slice()).unwrap();
let network = NetworkId::deserialize_reader(&mut [1].as_slice()).unwrap();
let after = NetworkId::deserialize_reader(&mut [2].as_slice()).unwrap();
@@ -283,10 +282,7 @@ fn test_allocations() {
for _ in 0 .. ALLOCATIONS {
let (key, amount) = rand_allocation();
allocations.push((key, amount));
assert_eq!(
Allocations::<AllocationsMap, SortedAllocationsMap>::set(network, key, amount),
None
);
assert_eq!(Allocations::set_allocation(network, key, amount), None);
}
// Sort them from highest amount to lowest
allocations.sort_by_key(|item| item.1);
@@ -296,19 +292,13 @@ fn test_allocations() {
// these allocations. This ensures we don't read from another network accidentally
{
let (key, amount) = rand_allocation();
assert_eq!(
Allocations::<AllocationsMap, SortedAllocationsMap>::set(before, key, amount),
None
);
assert_eq!(
Allocations::<AllocationsMap, SortedAllocationsMap>::set(after, key, amount),
None
);
assert_eq!(Allocations::set_allocation(before, key, amount), None);
assert_eq!(Allocations::set_allocation(after, key, amount), None);
}
// Check the iterator works
{
let mut a = Allocations::<AllocationsMap, SortedAllocationsMap>::iter(network, Amount(0));
let mut a = Allocations::iter_allocations(network, Amount(0));
let mut b = allocations.clone().into_iter();
for _ in 0 .. ALLOCATIONS {
assert_eq!(a.next(), b.next());
@@ -320,11 +310,11 @@ fn test_allocations() {
// Check the minimum works
{
assert_eq!(
Allocations::<AllocationsMap, SortedAllocationsMap>::iter(network, allocations[0].1).next(),
Allocations::iter_allocations(network, allocations[0].1).next(),
Some(allocations[0])
);
assert_eq!(
Allocations::<AllocationsMap, SortedAllocationsMap>::iter(
Allocations::iter_allocations(
network,
// Fails with probability ~1/2**57
(allocations[0].1 + Amount(1)).unwrap()


@@ -0,0 +1,313 @@
use sp_core::{Encode, Decode, ConstU32, sr25519::Public, bounded::BoundedVec};
use serai_primitives::{
constants::{MAX_KEY_SHARES_PER_SET, MAX_KEY_SHARES_PER_SET_U32},
network_id::NetworkId,
balance::Amount,
validator_sets::{Session, ValidatorSet, amortize_excess_key_shares},
};
use frame_support::storage::{StorageValue, StorageMap, StoragePrefixedMap};
use crate::allocations::*;
/// The list of genesis validators.
type GenesisValidators = BoundedVec<Public, ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>;
/// The key for the SelectedValidators map.
type SelectedValidatorsKey = (ValidatorSet, [u8; 16], Public);
pub(crate) trait SessionsStorage: AllocationsStorage {
/// The genesis validators.
///
/// Its usage is shared with the rest of the pallet. `Sessions` only reads it.
type GenesisValidators: StorageValue<GenesisValidators, Query = GenesisValidators>;
/// The allocation required for a key share.
///
/// Its usage is shared with the rest of the pallet. `Sessions` only reads it.
type AllocationPerKeyShare: StorageMap<NetworkId, Amount, Query = Option<Amount>>;
/// The current session.
///
/// This is opaque and to be exclusively read and written by `Sessions`.
type CurrentSession: StorageMap<NetworkId, Session, Query = Option<Session>>;
/// The latest session which has been decided.
///
/// This is opaque and to be exclusively read and written by `Sessions`.
type LatestDecidedSession: StorageMap<NetworkId, Session, Query = Option<Session>>;
/// The selected validators for a set.
///
/// This MUST be instantiated with a map using `Identity` for its hasher.
///
/// This is opaque and to be exclusively read and written by `Sessions`.
// The value is how many key shares the validator has.
type SelectedValidators: StorageMap<SelectedValidatorsKey, u64> + StoragePrefixedMap<()>;
/// The total allocated stake for a network.
///
/// This is opaque and to be exclusively read and written by `Sessions`.
type TotalAllocatedStake: StorageMap<NetworkId, Amount, Query = Option<Amount>>;
}
/// The storage key for the SelectedValidators map.
fn selected_validators_key(set: ValidatorSet, key: Public) -> SelectedValidatorsKey {
let hash = sp_io::hashing::blake2_128(&(set, key).encode());
(set, hash, key)
}
fn selected_validators<Storage: StorageMap<SelectedValidatorsKey, u64> + StoragePrefixedMap<()>>(
set: ValidatorSet,
) -> impl Iterator<Item = (Public, u64)> {
let mut prefix = Storage::final_prefix().to_vec();
prefix.extend(&set.encode());
frame_support::storage::PrefixIterator::<_, ()>::new(
prefix.clone(),
prefix,
|key, mut key_shares| {
Ok((
// Recover the validator's key from the storage key
<[u8; 32]>::try_from(&key[(key.len() - 32) ..]).unwrap().into(),
// Decode the key shares from the value
u64::decode(&mut key_shares).unwrap(),
))
},
)
}
fn clear_selected_validators<
Storage: StorageMap<SelectedValidatorsKey, u64> + StoragePrefixedMap<()>,
>(
set: ValidatorSet,
) {
let mut prefix = Storage::final_prefix().to_vec();
prefix.extend(&set.encode());
assert!(matches!(
sp_io::storage::clear_prefix(&prefix, None),
sp_io::KillStorageResult::AllRemoved(_)
));
}
pub(crate) enum AllocationError {
NoAllocationPerKeyShareSet,
AllocationLessThanKeyShare,
IntroducesSinglePointOfFailure,
}
pub(crate) trait Sessions {
/// Attempt to spawn a new session for the specified network.
///
/// Validators will be selected by their allocations if `AllocationPerKeyShare` is set for this
/// network. `include_genesis_validators` will cause genesis validators to be included *with
/// greater priority than non-genesis validators*.
///
/// Doesn't spawn the next session if the latest decided session has yet to start. This bounds
/// the current session to be the latest decided session or the one prior.
fn attempt_new_session(network: NetworkId, include_genesis_validators: bool);
/// Have the latest-decided session accept the handover from the current set, if one exists.
///
/// Every decided set must accept the handover to become current.
///
/// May panic if the latest-decided session is already the current session, or if there was no
/// latest-decided session.
fn accept_handover(network: NetworkId);
/// Retire a validator set.
///
/// This MUST be called only for sessions which are no longer current.
fn retire(set: ValidatorSet);
/// Increase a validator's allocation.
///
/// This does not perform any transfers of any coins/tokens. It solely performs the book-keeping
/// of it.
fn increase_allocation(
network: NetworkId,
validator: Public,
amount: Amount,
) -> Result<(), AllocationError>;
}
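A hypothetical sketch of the intended lifecycle for a single network, assuming some `S: SessionsStorage` implementor (with `Sessions` in scope so the blanket impl below applies):

fn example_lifecycle<S: SessionsStorage>(network: NetworkId) {
  // Decide the first session, including the genesis validators
  S::attempt_new_session(network, true);
  // The first decided session accepts the (trivial) handover and becomes current
  S::accept_handover(network);
  // Later, decide the next session purely from on-chain allocations
  S::attempt_new_session(network, false);
  // Once the new session accepts the handover, the prior session is no longer current...
  S::accept_handover(network);
  // ... and may be retired, clearing its storage
  S::retire(ValidatorSet { network, session: Session(0) });
}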
impl<Storage: SessionsStorage> Sessions for Storage {
fn attempt_new_session(network: NetworkId, include_genesis_validators: bool) {
// If we haven't rotated to the latest decided session, return
// This prevents us from deciding session #n+2 when we haven't even started #n+1
let current_session = Storage::CurrentSession::get(network);
match (current_session, Storage::LatestDecidedSession::get(network)) {
(Some(current), Some(latest)) => {
if current == latest {
// If the latest decided session is current, we can decide the next session
} else {
// If we already have a pending session, don't spawn a new one
return;
}
}
(Some(current), None) => unreachable!("current session but never decided a session"),
// If we decided our first session, but didn't start it, don't decide another session
(None, Some(latest)) => return,
(None, None) => {
// If we've never started a session, we can decide the first session
}
}
let mut selected_validators = Vec::with_capacity(usize::from(MAX_KEY_SHARES_PER_SET / 2));
let mut total_key_shares = 0;
if let Some(allocation_per_key_share) = Storage::AllocationPerKeyShare::get(network) {
for (validator, amount) in Self::iter_allocations(network, allocation_per_key_share) {
// If this allocation is absurd, causing this to not fit within a u16, bound to the max
let key_shares = amount.0 / allocation_per_key_share.0;
selected_validators.push((validator, key_shares));
// We're tracking key shares as a u64 yet the max allowed is a u16, so this won't overflow
total_key_shares += key_shares;
if total_key_shares >= u64::from(MAX_KEY_SHARES_PER_SET) {
break;
}
}
}
// Perform amortization if we've exceeded the maximum amount of key shares
// This is guaranteed not to cause any validators to have zero key shares, as we'd only be over if
// the last-added (worst) validator had multiple key shares, meaning everyone has more shares
// than we'll amortize here
amortize_excess_key_shares(selected_validators.as_mut_slice());
if include_genesis_validators {
let mut genesis_validators = Storage::GenesisValidators::get()
.into_iter()
.map(|validator| (validator, 1))
.collect::<Vec<_>>();
let genesis_validator_key_shares = u64::try_from(genesis_validators.len()).unwrap();
while (total_key_shares + genesis_validator_key_shares) > u64::from(MAX_KEY_SHARES_PER_SET) {
let (_key, key_shares) = selected_validators.pop().unwrap();
total_key_shares -= key_shares;
}
selected_validators.append(&mut genesis_validators);
total_key_shares += genesis_validator_key_shares;
}
// We kept this accurate but don't actually further read from it
let _ = total_key_shares;
let latest_decided_session = Storage::LatestDecidedSession::mutate(network, |session| {
let next_session = session.map(|session| Session(session.0 + 1)).unwrap_or(Session(0));
*session = Some(next_session);
next_session
});
let latest_decided_set = ValidatorSet { network, session: latest_decided_session };
for (key, key_shares) in selected_validators {
Storage::SelectedValidators::insert(
selected_validators_key(latest_decided_set, key),
key_shares,
);
}
}
fn accept_handover(network: NetworkId) {
let current = {
let current = Storage::CurrentSession::get(network);
let latest_decided = Storage::LatestDecidedSession::get(network)
.expect("accepting handover but never decided a session");
assert_eq!(
current,
latest_decided.0.checked_sub(1).map(Session),
"current session wasn't prior to latest-decided"
);
// Set the CurrentSession variable
Storage::CurrentSession::set(network, Some(latest_decided));
// Return `latest_decided` as `current` as it is now current
latest_decided
};
let mut total_allocated_stake = Amount(0);
for (key, _key_shares) in
selected_validators::<Storage::SelectedValidators>(ValidatorSet { network, session: current })
{
// Safe so long as the SRI supply fits within a u64
total_allocated_stake =
(total_allocated_stake + Self::get_allocation(network, key).unwrap_or(Amount(0))).unwrap();
}
// Update the total allocated stake variable to the current session
Storage::TotalAllocatedStake::set(network, Some(total_allocated_stake));
}
fn retire(set: ValidatorSet) {
assert!(
Some(set.session).map(|session| session.0) <
Storage::CurrentSession::get(set.network).map(|session| session.0),
"retiring a set which is active/upcoming"
);
// Clean-up this set's storage
clear_selected_validators::<Storage::SelectedValidators>(set);
}
fn increase_allocation(
network: NetworkId,
validator: Public,
amount: Amount,
) -> Result<(), AllocationError> {
let Some(allocation_per_key_share) = Storage::AllocationPerKeyShare::get(network) else {
Err(AllocationError::NoAllocationPerKeyShareSet)?
};
let old_allocation = Self::get_allocation(network, validator).unwrap_or(Amount(0));
// Safe so long as the SRI supply fits within a u64, per assumptions on how this is called
let new_allocation = (old_allocation + amount).unwrap();
if new_allocation < allocation_per_key_share {
Err(AllocationError::AllocationLessThanKeyShare)?
}
/*
If the validator set has a single point of failure, the following does nothing. If the
validator set has decentralized and doesn't have a single point of failure, the following
will ensure this allocation doesn't create a single point of failure.
*/
{
// Check the validator set's current expected key shares
let expected_key_shares = Self::expected_key_shares(network, allocation_per_key_share);
// Check if the top validator in this set may be faulty under this f
let top_validator_may_be_faulty = if let Some(top_validator) =
Self::iter_allocations(network, allocation_per_key_share).next()
{
let (_key, amount) = top_validator;
let key_shares = amount.0 / allocation_per_key_share.0;
key_shares <= (expected_key_shares / 3)
} else {
// If there are no validators, we claim the top validator may not be faulty so the
// following check doesn't run
false
};
if top_validator_may_be_faulty {
let old_key_shares = old_allocation.0 / allocation_per_key_share.0;
let new_key_shares = new_allocation.0 / allocation_per_key_share.0;
// Update the amount of expected key shares per the key shares added
let expected_key_shares = (expected_key_shares + (new_key_shares - old_key_shares))
.min(u64::from(MAX_KEY_SHARES_PER_SET));
// If the new key shares exceed the fault tolerance, don't allow the allocation
if new_key_shares > (expected_key_shares / 3) {
Err(AllocationError::IntroducesSinglePointOfFailure)?
}
}
}
Self::set_allocation(network, validator, new_allocation);
// If this validator is active, update `TotalAllocatedStake`
if let Some(current) = Storage::CurrentSession::get(network) {
if Storage::SelectedValidators::contains_key(selected_validators_key(
ValidatorSet { network, session: current },
validator,
)) {
Storage::TotalAllocatedStake::mutate(network, |existing| {
Some(
(existing.expect("current session but no total allocated stake set") + amount).unwrap(),
)
});
}
}
Ok(())
}
}
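To put concrete numbers on the single-point-of-failure check (a hedged sketch of the arithmetic only, with a hypothetical allocation-per-key-share of 100):

fn main() {
  let allocation_per_key_share = 100u64;
  // Suppose the set currently expects 9 key shares and its top validator holds 3
  let expected_key_shares = 9u64;
  let top_validator_key_shares = 3u64;
  // 3 <= 9 / 3, so the top validator may already be faulty without breaking BFT
  assert!(top_validator_key_shares <= (expected_key_shares / 3));
  // A validator increasing their allocation from 100 (1 key share) to 500 (5 key shares)...
  let old_key_shares = 100 / allocation_per_key_share;
  let new_key_shares = 500 / allocation_per_key_share;
  // ...would raise the expected key shares to 13, a fault tolerance of f = 4
  let expected_key_shares = expected_key_shares + (new_key_shares - old_key_shares);
  // 5 > 13 / 3, so this allocation would introduce a single point of failure and is rejected
  assert!(new_key_shares > (expected_key_shares / 3));
}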