Add validator set rotation test for the node side (#532)

* add node side unit test

* complete rotation test for all networks

* set up the fast-epoch docker file

* fix pr comments

Author: akildemir, 2024-02-24 22:51:06 +03:00 (committed by GitHub)
Parent: 019b42c0e0
Commit: 627e7e6210
11 changed files with 305 additions and 12 deletions

.gitignore
View file

@@ -1,5 +1,6 @@
target
Dockerfile
Dockerfile.fast-epoch
!orchestration/runtime/Dockerfile
.test-logs

View file

@@ -5,6 +5,8 @@ use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};
pub fn serai(orchestration_path: &Path, network: Network) {
// Always builds in release for performance reasons
let setup = mimalloc(Os::Debian).to_string() + &build_serai_service(true, "", "serai-node");
let setup_fast_epoch =
mimalloc(Os::Debian).to_string() + &build_serai_service(true, "fast-epoch", "serai-node");
// TODO: Review the ports exposed here
let run_serai = format!(
@@ -24,10 +26,16 @@ CMD ["/run.sh"]
let run = os(Os::Debian, "", "serai") + &run_serai;
let res = setup + &run;
let res_fast_epoch = setup_fast_epoch + &run;
let mut serai_path = orchestration_path.to_path_buf();
serai_path.push("serai");
let mut serai_fast_epoch_path = serai_path.clone();
serai_path.push("Dockerfile");
serai_fast_epoch_path.push("Dockerfile.fast-epoch");
write_dockerfile(serai_path, &res);
write_dockerfile(serai_fast_epoch_path, &res_fast_epoch);
}

View file

@@ -3,7 +3,7 @@ use thiserror::Error;
use async_lock::RwLock;
use simple_request::{hyper, Request, Client};
-use scale::{Encode, Decode, Compact};
use scale::{Compact, Decode, Encode};
use serde::{Serialize, Deserialize, de::DeserializeOwned};
pub use sp_core::{
@@ -195,6 +195,16 @@ impl Serai {
Ok(())
}
async fn active_network_validators(&self, network: NetworkId) -> Result<Vec<Public>, SeraiError> {
let hash: String = self
.call("state_call", ["SeraiRuntimeApi_validators".to_string(), hex::encode(network.encode())])
.await?;
let bytes = Self::hex_decode(hash)?;
let r = Vec::<Public>::decode(&mut bytes.as_slice())
.map_err(|e| SeraiError::ErrorInResponse(e.to_string()))?;
Ok(r)
}
pub async fn latest_finalized_block_hash(&self) -> Result<[u8; 32], SeraiError> {
let hash: String = self.call("chain_getFinalizedHead", ()).await?;
Self::hex_decode(hash)?.try_into().map_err(|_| {
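
For orientation, a minimal usage sketch of the new runtime-API-backed getter as it is exposed through the public client wrapper further below. The RPC URL, network, and error-boxing are assumptions; the call path mirrors the rotation test added later in this commit.

use serai_abi::primitives::NetworkId;
use serai_client::Serai;

// Sketch only: assumes a node is reachable at the given RPC URL.
async fn print_active_validators() -> Result<(), Box<dyn std::error::Error>> {
  let serai = Serai::new("http://127.0.0.1:9944".to_string()).await?;
  // The getter issues `state_call` for `SeraiRuntimeApi_validators` and
  // SCALE-decodes the hex-encoded response into Vec<Public>.
  let validators = serai
    .as_of_latest_finalized_block()
    .await?
    .validator_sets()
    .active_network_validators(NetworkId::Bitcoin)
    .await?;
  println!("{} active Bitcoin validators", validators.len());
  Ok(())
}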

View file

@@ -143,6 +143,29 @@ impl<'a> SeraiValidatorSets<'a> {
.await
}
pub async fn pending_deallocations(
&self,
network: NetworkId,
account: Public,
session: Session,
) -> Result<Option<Amount>, SeraiError> {
self
.0
.storage(
PALLET,
"PendingDeallocations",
(sp_core::hashing::blake2_128(&(network, account).encode()), (network, account, session)),
)
.await
}
pub async fn active_network_validators(
&self,
network: NetworkId,
) -> Result<Vec<Public>, SeraiError> {
self.0.serai.active_network_validators(network).await
}
// TODO: Store these separately since we almost never need both at once?
pub async fn keys(&self, set: ValidatorSet) -> Result<Option<KeyPair>, SeraiError> {
self.0.storage(PALLET, "Keys", (sp_core::hashing::twox_64(&set.encode()), set)).await
@@ -169,6 +192,14 @@ impl<'a> SeraiValidatorSets<'a> {
}))
}
pub fn allocate(network: NetworkId, amount: Amount) -> serai_abi::Call {
serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::allocate { network, amount })
}
pub fn deallocate(network: NetworkId, amount: Amount) -> serai_abi::Call {
serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::deallocate { network, amount })
}
pub fn report_slashes(
network: NetworkId,
slashes: sp_runtime::BoundedVec<
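
The new `allocate`/`deallocate` constructors only build the unsigned call. A rough sketch of submitting one from a client follows; the nonce handling, the tip of 0, and the `publish` method are assumptions mirroring the test helpers added later in this commit.

use serai_abi::primitives::NetworkId;
use serai_client::{Amount, Serai, SeraiValidatorSets};

// Sketch: `pair` controls the staking account, `nonce` is that account's next nonce.
async fn stake_one_share(serai: &Serai, pair: &sp_core::sr25519::Pair, nonce: u32) {
  let call = SeraiValidatorSets::allocate(NetworkId::Bitcoin, Amount(1_000_000 * 10_u64.pow(8)));
  // Sign with a tip of 0, then submit; the rotation test below wraps this
  // pattern in `allocate_stake`/`deallocate_stake`.
  let tx = serai.sign(pair, call, nonce, 0);
  serai.publish(&tx).await.unwrap();
}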

View file

@@ -1,9 +1,13 @@
use std::collections::HashMap;
use serai_abi::primitives::NetworkId;
use zeroize::Zeroizing;
use rand_core::OsRng;
-use sp_core::{Pair, sr25519::Signature};
use sp_core::{
sr25519::{Pair, Signature},
Pair as PairTrait,
};
use ciphersuite::{Ciphersuite, Ristretto};
use frost::dkg::musig::musig;
@@ -15,7 +19,7 @@ use serai_client::{
primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message},
ValidatorSetsEvent,
},
-SeraiValidatorSets, Serai,
Amount, Serai, SeraiValidatorSets,
};
use crate::common::tx::publish_tx;
@@ -59,3 +63,29 @@ pub async fn set_keys(serai: &Serai, set: ValidatorSet, key_pair: KeyPair) -> [u
block
}
#[allow(dead_code)]
pub async fn allocate_stake(
serai: &Serai,
network: NetworkId,
amount: Amount,
pair: &Pair,
nonce: u32,
) -> [u8; 32] {
// get the call
let tx = serai.sign(&pair, SeraiValidatorSets::allocate(network, amount), nonce, 0);
publish_tx(serai, &tx).await
}
#[allow(dead_code)]
pub async fn deallocate_stake(
serai: &Serai,
network: NetworkId,
amount: Amount,
pair: &Pair,
nonce: u32,
) -> [u8; 32] {
// get the call
let tx = serai.sign(&pair, SeraiValidatorSets::deallocate(network, amount), nonce, 0);
publish_tx(serai, &tx).await
}
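
A usage sketch, assuming it sits alongside these helpers so the module's imports apply; the account and amount are placeholders. Successive extrinsics from the same signer need strictly increasing nonces, which is why the rotation test below passes its loop index as the nonce.

// Sketch: one account allocates a Serai key share, then deallocates it with the next nonce.
#[allow(dead_code)]
pub async fn churn_stake(serai: &Serai, pair: &Pair) {
  let share = Amount(50_000 * 10_u64.pow(8));
  allocate_stake(serai, NetworkId::Serai, share, pair, 0).await;
  deallocate_stake(serai, NetworkId::Serai, share, pair, 1).await;
}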

View file

@@ -8,11 +8,13 @@ use serai_client::{
primitives::{Session, ValidatorSet, KeyPair},
ValidatorSetsEvent,
},
-Serai,
Amount, Serai,
};
mod common;
-use common::validator_sets::set_keys;
use common::validator_sets::{set_keys, allocate_stake, deallocate_stake};
const EPOCH_INTERVAL: u64 = 5;
serai_test!(
set_keys_test: (|serai: Serai| async move {
@@ -73,3 +75,199 @@ serai_test!(
assert_eq!(serai.keys(set).await.unwrap(), Some(key_pair));
})
);
#[tokio::test]
async fn validator_set_rotation() {
use dockertest::{
PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,
TestBodySpecification, DockerTest,
};
use std::collections::HashMap;
serai_docker_tests::build("serai-fast-epoch".to_string());
let handle = |name| format!("serai_client-serai_node-{name}");
let composition = |name| {
TestBodySpecification::with_image(
Image::with_repository("serai-dev-serai-fast-epoch").pull_policy(PullPolicy::Never),
)
.replace_cmd(vec![
"serai-node".to_string(),
"--unsafe-rpc-external".to_string(),
"--rpc-cors".to_string(),
"all".to_string(),
"--chain".to_string(),
"local".to_string(),
format!("--{name}"),
])
.replace_env(HashMap::from([("RUST_LOG=runtime".to_string(), "debug".to_string())]))
.set_publish_all_ports(true)
.set_handle(handle(name))
.set_start_policy(StartPolicy::Strict)
.set_log_options(Some(LogOptions {
action: LogAction::Forward,
policy: LogPolicy::Always,
source: LogSource::Both,
}))
};
let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
test.provide_container(composition("alice"));
test.provide_container(composition("bob"));
test.provide_container(composition("charlie"));
test.provide_container(composition("dave"));
test.provide_container(composition("eve"));
test
.run_async(|ops| async move {
// Sleep until the Substrate RPC starts
let alice = handle("alice");
let alice_rpc = ops.handle(&alice).host_port(9944).unwrap();
let alice_rpc = format!("http://{}:{}", alice_rpc.0, alice_rpc.1);
// Sleep for some time
tokio::time::sleep(core::time::Duration::from_secs(20)).await;
let serai = Serai::new(alice_rpc.clone()).await.unwrap();
// Make sure the genesis is as expected
assert_eq!(
serai
.as_of(serai.finalized_block_by_number(0).await.unwrap().unwrap().hash())
.validator_sets()
.new_set_events()
.await
.unwrap(),
NETWORKS
.iter()
.copied()
.map(|network| ValidatorSetsEvent::NewSet {
set: ValidatorSet { session: Session(0), network }
})
.collect::<Vec<_>>(),
);
// genesis accounts
let pair1 = insecure_pair_from_name("Alice");
let pair2 = insecure_pair_from_name("Bob");
let pair3 = insecure_pair_from_name("Charlie");
let pair4 = insecure_pair_from_name("Dave");
let pair5 = insecure_pair_from_name("Eve");
// amounts for single key share per network
let key_shares = HashMap::from([
(NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))),
(NetworkId::Bitcoin, Amount(1_000_000 * 10_u64.pow(8))),
(NetworkId::Monero, Amount(100_000 * 10_u64.pow(8))),
(NetworkId::Ethereum, Amount(1_000_000 * 10_u64.pow(8))),
]);
// genesis participants per network
let default_participants =
vec![pair1.public(), pair2.public(), pair3.public(), pair4.public()];
let mut participants = HashMap::from([
(NetworkId::Serai, default_participants.clone()),
(NetworkId::Bitcoin, default_participants.clone()),
(NetworkId::Monero, default_participants.clone()),
(NetworkId::Ethereum, default_participants),
]);
// test the set rotation
for (i, network) in NETWORKS.into_iter().enumerate() {
let participants = participants.get_mut(&network).unwrap();
// we start the chain with 4 default participants, each with a single key share
participants.sort();
verify_session_and_active_validators(&serai, network, 0, &participants).await;
// add 1 participant & verify
let hash =
allocate_stake(&serai, network, key_shares[&network], &pair5, i.try_into().unwrap())
.await;
participants.push(pair5.public());
participants.sort();
verify_session_and_active_validators(
&serai,
network,
get_active_session(&serai, network, hash).await,
&participants,
)
.await;
// remove 1 participant & verify
let hash =
deallocate_stake(&serai, network, key_shares[&network], &pair2, i.try_into().unwrap())
.await;
participants.swap_remove(participants.iter().position(|k| *k == pair2.public()).unwrap());
let active_session = get_active_session(&serai, network, hash).await;
participants.sort();
verify_session_and_active_validators(&serai, network, active_session, &participants).await;
// check pending deallocations
let pending = serai
.as_of_latest_finalized_block()
.await
.unwrap()
.validator_sets()
.pending_deallocations(
network,
pair2.public(),
Session(u32::try_from(active_session + 1).unwrap()),
)
.await
.unwrap();
assert_eq!(pending, Some(key_shares[&network]));
}
})
.await;
}
async fn verify_session_and_active_validators(
serai: &Serai,
network: NetworkId,
session: u64,
participants: &[Public],
) {
// wait until the epoch block is finalized
let epoch_block = (session * EPOCH_INTERVAL) + 1;
while serai.finalized_block_by_number(epoch_block).await.unwrap().is_none() {
// sleep 1 block
tokio::time::sleep(tokio::time::Duration::from_secs(6)).await;
}
let serai_for_block =
serai.as_of(serai.finalized_block_by_number(epoch_block).await.unwrap().unwrap().hash());
// verify session
let s = serai_for_block.validator_sets().session(network).await.unwrap().unwrap();
assert_eq!(u64::from(s.0), session);
// verify participants
let mut validators =
serai_for_block.validator_sets().active_network_validators(network).await.unwrap();
validators.sort();
assert_eq!(validators, participants);
// make sure finalization continues as usual after the changes
tokio::time::timeout(tokio::time::Duration::from_secs(60), async move {
let mut finalized_block = serai.latest_finalized_block().await.unwrap().header.number;
while finalized_block <= epoch_block + 2 {
tokio::time::sleep(tokio::time::Duration::from_secs(6)).await;
finalized_block = serai.latest_finalized_block().await.unwrap().header.number;
}
})
.await
.unwrap();
// TODO: verify key shares as well?
}
async fn get_active_session(serai: &Serai, network: NetworkId, hash: [u8; 32]) -> u64 {
let block_number = serai.block(hash).await.unwrap().unwrap().header.number;
let epoch = block_number / EPOCH_INTERVAL;
// changes should be active in the next session
if network == NetworkId::Serai {
// it takes 1 extra session for serai net to make the changes active.
epoch + 2
} else {
epoch + 1
}
}
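
To make the timing concrete, a worked sketch of this arithmetic (block number 7 is an arbitrary example), written as if appended to this test file so EPOCH_INTERVAL is in scope:

#[test]
fn active_session_example() {
  // A stake change included in block 7 falls in epoch 7 / EPOCH_INTERVAL = 1.
  let block_number: u64 = 7;
  let epoch = block_number / EPOCH_INTERVAL;
  assert_eq!(epoch, 1);
  assert_eq!(epoch + 1, 2); // external networks: active in the next session
  assert_eq!(epoch + 2, 3); // Serai network: one additional session
}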

View file

@@ -75,6 +75,7 @@ substrate-build-script-utils = { git = "https://github.com/serai-dex/substrate"
[features]
default = []
fast-epoch = ["serai-runtime/fast-epoch"]
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",

View file

@@ -124,6 +124,8 @@ std = [
"pallet-transaction-payment-rpc-runtime-api/std",
]
fast-epoch = []
runtime-benchmarks = [
"sp-runtime/runtime-benchmarks",

View file

@@ -313,8 +313,14 @@ pub type MaxAuthorities = ConstU32<{ validator_sets::primitives::MAX_KEY_SHARES_
pub type ReportLongevity = <Runtime as pallet_babe::Config>::EpochDuration;
impl babe::Config for Runtime {
#[cfg(feature = "fast-epoch")]
#[allow(clippy::identity_op)]
-type EpochDuration = ConstU64<{ 1 * DAYS }>;
type EpochDuration = ConstU64<{ DAYS / (24 * 60 * 2) }>; // 30 seconds
#[cfg(not(feature = "fast-epoch"))]
#[allow(clippy::identity_op)]
type EpochDuration = ConstU64<{ DAYS }>;
type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>;
type EpochChangeTrigger = babe::ExternalTrigger;
type DisabledValidators = ValidatorSets;
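
For reference, a sketch of the arithmetic behind the fast-epoch constant, assuming the 6-second target block time used elsewhere in this commit (so DAYS is 14_400 blocks); the result lines up with EPOCH_INTERVAL = 5 in the client test.

fn main() {
  // With 6-second blocks, a day is 24 * 60 * 60 / 6 = 14_400 blocks.
  let days: u64 = 24 * 60 * 60 / 6;
  let fast_epoch = days / (24 * 60 * 2);
  assert_eq!(fast_epoch, 5); // 5 blocks per epoch
  assert_eq!(fast_epoch * 6, 30); // i.e. 30 seconds, matching the inline comment
}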

View file

@@ -678,16 +678,20 @@ pub mod pallet {
}
pub fn retire_set(set: ValidatorSet) {
-let keys = Keys::<T>::take(set).unwrap();
// If the prior prior set didn't report, emit they're retired now
if PendingSlashReport::<T>::get(set.network).is_some() {
Self::deposit_event(Event::SetRetired {
set: ValidatorSet { network: set.network, session: Session(set.session.0 - 1) },
});
}
// Serai network slashes are handled by BABE/GRANDPA
if set.network != NetworkId::Serai {
// This overwrites the prior value as the prior to-report set's stake presumably just
// unlocked, making their report unenforceable
let keys = Keys::<T>::take(set).unwrap();
PendingSlashReport::<T>::set(set.network, Some(keys.0));
}
// We're retiring this set because the set after it accepted the handover
Self::deposit_event(Event::AcceptedHandover {
@@ -740,7 +744,7 @@ pub mod pallet {
Grandpa::<T>::new_session(
true,
session,
-next_validators.into_iter().map(|(id, w)| (GrandpaAuthorityId::from(id), w)).collect(),
now_validators.into_iter().map(|(id, w)| (GrandpaAuthorityId::from(id), w)).collect(),
);
// Clear SeraiDisabledIndices, only preserving keys still present in the new session

View file

@@ -91,6 +91,8 @@ pub fn build(name: String) {
if name.contains("-processor") {
dockerfile_path =
dockerfile_path.join("processor").join(name.split('-').next().unwrap()).join("Dockerfile");
} else if name == "serai-fast-epoch" {
dockerfile_path = dockerfile_path.join("serai").join("Dockerfile.fast-epoch");
} else {
dockerfile_path = dockerfile_path.join(&name).join("Dockerfile");
}
@@ -145,7 +147,7 @@ pub fn build(name: String) {
meta(repo_path.join("message-queue")),
meta(repo_path.join("coordinator")),
],
-"runtime" | "serai" => vec![
"runtime" | "serai" | "serai-fast-epoch" => vec![
meta(repo_path.join("common")),
meta(repo_path.join("crypto")),
meta(repo_path.join("substrate")),