Slash reports (#523)

* report_slashes plumbing in Substrate

Notably delays a set's SetRetired event until it provides its slash report, or until the set
after it becomes the set responsible for reporting slashes.

* Add dedicated AcceptedHandover event

* Add SlashReport TX to Tributary

* Create SlashReport TXs

* Handle SlashReport TXs

* Add logic to generate a SlashReport to the coordinator

* Route SlashReportSigner into the processor

* Finish routing the SlashReport signing/TX publication

* Add serai feature to processor's serai-client
Luke Parker 2024-01-29 03:48:53 -05:00 committed by GitHub
parent 0b8c7ade6e
commit 4913873b10
17 changed files with 917 additions and 67 deletions


@ -9,7 +9,10 @@ use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;
use ciphersuite::{
group::ff::{Field, PrimeField},
group::{
ff::{Field, PrimeField},
GroupEncoding,
},
Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;
@ -240,7 +243,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
coordinator::ProcessorMessage::InvalidParticipant { id, .. } |
coordinator::ProcessorMessage::CosignPreprocess { id, .. } |
coordinator::ProcessorMessage::BatchPreprocess { id, .. } |
coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } |
coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session),
// This causes an action on our P2P net yet not on any Tributary
coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => {
let cosigned_block = CosignedBlock {
network,
@ -258,6 +263,55 @@ async fn handle_processor_message<D: Db, P: P2p>(
P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await;
None
}
// This causes an action on Substrate yet not on any Tributary
coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
let set = ValidatorSet { network, session: *session };
let signature: &[u8] = signature.as_ref();
let signature = serai_client::Signature(signature.try_into().unwrap());
let slashes = crate::tributary::SlashReport::get(&txn, set)
.expect("signed slash report despite not having slash report locally");
let slashes_pubs =
slashes.iter().map(|(address, points)| (Public(*address), *points)).collect::<Vec<_>>();
let tx = serai_client::SeraiValidatorSets::report_slashes(
network,
slashes
.into_iter()
.map(|(address, points)| (serai_client::SeraiAddress(address), points))
.collect::<Vec<_>>()
.try_into()
.unwrap(),
signature.clone(),
);
loop {
if serai.publish(&tx).await.is_ok() {
break None;
}
// Check whether these slashes still need to be reported. If not, break.
let Ok(serai) = serai.as_of_latest_finalized_block().await else {
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
continue;
};
let Ok(key) = serai.validator_sets().key_pending_slash_report(network).await else {
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
continue;
};
let Some(key) = key else {
break None;
};
// If this is the key for this slash report, then this will verify
use sp_application_crypto::RuntimePublic;
if !key.verify(
&serai_client::validator_sets::primitives::report_slashes_message(&set, &slashes_pubs),
&signature,
) {
break None;
}
}
}
},
// These don't return a relevant Tributary as there's no Tributary with action expected
ProcessorMessage::Substrate(inner_msg) => match inner_msg {
@ -550,7 +604,8 @@ async fn handle_processor_message<D: Db, P: P2p>(
// slash) and censor transactions (yet don't explicitly ban)
vec![]
}
coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } => {
coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } |
coordinator::ProcessorMessage::SlashReportPreprocess { id, preprocesses } => {
vec![Transaction::SubstrateSign(SignData {
plan: id.id,
attempt: id.attempt,
@ -665,6 +720,8 @@ async fn handle_processor_message<D: Db, P: P2p>(
}
#[allow(clippy::match_same_arms)] // Allowed to preserve layout
coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(),
#[allow(clippy::match_same_arms)]
coordinator::ProcessorMessage::SignedSlashReport { .. } => unreachable!(),
},
ProcessorMessage::Substrate(inner_msg) => match inner_msg {
processor_messages::substrate::ProcessorMessage::Batch { .. } |
@ -963,6 +1020,8 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
new_tributary_spec_send.send(spec).unwrap();
}
let (perform_slash_report_send, mut perform_slash_report_recv) = mpsc::unbounded_channel();
let (tributary_retired_send, mut tributary_retired_recv) = mpsc::unbounded_channel();
// Handle new Substrate blocks
@ -972,6 +1031,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
processors.clone(),
serai.clone(),
new_tributary_spec_send,
perform_slash_report_send,
tributary_retired_send,
));
@ -1026,10 +1086,12 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
let raw_db = raw_db.clone();
let key = key.clone();
let specs = Arc::new(RwLock::new(HashMap::new()));
let tributaries = Arc::new(RwLock::new(HashMap::new()));
// Spawn a task to maintain a local view of the tributaries for whenever recognized_id is
// called
tokio::spawn({
let specs = specs.clone();
let tributaries = tributaries.clone();
let mut set_to_genesis = HashMap::new();
async move {
@ -1038,9 +1100,11 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
Ok(TributaryEvent::NewTributary(tributary)) => {
set_to_genesis.insert(tributary.spec.set(), tributary.spec.genesis());
tributaries.write().await.insert(tributary.spec.genesis(), tributary.tributary);
specs.write().await.insert(tributary.spec.set(), tributary.spec);
}
Ok(TributaryEvent::TributaryRetired(set)) => {
if let Some(genesis) = set_to_genesis.remove(&set) {
specs.write().await.remove(&set);
tributaries.write().await.remove(&genesis);
}
}
@ -1053,6 +1117,84 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
}
});
// Also spawn a task to handle slash reports, as this needs such a view of tributaries
tokio::spawn({
let mut raw_db = raw_db.clone();
let key = key.clone();
let tributaries = tributaries.clone();
async move {
'task_loop: loop {
match perform_slash_report_recv.recv().await {
Some(set) => {
let (genesis, validators) = loop {
let specs = specs.read().await;
let Some(spec) = specs.get(&set) else {
// If we don't have this Tributary because it's retired, break and move on
if RetiredTributaryDb::get(&raw_db, set).is_some() {
continue 'task_loop;
}
// This may happen if the task above is simply slow
log::warn!("tributary we don't have yet is supposed to perform a slash report");
continue;
};
break (spec.genesis(), spec.validators());
};
let mut slashes = vec![];
for (validator, _) in validators {
if validator == (<Ristretto as Ciphersuite>::generator() * key.deref()) {
continue;
}
let validator = validator.to_bytes();
let fatally = tributary::FatallySlashed::get(&raw_db, genesis, validator).is_some();
// TODO: Properly type this
let points = if fatally {
u32::MAX
} else {
tributary::SlashPoints::get(&raw_db, genesis, validator).unwrap_or(0)
};
slashes.push(points);
}
let mut tx = Transaction::SlashReport(slashes, Transaction::empty_signed());
tx.sign(&mut OsRng, genesis, &key);
let mut first = true;
loop {
if !first {
sleep(Duration::from_millis(100)).await;
}
first = false;
let tributaries = tributaries.read().await;
let Some(tributary) = tributaries.get(&genesis) else {
// If we don't have this Tributary because it's retired, break and move on
if RetiredTributaryDb::get(&raw_db, set).is_some() {
break;
}
// This may happen if the task above is simply slow
log::warn!("tributary we don't have yet is supposed to perform a slash report");
continue;
};
// This is safe to perform multiple times and solely needs atomicity with regards
// to itself
// TODO: Should this not take a txn accordingly? It's best practice to take a txn,
// yet taking a txn fails to declare its achieved independence
let mut txn = raw_db.txn();
tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
txn.commit();
break;
}
}
None => panic!("perform slash report sender closed"),
}
}
}
});
move |set: ValidatorSet, genesis, id_type, id: Vec<u8>| {
log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));
let mut raw_db = raw_db.clone();


@ -208,10 +208,12 @@ async fn handle_batch_and_burns<Pro: Processors>(
// Handle a specific Substrate block, returning an error when it fails to get data
// (not blocking / holding)
#[allow(clippy::too_many_arguments)]
async fn handle_block<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
processors: &Pro,
serai: &Serai,
@ -287,6 +289,27 @@ async fn handle_block<D: Db, Pro: Processors>(
event_id += 1;
}
for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? {
let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else {
panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
};
if set.network == NetworkId::Serai {
continue;
}
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh accepted handover event {:?}", accepted_handover);
// TODO: This isn't atomic with the event handling
// Send a oneshot receiver so we can await the response?
perform_slash_report.send(set).unwrap();
let mut txn = db.txn();
HandledEvent::handle_event(&mut txn, hash, event_id);
txn.commit();
}
event_id += 1;
}
for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? {
let ValidatorSetsEvent::SetRetired { set } = retired_set else {
panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
@ -320,10 +343,12 @@ async fn handle_block<D: Db, Pro: Processors>(
Ok(())
}
#[allow(clippy::too_many_arguments)]
async fn handle_new_blocks<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
processors: &Pro,
serai: &Serai,
@ -349,7 +374,17 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
.expect("couldn't get block before the latest finalized block");
log::info!("handling substrate block {b}");
handle_block(db, key, new_tributary_spec, tributary_retired, processors, serai, block).await?;
handle_block(
db,
key,
new_tributary_spec,
perform_slash_report,
tributary_retired,
processors,
serai,
block,
)
.await?;
*next_block += 1;
let mut txn = db.txn();
@ -368,6 +403,7 @@ pub async fn scan_task<D: Db, Pro: Processors>(
processors: Pro,
serai: Arc<Serai>,
new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: mpsc::UnboundedSender<ValidatorSet>,
) {
log::info!("scanning substrate");
@ -443,6 +479,7 @@ pub async fn scan_task<D: Db, Pro: Processors>(
&mut db,
&key,
&new_tributary_spec,
&perform_slash_report,
&tributary_retired,
&processors,
&serai,


@ -7,7 +7,7 @@ use ciphersuite::{group::Group, Ciphersuite, Ristretto};
use scale::{Encode, Decode};
use serai_client::{
primitives::{SeraiAddress, Signature},
validator_sets::primitives::{ValidatorSet, KeyPair},
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
};
use processor_messages::coordinator::SubstrateSignableId;
@ -79,7 +79,7 @@ fn test_read_write<RW: Eq + Debug + ReadWrite>(value: &RW) {
#[test]
fn tx_size_limit() {
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, MAX_KEY_LEN};
use serai_client::validator_sets::primitives::MAX_KEY_LEN;
use tributary::TRANSACTION_SIZE_LIMIT;
@ -277,4 +277,17 @@ fn serialize_transaction() {
signature: random_signed_with_nonce(&mut OsRng, 2).signature,
});
}
test_read_write(&Transaction::SlashReport(
{
let amount =
usize::try_from(OsRng.next_u64() % u64::from(MAX_KEY_SHARES_PER_SET - 1)).unwrap();
let mut points = vec![];
for _ in 0 .. amount {
points.push((OsRng.next_u64() >> 32).try_into().unwrap());
}
points
},
random_signed_with_nonce(&mut OsRng, 0),
));
}


@ -51,10 +51,13 @@ create_db!(
TributaryBlockNumber: (block: [u8; 32]) -> u32,
LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
// TODO: Revisit the point of this
FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
// TODO: Combine these two
FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,
VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (),
VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16,
@ -73,6 +76,11 @@ create_db!(
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec<u8>,
SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
SlashReported: (genesis: [u8; 32]) -> u16,
SlashReportCutOff: (genesis: [u8; 32]) -> u64,
SlashReport: (set: ValidatorSet) -> Vec<([u8; 32], u32)>,
}
);
@ -116,7 +124,13 @@ impl AttemptDb {
pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
let attempt = Self::get(getter, genesis, &topic);
// Don't require explicit recognition of the Dkg topic as it starts when the chain does
if attempt.is_none() && ((topic == Topic::Dkg) || (topic == Topic::DkgConfirmation)) {
// Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it
// should always happen (eventually)
if attempt.is_none() &&
((topic == Topic::Dkg) ||
(topic == Topic::DkgConfirmation) ||
(topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))
{
return Some(0);
}
attempt


@ -738,6 +738,39 @@ impl<
};
self.processors.send(self.spec.set().network, msg).await;
}
Transaction::SlashReport(points, signed) => {
// Uses &[] as we only need the length, which is independent of who else was removed
let signer_range = self.spec.i(&[], signed.signer).unwrap();
let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
if points.len() != (self.spec.validators().len() - 1) {
self.fatal_slash(
signed.signer.to_bytes(),
"submitted a distinct amount of slash points to participants",
);
return;
}
if SlashReports::get(self.txn, genesis, signed.signer.to_bytes()).is_some() {
self.fatal_slash(signed.signer.to_bytes(), "submitted multiple slash points");
return;
}
SlashReports::set(self.txn, genesis, signed.signer.to_bytes(), &points);
let prior_reported = SlashReported::get(self.txn, genesis).unwrap_or(0);
let now_reported = prior_reported + signer_len;
SlashReported::set(self.txn, genesis, &now_reported);
if (prior_reported < self.spec.t()) && (now_reported >= self.spec.t()) {
SlashReportCutOff::set(
self.txn,
genesis,
// 30 minutes into the future
&(u64::from(self.block_number) +
((30 * 60 * 1000) / u64::from(tributary::tendermint::TARGET_BLOCK_TIME))),
);
}
}
}
}
}
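As a worked example of the cut-off arithmetic above (purely illustrative, not part of the diff): the division implies TARGET_BLOCK_TIME is expressed in milliseconds, so if it were, say, 6000 (a 6-second target block time), the cut-off would land (30 * 60 * 1000) / 6000 = 300 blocks after the reports first reach the threshold of key shares.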


@ -16,7 +16,7 @@ use serai_client::{
use serai_db::DbTxn;
use processor_messages::coordinator::SubstrateSignableId;
use processor_messages::coordinator::{SubstrateSignId, SubstrateSignableId};
use tributary::{
TransactionKind, Transaction as TributaryTransaction, TransactionError, Block, TributaryReader,
@ -520,6 +520,24 @@ impl<
.await;
}
}
SubstrateSignableId::SlashReport => {
// If this Tributary hasn't been retired...
// (published SlashReport/took too long to do so)
if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() {
let report = SlashReport::get(self.txn, self.spec.set())
.expect("re-attempting signing a SlashReport we don't have?");
self
.processors
.send(
self.spec.set().network,
processor_messages::coordinator::CoordinatorMessage::SignSlashReport {
id,
report,
},
)
.await;
}
}
}
}
Topic::Sign(id) => {
@ -542,6 +560,94 @@ impl<
}
}
}
if Some(u64::from(self.block_number)) == SlashReportCutOff::get(self.txn, genesis) {
// Grab every slash report
let mut all_reports = vec![];
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
let Some(mut report) = SlashReports::get(self.txn, genesis, validator.to_bytes()) else {
continue;
};
// Assign them 0 points for themselves
report.insert(i, 0);
// Uses &[] as we only need the length, which is independent of who else was removed
let signer_i = self.spec.i(&[], validator).unwrap();
let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start);
// Push `n` copies, one for each of their shares
for _ in 0 .. signer_len {
all_reports.push(report.clone());
}
}
// For each participant, grab their median
let mut medians = vec![];
for p in 0 .. self.spec.validators().len() {
let mut median_calc = vec![];
for report in &all_reports {
median_calc.push(report[p]);
}
median_calc.sort_unstable();
medians.push(median_calc[median_calc.len() / 2]);
}
// Grab the points of the last party within the best-performing threshold
// This is done by first expanding the point values by the amount of shares
let mut sorted_medians = vec![];
for (i, (_, shares)) in self.spec.validators().into_iter().enumerate() {
for _ in 0 .. shares {
sorted_medians.push(medians[i]);
}
}
// Then performing the sort
sorted_medians.sort_unstable();
let worst_points_by_party_within_threshold = sorted_medians[usize::from(self.spec.t()) - 1];
// Reduce everyone's points by this value
for median in &mut medians {
*median = median.saturating_sub(worst_points_by_party_within_threshold);
}
// The threshold now has the proper incentive to report this as they no longer suffer
// negative effects
//
// Additionally, if all validators had degraded performance, they don't all get penalized for
// what's likely outside their control (as it occurred universally)
// Mark everyone fatally slashed with u32::MAX
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
if FatallySlashed::get(self.txn, genesis, validator.to_bytes()).is_some() {
medians[i] = u32::MAX;
}
}
let mut report = vec![];
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
if medians[i] != 0 {
report.push((validator.to_bytes(), medians[i]));
}
}
// This does lock in the report, meaning further slash point accumulations won't be reported
// They still have value to be locally tracked due to local decisions made based off
// accumulated slash reports
SlashReport::set(self.txn, self.spec.set(), &report);
// Start a signing protocol for this
self
.processors
.send(
self.spec.set().network,
processor_messages::coordinator::CoordinatorMessage::SignSlashReport {
id: SubstrateSignId {
session: self.spec.set().session,
id: SubstrateSignableId::SlashReport,
attempt: 0,
},
report,
},
)
.await;
}
}
}
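For reference, a minimal standalone sketch of the aggregation performed above: take the median of the points assigned to each validator across all reports, expand by key shares, subtract the worst median within the best-performing threshold, and override fatally slashed validators with u32::MAX. The function and parameter names are hypothetical, this is not part of the diff, and it assumes the reports have already been expanded to one row per key share (with each reporter assigned 0 points by themselves).

fn aggregate_slash_reports(
  reports: &[Vec<u32>], // one row per key share, one column per validator
  shares: &[u16],       // key shares held by each validator
  threshold: u16,       // t, the threshold of key shares
  fatally_slashed: &[bool],
) -> Vec<(usize, u32)> {
  let validators = shares.len();

  // Median of the points each validator was assigned across all reports
  let mut medians = Vec::with_capacity(validators);
  for v in 0 .. validators {
    let mut column = reports.iter().map(|report| report[v]).collect::<Vec<_>>();
    column.sort_unstable();
    medians.push(column[column.len() / 2]);
  }

  // Expand the medians by key share count and grab the worst median within the
  // best-performing threshold
  let mut sorted_medians = vec![];
  for (v, share_count) in shares.iter().enumerate() {
    for _ in 0 .. *share_count {
      sorted_medians.push(medians[v]);
    }
  }
  sorted_medians.sort_unstable();
  let baseline = sorted_medians[usize::from(threshold) - 1];

  // Reduce by the baseline so a well-performing threshold isn't penalized, mark anyone
  // fatally slashed with u32::MAX, and only include non-zero entries in the report
  let mut report = vec![];
  for v in 0 .. validators {
    let points =
      if fatally_slashed[v] { u32::MAX } else { medians[v].saturating_sub(baseline) };
    if points != 0 {
      report.push((v, points));
    }
  }
  report
}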


@ -190,6 +190,8 @@ pub enum Transaction {
first_signer: <Ristretto as Ciphersuite>::G,
signature: SchnorrSignature<Ristretto>,
},
SlashReport(Vec<u32>, Signed),
}
impl Debug for Transaction {
@ -244,6 +246,11 @@ impl Debug for Transaction {
.field("plan", &hex::encode(plan))
.field("tx_hash", &hex::encode(tx_hash))
.finish_non_exhaustive(),
Transaction::SlashReport(points, signed) => fmt
.debug_struct("Transaction::SignCompleted")
.field("points", points)
.field("signed", signed)
.finish(),
}
}
}
@ -413,6 +420,25 @@ impl ReadWrite for Transaction {
Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
}
11 => {
let mut len = [0];
reader.read_exact(&mut len)?;
let len = len[0];
// If the set has as many validators as MAX_KEY_SHARES_PER_SET, then the amount of distinct
// validators (the amount of validators reported on) will be at most
// `MAX_KEY_SHARES_PER_SET - 1`
if u32::from(len) > (serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET - 1) {
Err(io::Error::other("more points reported than allowed validator"))?;
}
let mut points = vec![0u32; len.into()];
for points in &mut points {
let mut these_points = [0; 4];
reader.read_exact(&mut these_points)?;
*points = u32::from_le_bytes(these_points);
}
Ok(Transaction::SlashReport(points, Signed::read_without_nonce(reader, 0)?))
}
_ => Err(io::Error::other("invalid transaction type")),
}
}
@ -529,6 +555,14 @@ impl ReadWrite for Transaction {
writer.write_all(&first_signer.to_bytes())?;
signature.write(writer)
}
Transaction::SlashReport(points, signed) => {
writer.write_all(&[11])?;
writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
for points in points {
writer.write_all(&points.to_le_bytes())?;
}
signed.write_without_nonce(writer)
}
}
}
}
@ -559,6 +593,10 @@ impl TransactionTrait for Transaction {
TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed)
}
Transaction::SignCompleted { .. } => TransactionKind::Unsigned,
Transaction::SlashReport(_, signed) => {
TransactionKind::Signed(b"slash_report".to_vec(), signed)
}
}
}
@ -622,10 +660,13 @@ impl Transaction {
Transaction::Sign(data) => data.label.nonce(),
Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
Transaction::SlashReport(_, _) => 0,
};
(
nonce,
#[allow(clippy::match_same_arms)]
match tx {
Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } |
Transaction::DkgCommitments { ref mut signed, .. } |
@ -642,6 +683,8 @@ impl Transaction {
Transaction::Sign(ref mut data) => &mut data.signed,
Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
Transaction::SlashReport(_, ref mut signed) => signed,
},
)
}
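For orientation, the on-wire layout produced by the SlashReport arms above (a sketch derived from the read/write code here, not an additional part of the diff):

// Serialized Transaction::SlashReport:
//   [11]                     1-byte transaction kind
//   [len]                    1-byte count of validators reported on
//                            (rejected on read if > MAX_KEY_SHARES_PER_SET - 1)
//   [len * u32, little-endian] slash points, one per other validator
//   [Signed, without nonce]  signer and signature; the nonce is implicitly 0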


@ -58,7 +58,7 @@ zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db", optional = true }
serai-env = { path = "../common/env", optional = true }
# TODO: Replace with direct usage of primitives
serai-client = { path = "../substrate/client", default-features = false }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }
messages = { package = "serai-processor-messages", path = "./messages", optional = true }


@ -169,6 +169,7 @@ pub mod coordinator {
pub enum SubstrateSignableId {
CosigningSubstrateBlock([u8; 32]),
Batch(u32),
SlashReport,
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
@ -181,6 +182,7 @@ pub mod coordinator {
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum CoordinatorMessage {
CosignSubstrateBlock { id: SubstrateSignId, block_number: u64 },
SignSlashReport { id: SubstrateSignId, report: Vec<([u8; 32], u32)> },
SubstratePreprocesses { id: SubstrateSignId, preprocesses: HashMap<Participant, [u8; 64]> },
SubstrateShares { id: SubstrateSignId, shares: HashMap<Participant, [u8; 32]> },
// Re-attempt a batch signing protocol.
@ -209,8 +211,11 @@ pub mod coordinator {
InvalidParticipant { id: SubstrateSignId, participant: Participant },
CosignPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> },
BatchPreprocess { id: SubstrateSignId, block: BlockHash, preprocesses: Vec<[u8; 64]> },
SlashReportPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> },
SubstrateShare { id: SubstrateSignId, shares: Vec<[u8; 32]> },
// TODO: Make these signatures [u8; 64]?
CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec<u8> },
SignedSlashReport { session: Session, signature: Vec<u8> },
}
}
@ -354,12 +359,15 @@ impl CoordinatorMessage {
}
CoordinatorMessage::Coordinator(msg) => {
let (sub, id) = match msg {
// Unique since this is the entire message
// Unique since this ID contains the hash of the block being cosigned
coordinator::CoordinatorMessage::CosignSubstrateBlock { id, .. } => (0, id.encode()),
// Unique since there's only one of these per session/attempt, and ID is inclusive to
// both
coordinator::CoordinatorMessage::SignSlashReport { id, .. } => (1, id.encode()),
// Unique since this embeds the batch ID (including its network) and attempt
coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. } => (1, id.encode()),
coordinator::CoordinatorMessage::SubstrateShares { id, .. } => (2, id.encode()),
coordinator::CoordinatorMessage::BatchReattempt { id, .. } => (3, id.encode()),
coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. } => (2, id.encode()),
coordinator::CoordinatorMessage::SubstrateShares { id, .. } => (3, id.encode()),
coordinator::CoordinatorMessage::BatchReattempt { id, .. } => (4, id.encode()),
};
let mut res = vec![COORDINATOR_UID, TYPE_COORDINATOR_UID, sub];
@ -426,8 +434,11 @@ impl ProcessorMessage {
coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()),
coordinator::ProcessorMessage::CosignPreprocess { id, .. } => (2, id.encode()),
coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (3, id.encode()),
coordinator::ProcessorMessage::SubstrateShare { id, .. } => (4, id.encode()),
coordinator::ProcessorMessage::CosignedBlock { block, .. } => (5, block.encode()),
coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } => (4, id.encode()),
coordinator::ProcessorMessage::SubstrateShare { id, .. } => (5, id.encode()),
// Unique since only one instance of a signature matters
coordinator::ProcessorMessage::CosignedBlock { block, .. } => (6, block.encode()),
coordinator::ProcessorMessage::SignedSlashReport { .. } => (7, vec![]),
};
let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub];


@ -213,6 +213,10 @@ impl<D: Db> BatchSigner<D> {
panic!("BatchSigner passed CosignSubstrateBlock")
}
CoordinatorMessage::SignSlashReport { .. } => {
panic!("Cosigner passed SignSlashReport")
}
CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {
let (session, id, attempt) = self.verify_id(&id).ok()?;


@ -124,6 +124,10 @@ impl Cosigner {
panic!("Cosigner passed CosignSubstrateBlock")
}
CoordinatorMessage::SignSlashReport { .. } => {
panic!("Cosigner passed SignSlashReport")
}
CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {
assert_eq!(id.session, self.session);
let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {


@ -55,6 +55,9 @@ use cosigner::Cosigner;
mod batch_signer;
use batch_signer::BatchSigner;
mod slash_report_signer;
use slash_report_signer::SlashReportSigner;
mod multisigs;
use multisigs::{MultisigEvent, MultisigManager};
@ -101,6 +104,7 @@ struct TributaryMutable<N: Network, D: Db> {
// Solely mutated by the tributary.
cosigner: Option<Cosigner>,
slash_report_signer: Option<SlashReportSigner>,
}
// Items which are mutably borrowed by Substrate.
@ -233,28 +237,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
}
}
CoordinatorMessage::Coordinator(msg) => {
let is_batch = match msg {
CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } => false,
CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } |
CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => {
matches!(&id.id, SubstrateSignableId::Batch(_))
}
CoordinatorCoordinatorMessage::BatchReattempt { .. } => true,
};
if is_batch {
if let Some(msg) = tributary_mutable
.batch_signer
.as_mut()
.expect(
"coordinator told us to sign a batch when we don't currently have a Substrate signer",
)
.handle(txn, msg)
{
coordinator.send(msg).await;
}
} else {
match msg {
CoordinatorMessage::Coordinator(msg) => match msg {
CoordinatorCoordinatorMessage::CosignSubstrateBlock { id, block_number } => {
let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {
panic!("CosignSubstrateBlock id didn't have a CosigningSubstrateBlock")
@ -271,7 +254,34 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
log::warn!("Cosigner::new returned None");
}
}
CoordinatorCoordinatorMessage::SignSlashReport { id, report } => {
assert_eq!(id.id, SubstrateSignableId::SlashReport);
let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else {
panic!("didn't have key shares for the key we were told to perform a slash report with");
};
if let Some((slash_report_signer, msg)) =
SlashReportSigner::new(txn, N::NETWORK, id.session, keys, report, id.attempt)
{
tributary_mutable.slash_report_signer = Some(slash_report_signer);
coordinator.send(msg).await;
} else {
log::warn!("SlashReportSigner::new returned None");
}
}
_ => {
let (is_cosign, is_batch, is_slash_report) = match msg {
CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } |
CoordinatorCoordinatorMessage::SignSlashReport { .. } => (false, false, false),
CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } |
CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => (
matches!(&id.id, SubstrateSignableId::CosigningSubstrateBlock(_)),
matches!(&id.id, SubstrateSignableId::Batch(_)),
matches!(&id.id, SubstrateSignableId::SlashReport),
),
CoordinatorCoordinatorMessage::BatchReattempt { .. } => (false, true, false),
};
if is_cosign {
if let Some(cosigner) = tributary_mutable.cosigner.as_mut() {
if let Some(msg) = cosigner.handle(txn, msg) {
coordinator.send(msg).await;
@ -282,10 +292,31 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
"this is an error if we didn't reboot",
);
}
} else if is_batch {
if let Some(msg) = tributary_mutable
.batch_signer
.as_mut()
.expect(
"coordinator told us to sign a batch when we don't currently have a Substrate signer",
)
.handle(txn, msg)
{
coordinator.send(msg).await;
}
} else if is_slash_report {
if let Some(slash_report_signer) = tributary_mutable.slash_report_signer.as_mut() {
if let Some(msg) = slash_report_signer.handle(txn, msg) {
coordinator.send(msg).await;
}
} else {
log::warn!(
"received message for slash report signer yet didn't have {}",
"a slash report signer. this is an error if we didn't reboot",
);
}
}
}
}
},
CoordinatorMessage::Substrate(msg) => {
match msg {
@ -540,7 +571,7 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
(
raw_db.clone(),
TributaryMutable { key_gen, batch_signer, cosigner: None, signers },
TributaryMutable { key_gen, batch_signer, cosigner: None, slash_report_signer: None, signers },
multisig_manager,
)
}


@ -0,0 +1,293 @@
use core::fmt;
use std::collections::HashMap;
use rand_core::OsRng;
use frost::{
curve::Ristretto,
ThresholdKeys, FrostError,
algorithm::Algorithm,
sign::{
Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
AlgorithmSignMachine, AlgorithmSignatureMachine,
},
};
use frost_schnorrkel::Schnorrkel;
use log::{info, warn};
use serai_client::{
Public,
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet, report_slashes_message},
};
use messages::coordinator::*;
use crate::{Get, DbTxn, create_db};
create_db! {
SlashReportSignerDb {
Completed: (session: Session) -> (),
Attempt: (session: Session, attempt: u32) -> (),
}
}
type Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as PreprocessMachine>::Preprocess;
type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachine<
<Schnorrkel as Algorithm<Ristretto>>::Signature,
>>::SignatureShare;
pub struct SlashReportSigner {
network: NetworkId,
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
report: Vec<([u8; 32], u32)>,
attempt: u32,
#[allow(clippy::type_complexity)]
preprocessing: Option<(Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,
#[allow(clippy::type_complexity)]
signing: Option<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,
}
impl fmt::Debug for SlashReportSigner {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("SlashReportSigner")
.field("session", &self.session)
.field("report", &self.report)
.field("attempt", &self.attempt)
.field("preprocessing", &self.preprocessing.is_some())
.field("signing", &self.signing.is_some())
.finish_non_exhaustive()
}
}
impl SlashReportSigner {
pub fn new(
txn: &mut impl DbTxn,
network: NetworkId,
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
report: Vec<([u8; 32], u32)>,
attempt: u32,
) -> Option<(SlashReportSigner, ProcessorMessage)> {
assert!(!keys.is_empty());
if Completed::get(txn, session).is_some() {
return None;
}
if Attempt::get(txn, session, attempt).is_some() {
warn!(
"already attempted signing slash report for session {:?}, attempt #{}. {}",
session, attempt, "this is an error if we didn't reboot",
);
return None;
}
Attempt::set(txn, session, attempt, &());
info!("signing slash report for session {:?} with attempt #{}", session, attempt);
let mut machines = vec![];
let mut preprocesses = vec![];
let mut serialized_preprocesses = vec![];
for keys in &keys {
// b"substrate" is a literal from sp-core
let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone());
let (machine, preprocess) = machine.preprocess(&mut OsRng);
machines.push(machine);
serialized_preprocesses.push(preprocess.serialize().try_into().unwrap());
preprocesses.push(preprocess);
}
let preprocessing = Some((machines, preprocesses));
let substrate_sign_id =
SubstrateSignId { session, id: SubstrateSignableId::SlashReport, attempt };
Some((
SlashReportSigner { network, session, keys, report, attempt, preprocessing, signing: None },
ProcessorMessage::SlashReportPreprocess {
id: substrate_sign_id,
preprocesses: serialized_preprocesses,
},
))
}
#[must_use]
pub fn handle(
&mut self,
txn: &mut impl DbTxn,
msg: CoordinatorMessage,
) -> Option<ProcessorMessage> {
match msg {
CoordinatorMessage::CosignSubstrateBlock { .. } => {
panic!("SlashReportSigner passed CosignSubstrateBlock")
}
CoordinatorMessage::SignSlashReport { .. } => {
panic!("SlashReportSigner passed SignSlashReport")
}
CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {
assert_eq!(id.session, self.session);
assert_eq!(id.id, SubstrateSignableId::SlashReport);
if id.attempt != self.attempt {
panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing")
}
let (machines, our_preprocesses) = match self.preprocessing.take() {
// Either rebooted or RPC error, or some invariant
None => {
warn!("not preprocessing. this is an error if we didn't reboot");
return None;
}
Some(preprocess) => preprocess,
};
let mut parsed = HashMap::new();
for l in {
let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();
keys.sort();
keys
} {
let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();
let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
};
if !preprocess_ref.is_empty() {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
}
parsed.insert(l, res);
}
let preprocesses = parsed;
// Only keep a single machine as we only need one to get the signature
let mut signature_machine = None;
let mut shares = vec![];
let mut serialized_shares = vec![];
for (m, machine) in machines.into_iter().enumerate() {
let mut preprocesses = preprocesses.clone();
for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {
if i != m {
assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());
}
}
let (machine, share) = match machine.sign(
preprocesses,
&report_slashes_message(
&ValidatorSet { network: self.network, session: self.session },
&self
.report
.clone()
.into_iter()
.map(|(validator, points)| (Public(validator), points))
.collect::<Vec<_>>(),
),
) {
Ok(res) => res,
Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l })
}
},
};
if m == 0 {
signature_machine = Some(machine);
}
let mut share_bytes = [0; 32];
share_bytes.copy_from_slice(&share.serialize());
serialized_shares.push(share_bytes);
shares.push(share);
}
self.signing = Some((signature_machine.unwrap(), shares));
// Broadcast our shares
Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares })
}
CoordinatorMessage::SubstrateShares { id, shares } => {
assert_eq!(id.session, self.session);
assert_eq!(id.id, SubstrateSignableId::SlashReport);
if id.attempt != self.attempt {
panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing")
}
let (machine, our_shares) = match self.signing.take() {
// Rebooted, RPC error, or some invariant
None => {
// If preprocessing has this ID, it means we were never sent the preprocess by the
// coordinator
if self.preprocessing.is_some() {
panic!("never preprocessed yet signing?");
}
warn!("not preprocessing. this is an error if we didn't reboot");
return None;
}
Some(signing) => signing,
};
let mut parsed = HashMap::new();
for l in {
let mut keys = shares.keys().copied().collect::<Vec<_>>();
keys.sort();
keys
} {
let mut share_ref = shares.get(&l).unwrap().as_slice();
let Ok(res) = machine.read_share(&mut share_ref) else {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
};
if !share_ref.is_empty() {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
}
parsed.insert(l, res);
}
let mut shares = parsed;
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
}
let sig = match machine.complete(shares) {
Ok(res) => res,
Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l })
}
},
};
info!("signed slash report for session {:?} with attempt #{}", self.session, id.attempt);
Completed::set(txn, self.session, &());
Some(ProcessorMessage::SignedSlashReport {
session: self.session,
signature: sig.to_bytes().to_vec(),
})
}
CoordinatorMessage::BatchReattempt { .. } => {
panic!("BatchReattempt passed to SlashReportSigner")
}
}
}
}
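To summarize the expected message flow through this new signer (a sketch based on the handling above, not part of the diff):

// 1) The coordinator sends CoordinatorMessage::SignSlashReport { id, report }.
// 2) SlashReportSigner::new(...) returns ProcessorMessage::SlashReportPreprocess, which
//    the coordinator turns into a Tributary SubstrateSign transaction.
// 3) The coordinator relays the chosen signing set's preprocesses back as
//    SubstratePreprocesses { id, preprocesses }; handle() responds with SubstrateShare.
// 4) The coordinator relays the shares as SubstrateShares { id, shares }; handle()
//    completes the FROST protocol and returns SignedSlashReport { session, signature },
//    which the coordinator publishes to Serai via report_slashes.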


@ -1,3 +1,5 @@
use sp_core::{ConstU32, bounded_vec::BoundedVec};
pub use serai_validator_sets_primitives as primitives;
use serai_primitives::*;
@ -12,6 +14,11 @@ pub enum Call {
key_pair: KeyPair,
signature: Signature,
},
report_slashes {
network: NetworkId,
slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>,
signature: Signature,
},
allocate {
network: NetworkId,
amount: Amount,
@ -41,6 +48,12 @@ pub enum Event {
set: ValidatorSet,
key_pair: KeyPair,
},
AcceptedHandover {
set: ValidatorSet,
},
SetRetired {
set: ValidatorSet,
},
AllocationIncreased {
validator: SeraiAddress,
network: NetworkId,
@ -57,7 +70,4 @@ pub enum Event {
network: NetworkId,
session: Session,
},
SetRetired {
set: ValidatorSet,
},
}


@ -69,6 +69,23 @@ impl<'a> SeraiValidatorSets<'a> {
.await
}
pub async fn accepted_handover_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {
self
.0
.events(|event| {
if let serai_abi::Event::ValidatorSets(event) = event {
if matches!(event, ValidatorSetsEvent::AcceptedHandover { .. }) {
Some(event.clone())
} else {
None
}
} else {
None
}
})
.await
}
pub async fn set_retired_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {
self
.0
@ -131,6 +148,13 @@ impl<'a> SeraiValidatorSets<'a> {
self.0.storage(PALLET, "Keys", (sp_core::hashing::twox_64(&set.encode()), set)).await
}
pub async fn key_pending_slash_report(
&self,
network: NetworkId,
) -> Result<Option<Public>, SeraiError> {
self.0.storage(PALLET, "PendingSlashReport", network).await
}
pub fn set_keys(
network: NetworkId,
removed_participants: Vec<SeraiAddress>,
@ -144,4 +168,17 @@ impl<'a> SeraiValidatorSets<'a> {
signature,
}))
}
pub fn report_slashes(
network: NetworkId,
slashes: sp_runtime::BoundedVec<
(SeraiAddress, u32),
sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET / 3 }>,
>,
signature: Signature,
) -> Transaction {
Serai::unsigned(serai_abi::Call::ValidatorSets(
serai_abi::validator_sets::Call::report_slashes { network, slashes, signature },
))
}
}


@ -307,6 +307,10 @@ pub mod pallet {
#[pallet::getter(fn keys)]
pub type Keys<T: Config> = StorageMap<_, Twox64Concat, ValidatorSet, KeyPair, OptionQuery>;
/// The key for validator sets which can (and still need to) publish their slash reports.
#[pallet::storage]
pub type PendingSlashReport<T: Config> = StorageMap<_, Identity, NetworkId, Public, OptionQuery>;
/// Disabled validators.
#[pallet::storage]
pub type SeraiDisabledIndices<T: Config> = StorageMap<_, Identity, u32, Public, OptionQuery>;
@ -325,6 +329,12 @@ pub mod pallet {
set: ValidatorSet,
key_pair: KeyPair,
},
AcceptedHandover {
set: ValidatorSet,
},
SetRetired {
set: ValidatorSet,
},
AllocationIncreased {
validator: T::AccountId,
network: NetworkId,
@ -341,9 +351,6 @@ pub mod pallet {
network: NetworkId,
session: Session,
},
SetRetired {
set: ValidatorSet,
},
}
impl<T: Config> Pallet<T> {
@ -681,8 +688,21 @@ pub mod pallet {
}
pub fn retire_set(set: ValidatorSet) {
Keys::<T>::remove(set);
Pallet::<T>::deposit_event(Event::SetRetired { set });
let keys = Keys::<T>::take(set).unwrap();
// If the previously retired set never reported its slashes, emit its SetRetired event now
if PendingSlashReport::<T>::get(set.network).is_some() {
Self::deposit_event(Event::SetRetired {
set: ValidatorSet { network: set.network, session: Session(set.session.0 - 1) },
});
}
// This overwrites the prior value as the prior to-report set's stake presumably just
// unlocked, making their report unenforceable
PendingSlashReport::<T>::set(set.network, Some(keys.0));
// We're retiring this set because the set after it accepted the handover
Self::deposit_event(Event::AcceptedHandover {
set: ValidatorSet { network: set.network, session: Session(set.session.0 + 1) },
});
}
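To illustrate the new event ordering (a sketch using sessions n-1, n, and n+1 of one external network; not part of the diff):

// retire_set(set n) runs when set n+1 accepts the handover:
//   - set n's keys are moved from Keys into PendingSlashReport
//   - if set n-1 still had a pending report, SetRetired { set: n-1 } is emitted now
//     (its stake presumably just unlocked, so its report is dropped as unenforceable)
//   - AcceptedHandover { set: n+1 } is emitted
// SetRetired { set: n } is later emitted either by report_slashes (set n reported its
// slashes) or by the next retire_set call (set n never reported).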
/// Take the amount deallocatable.
@ -883,6 +903,31 @@ pub mod pallet {
Ok(())
}
#[pallet::call_index(1)]
#[pallet::weight(0)] // TODO
pub fn report_slashes(
origin: OriginFor<T>,
network: NetworkId,
slashes: BoundedVec<(Public, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>,
signature: Signature,
) -> DispatchResult {
ensure_none(origin)?;
// signature isn't checked as this is an unsigned transaction, and validate_unsigned
// (called by pre_dispatch) checks it
let _ = signature;
// TODO: Handle slashes
let _ = slashes;
// Emit SetRetired for the set which just reported its slashes
Pallet::<T>::deposit_event(Event::SetRetired {
set: ValidatorSet { network, session: Session(Self::session(network).unwrap().0 - 1) },
});
Ok(())
}
#[pallet::call_index(2)]
#[pallet::weight(0)] // TODO
pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
@ -1012,11 +1057,34 @@ pub mod pallet {
}
ValidTransaction::with_tag_prefix("ValidatorSets")
.and_provides(set)
.and_provides((0, set))
.longevity(u64::MAX)
.propagate(true)
.build()
}
Call::report_slashes { network, ref slashes, ref signature } => {
let network = *network;
// Don't allow Serai to publish a slash report as BABE/GRANDPA handles slashes directly
if network == NetworkId::Serai {
Err(InvalidTransaction::Custom(0))?;
}
let Some(key) = PendingSlashReport::<T>::take(network) else {
// Assumed already published
Err(InvalidTransaction::Stale)?
};
// There must have been a previous session if PendingSlashReport is populated
let set =
ValidatorSet { network, session: Session(Self::session(network).unwrap().0 - 1) };
if !key.verify(&report_slashes_message(&set, slashes), signature) {
Err(InvalidTransaction::BadProof)?;
}
ValidTransaction::with_tag_prefix("ValidatorSets")
.and_provides((1, set))
.longevity(MAX_KEY_SHARES_PER_SET.into())
.propagate(true)
.build()
}
Call::allocate { .. } | Call::deallocate { .. } | Call::claim_deallocation { .. } => {
Err(InvalidTransaction::Call)?
}


@ -107,6 +107,10 @@ pub fn set_keys_message(
(b"ValidatorSets-set_keys", set, removed_participants, key_pair).encode()
}
pub fn report_slashes_message(set: &ValidatorSet, slashes: &[(Public, u32)]) -> Vec<u8> {
(b"ValidatorSets-report_slashes", set, slashes).encode()
}
/// For a set of validators whose key shares may exceed the maximum, reduce until they equal the
/// maximum.
///