add specific network/coin/balance types (#619)

* add specific network/coin/balance types

* misc fixes

* fix clippy

* misc fixes

* fix pr comments

* Make halting for external networks

* fix encode/decode
akildemir 2024-10-07 05:16:11 +03:00 committed by GitHub
parent d7ecab605e
commit 435f1d9ae1
91 changed files with 1536 additions and 1055 deletions
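
In short, the PR swaps the generic `NetworkId`/`ValidatorSet` primitives for external-only counterparts (`ExternalNetworkId`, `ExternalValidatorSet`, an `EXTERNAL_NETWORKS` constant) plus `From`/`TryFrom` conversions, so the "skip `NetworkId::Serai`" checks scattered through the coordinator and message queue can be dropped. For orientation only, below is a minimal, hypothetical sketch of what those primitives and conversions might look like: the names come from the diff, but the bodies and derives are simplified assumptions, and the coin/balance analogues named in the PR title are omitted entirely.

```rust
// Hypothetical, self-contained sketch — not the actual serai-primitives code.
// Names follow the diff; everything else here is a simplified assumption.

/// Existing type: every network, including Serai itself.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum NetworkId {
  Serai,
  Bitcoin,
  Ethereum,
  Monero,
}

/// Existing type: a validator-set session index.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct Session(pub u32);

/// New type: only the networks Serai coordinates and runs processors for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum ExternalNetworkId {
  Bitcoin,
  Ethereum,
  Monero,
}

/// New constant: replaces the `NETWORKS` array plus "skip Serai" checks.
pub const EXTERNAL_NETWORKS: [ExternalNetworkId; 3] =
  [ExternalNetworkId::Bitcoin, ExternalNetworkId::Ethereum, ExternalNetworkId::Monero];

impl From<ExternalNetworkId> for NetworkId {
  // Widening conversion, used as `network.into()` when calling APIs which
  // still accept any `NetworkId` (e.g. `total_allocated_stake`).
  fn from(network: ExternalNetworkId) -> Self {
    match network {
      ExternalNetworkId::Bitcoin => NetworkId::Bitcoin,
      ExternalNetworkId::Ethereum => NetworkId::Ethereum,
      ExternalNetworkId::Monero => NetworkId::Monero,
    }
  }
}

impl TryFrom<NetworkId> for ExternalNetworkId {
  type Error = ();
  // Narrowing conversion; `ExternalValidatorSet::try_from(set)` in the
  // substrate scanner would be the analogous conversion for whole sets.
  fn try_from(network: NetworkId) -> Result<Self, Self::Error> {
    match network {
      NetworkId::Serai => Err(()),
      NetworkId::Bitcoin => Ok(ExternalNetworkId::Bitcoin),
      NetworkId::Ethereum => Ok(ExternalNetworkId::Ethereum),
      NetworkId::Monero => Ok(ExternalNetworkId::Monero),
    }
  }
}

/// New type: a validator set guaranteed to belong to an external network.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct ExternalValidatorSet {
  pub network: ExternalNetworkId,
  pub session: Session,
}

fn main() {
  // Old pattern: `for network in NETWORKS { if network == NetworkId::Serai { continue } ... }`
  // New pattern: iterate only external networks, widening on demand.
  for network in EXTERNAL_NETWORKS {
    let set = ExternalValidatorSet { network, session: Session(0) };
    let as_general: NetworkId = set.network.into();
    println!("{set:?} widens to {as_general:?}");
  }
}
```

Encoding the "external networks only" invariant in the type system is what lets call sites iterate `EXTERNAL_NETWORKS` directly and widen with `.into()` only where an API still expects a full `NetworkId`, as seen throughout the hunks below.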

Cargo.lock (generated)

@ -8393,6 +8393,7 @@ dependencies = [
"sp-core",
"sp-io",
"sp-runtime",
"sp-std",
"zeroize",
]


@ -12,9 +12,9 @@ use tokio::{
use borsh::BorshSerialize;
use sp_application_crypto::RuntimePublic;
use serai_client::{
primitives::{NETWORKS, NetworkId, Signature},
validator_sets::primitives::{Session, ValidatorSet},
SeraiError, TemporalSerai, Serai,
primitives::{ExternalNetworkId, Signature, EXTERNAL_NETWORKS},
validator_sets::primitives::{ExternalValidatorSet, Session},
Serai, SeraiError, TemporalSerai,
};
use serai_db::{Get, DbTxn, Db, create_db};
@ -28,17 +28,17 @@ use crate::{
create_db! {
CosignDb {
ReceivedCosign: (set: ValidatorSet, block: [u8; 32]) -> CosignedBlock,
LatestCosign: (network: NetworkId) -> CosignedBlock,
DistinctChain: (set: ValidatorSet) -> (),
ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,
LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,
DistinctChain: (set: ExternalValidatorSet) -> (),
}
}
pub struct CosignEvaluator<D: Db> {
db: Mutex<D>,
serai: Arc<Serai>,
stakes: RwLock<Option<HashMap<NetworkId, u64>>>,
latest_cosigns: RwLock<HashMap<NetworkId, CosignedBlock>>,
stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,
latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,
}
impl<D: Db> CosignEvaluator<D> {
@ -79,7 +79,7 @@ impl<D: Db> CosignEvaluator<D> {
let serai = self.serai.as_of_latest_finalized_block().await?;
let mut stakes = HashMap::new();
for network in NETWORKS {
for network in EXTERNAL_NETWORKS {
// Use if this network has published a Batch for a short-circuit of if they've ever set a key
let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
if set_key {
@ -87,7 +87,7 @@ impl<D: Db> CosignEvaluator<D> {
network,
serai
.validator_sets()
.total_allocated_stake(network)
.total_allocated_stake(network.into())
.await?
.expect("network which published a batch didn't have a stake set")
.0,
@ -126,9 +126,9 @@ impl<D: Db> CosignEvaluator<D> {
async fn set_with_keys_fn(
serai: &TemporalSerai<'_>,
network: NetworkId,
) -> Result<Option<ValidatorSet>, SeraiError> {
let Some(latest_session) = serai.validator_sets().session(network).await? else {
network: ExternalNetworkId,
) -> Result<Option<ExternalValidatorSet>, SeraiError> {
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
return Ok(None);
};
@ -136,13 +136,13 @@ impl<D: Db> CosignEvaluator<D> {
Ok(Some(
if serai
.validator_sets()
.keys(ValidatorSet { network, session: prior_session })
.keys(ExternalValidatorSet { network, session: prior_session })
.await?
.is_some()
{
ValidatorSet { network, session: prior_session }
ExternalValidatorSet { network, session: prior_session }
} else {
ValidatorSet { network, session: latest_session }
ExternalValidatorSet { network, session: latest_session }
},
))
}
@ -204,16 +204,12 @@ impl<D: Db> CosignEvaluator<D> {
let mut total_stake = 0;
let mut total_on_distinct_chain = 0;
for network in NETWORKS {
if network == NetworkId::Serai {
continue;
}
for network in EXTERNAL_NETWORKS {
// Get the current set for this network
let set_with_keys = {
let mut res;
while {
res = set_with_keys_fn(&serai, cosign.network).await;
res = set_with_keys_fn(&serai, network).await;
res.is_err()
} {
log::error!(
@ -231,7 +227,8 @@ impl<D: Db> CosignEvaluator<D> {
let stake = {
let mut res;
while {
res = serai.validator_sets().total_allocated_stake(set_with_keys.network).await;
res =
serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
res.is_err()
} {
log::error!(
@ -271,7 +268,7 @@ impl<D: Db> CosignEvaluator<D> {
#[allow(clippy::new_ret_no_self)]
pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
let mut latest_cosigns = HashMap::new();
for network in NETWORKS {
for network in EXTERNAL_NETWORKS {
if let Some(cosign) = LatestCosign::get(&db, network) {
latest_cosigns.insert(network, cosign);
}


@ -6,9 +6,9 @@ use blake2::{
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet},
in_instructions::primitives::{Batch, SignedBatch},
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
};
pub use serai_db::*;
@ -18,21 +18,21 @@ use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
create_db!(
MainDb {
HandledMessageDb: (network: NetworkId) -> u64,
HandledMessageDb: (network: ExternalNetworkId) -> u64,
ActiveTributaryDb: () -> Vec<u8>,
RetiredTributaryDb: (set: ValidatorSet) -> (),
RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
FirstPreprocessDb: (
network: NetworkId,
network: ExternalNetworkId,
id_type: RecognizedIdType,
id: &[u8]
) -> Vec<Vec<u8>>,
LastReceivedBatchDb: (network: NetworkId) -> u32,
ExpectedBatchDb: (network: NetworkId, id: u32) -> [u8; 32],
BatchDb: (network: NetworkId, id: u32) -> SignedBatch,
LastVerifiedBatchDb: (network: NetworkId) -> u32,
HandoverBatchDb: (set: ValidatorSet) -> u32,
LookupHandoverBatchDb: (network: NetworkId, batch: u32) -> Session,
QueuedBatchesDb: (set: ValidatorSet) -> Vec<u8>
LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>
}
);
@ -61,7 +61,7 @@ impl ActiveTributaryDb {
ActiveTributaryDb::set(txn, &existing_bytes);
}
pub fn retire_tributary(txn: &mut impl DbTxn, set: ValidatorSet) {
pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
let mut active = Self::active_tributaries(txn).1;
for i in 0 .. active.len() {
if active[i].set() == set {
@ -82,7 +82,7 @@ impl ActiveTributaryDb {
impl FirstPreprocessDb {
pub fn save_first_preprocess(
txn: &mut impl DbTxn,
network: NetworkId,
network: ExternalNetworkId,
id_type: RecognizedIdType,
id: &[u8],
preprocess: &Vec<Vec<u8>>,
@ -108,19 +108,19 @@ impl ExpectedBatchDb {
}
impl HandoverBatchDb {
pub fn set_handover_batch(txn: &mut impl DbTxn, set: ValidatorSet, batch: u32) {
pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
Self::set(txn, set, &batch);
LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
}
}
impl QueuedBatchesDb {
pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) {
pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
let mut batches = Self::get(txn, set).unwrap_or_default();
batch.write(&mut batches).unwrap();
Self::set(txn, set, &batches);
}
pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {
let batches_vec = Self::get(txn, set).unwrap_or_default();
txn.del(Self::key(set));


@ -23,8 +23,8 @@ use serai_db::{DbTxn, Db};
use scale::Encode;
use borsh::BorshSerialize;
use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session},
Public, Serai, SeraiInInstructions,
};
@ -79,7 +79,7 @@ pub struct ActiveTributary<D: Db, P: P2p> {
#[derive(Clone)]
pub enum TributaryEvent<D: Db, P: P2p> {
NewTributary(ActiveTributary<D, P>),
TributaryRetired(ValidatorSet),
TributaryRetired(ExternalValidatorSet),
}
// Creates a new tributary and sends it to all listeners.
@ -145,7 +145,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
p2p: &P,
cosign_channel: &mpsc::UnboundedSender<CosignedBlock>,
tributaries: &HashMap<Session, ActiveTributary<D, P>>,
network: NetworkId,
network: ExternalNetworkId,
msg: &processors::Message,
) -> bool {
#[allow(clippy::nonminimal_bool)]
@ -193,7 +193,8 @@ async fn handle_processor_message<D: Db, P: P2p>(
.iter()
.map(|plan| plan.session)
.filter(|session| {
RetiredTributaryDb::get(&txn, ValidatorSet { network, session: *session }).is_none()
RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session })
.is_none()
})
.collect::<HashSet<_>>();
@ -265,7 +266,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
}
// This causes an action on Substrate yet not on any Tributary
coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
let set = ValidatorSet { network, session: *session };
let set = ExternalValidatorSet { network, session: *session };
let signature: &[u8] = signature.as_ref();
let signature = serai_client::Signature(signature.try_into().unwrap());
@ -393,7 +394,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
if let Some(relevant_tributary_value) = relevant_tributary {
if RetiredTributaryDb::get(
&txn,
ValidatorSet { network: msg.network, session: relevant_tributary_value },
ExternalValidatorSet { network: msg.network, session: relevant_tributary_value },
)
.is_some()
{
@ -782,7 +783,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
processors: Pro,
p2p: P,
cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
network: NetworkId,
network: ExternalNetworkId,
mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
) {
let mut tributaries = HashMap::new();
@ -831,7 +832,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
#[allow(clippy::too_many_arguments)]
async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
mut db: D,
network: NetworkId,
network: ExternalNetworkId,
mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
) {
let mut tributaries = HashMap::new();
@ -905,7 +906,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
for batch in start_id ..= last_id {
let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1);
if let Some(session) = is_pre_handover {
let set = ValidatorSet { network, session };
let set = ExternalValidatorSet { network, session };
let mut queued = QueuedBatchesDb::take(&mut txn, set);
// is_handover_batch is only set for handover `Batch`s we're participating in, making
// this safe
@ -923,7 +924,8 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
let is_handover = LookupHandoverBatchDb::get(&txn, network, batch);
if let Some(session) = is_handover {
for queued in QueuedBatchesDb::take(&mut txn, ValidatorSet { network, session }) {
for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session })
{
to_publish.push((session, queued));
}
}
@ -970,10 +972,7 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
) {
let mut channels = HashMap::new();
for network in serai_client::primitives::NETWORKS {
if network == NetworkId::Serai {
continue;
}
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let (processor_send, processor_recv) = mpsc::unbounded_channel();
tokio::spawn(handle_processor_messages(
db.clone(),
@ -1195,7 +1194,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
}
});
move |set: ValidatorSet, genesis, id_type, id: Vec<u8>| {
move |set: ExternalValidatorSet, genesis, id_type, id: Vec<u8>| {
log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));
let mut raw_db = raw_db.clone();
let key = key.clone();


@ -11,7 +11,9 @@ use rand_core::{RngCore, OsRng};
use scale::{Decode, Encode};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};
use serai_client::{
primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai,
};
use serai_db::Db;
@ -69,7 +71,7 @@ const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignedBlock {
pub network: NetworkId,
pub network: ExternalNetworkId,
pub block_number: u64,
pub block: [u8; 32],
pub signature: [u8; 64],
@ -208,8 +210,8 @@ pub struct HeartbeatBatch {
pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
type Id: Send + Sync + Clone + Copy + fmt::Debug;
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);
@ -309,7 +311,7 @@ struct Behavior {
#[allow(clippy::type_complexity)]
#[derive(Clone)]
pub struct LibP2p {
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ExternalValidatorSet, [u8; 32])>>>,
send: Arc<Mutex<mpsc::UnboundedSender<(PeerId, Vec<u8>)>>>,
broadcast: Arc<Mutex<mpsc::UnboundedSender<(P2pMessageKind, Vec<u8>)>>>,
receive: Arc<Mutex<mpsc::UnboundedReceiver<Message<Self>>>>,
@ -397,7 +399,7 @@ impl LibP2p {
let (receive_send, receive_recv) = mpsc::unbounded_channel();
let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();
fn topic_for_set(set: ValidatorSet) -> IdentTopic {
fn topic_for_set(set: ExternalValidatorSet) -> IdentTopic {
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
}
@ -407,7 +409,8 @@ impl LibP2p {
// The addrs we're currently dialing, and the networks associated with them
let dialing_peers = Arc::new(RwLock::new(HashMap::new()));
// The peers we're currently connected to, and the networks associated with them
let connected_peers = Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<NetworkId>>::new()));
let connected_peers =
Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<ExternalNetworkId>>::new()));
// Find and connect to peers
let (connect_to_network_send, mut connect_to_network_recv) =
@ -420,7 +423,7 @@ impl LibP2p {
let connect_to_network_send = connect_to_network_send.clone();
async move {
loop {
let connect = |network: NetworkId, addr: Multiaddr| {
let connect = |network: ExternalNetworkId, addr: Multiaddr| {
let dialing_peers = dialing_peers.clone();
let connected_peers = connected_peers.clone();
let to_dial_send = to_dial_send.clone();
@ -507,7 +510,7 @@ impl LibP2p {
connect_to_network_networks.insert(network);
}
for network in connect_to_network_networks {
if let Ok(mut nodes) = serai.p2p_validators(network).await {
if let Ok(mut nodes) = serai.p2p_validators(network.into()).await {
// If there's an insufficient amount of nodes known, connect to all yet add it
// back and break
if nodes.len() < TARGET_PEERS {
@ -557,7 +560,7 @@ impl LibP2p {
// Subscribe to any new topics
set = subscribe_recv.recv() => {
let (subscribe, set, genesis): (_, ValidatorSet, [u8; 32]) =
let (subscribe, set, genesis): (_, ExternalValidatorSet, [u8; 32]) =
set.expect("subscribe_recv closed. are we shutting down?");
let topic = topic_for_set(set);
if subscribe {
@ -776,7 +779,7 @@ impl LibP2p {
impl P2p for LibP2p {
type Id = PeerId;
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
self
.subscribe
.lock()
@ -785,7 +788,7 @@ impl P2p for LibP2p {
.expect("subscribe_send closed. are we shutting down?");
}
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
self
.subscribe
.lock()


@ -1,6 +1,6 @@
use std::sync::Arc;
use serai_client::primitives::NetworkId;
use serai_client::primitives::ExternalNetworkId;
use processor_messages::{ProcessorMessage, CoordinatorMessage};
use message_queue::{Service, Metadata, client::MessageQueue};
@ -8,27 +8,27 @@ use message_queue::{Service, Metadata, client::MessageQueue};
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Message {
pub id: u64,
pub network: NetworkId,
pub network: ExternalNetworkId,
pub msg: ProcessorMessage,
}
#[async_trait::async_trait]
pub trait Processors: 'static + Send + Sync + Clone {
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>);
async fn recv(&self, network: NetworkId) -> Message;
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);
async fn recv(&self, network: ExternalNetworkId) -> Message;
async fn ack(&self, msg: Message);
}
#[async_trait::async_trait]
impl Processors for Arc<MessageQueue> {
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
let msg: CoordinatorMessage = msg.into();
let metadata =
Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
let msg = borsh::to_vec(&msg).unwrap();
self.queue(metadata, msg).await;
}
async fn recv(&self, network: NetworkId) -> Message {
async fn recv(&self, network: ExternalNetworkId) -> Message {
let msg = self.next(Service::Processor(network)).await;
assert_eq!(msg.from, Service::Processor(network));


@ -19,9 +19,9 @@ use ciphersuite::{Ciphersuite, Ristretto};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
SeraiError, Serai,
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet},
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
Serai, SeraiError,
};
use serai_db::*;
@ -70,13 +70,18 @@ impl LatestCosignedBlock {
db_channel! {
SubstrateDbChannels {
CosignTransactions: (network: NetworkId) -> (Session, u64, [u8; 32]),
CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),
}
}
impl CosignTransactions {
// Append a cosign transaction.
pub fn append_cosign(txn: &mut impl DbTxn, set: ValidatorSet, number: u64, hash: [u8; 32]) {
pub fn append_cosign(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
number: u64,
hash: [u8; 32],
) {
CosignTransactions::send(txn, set.network, &(set.session, number, hash))
}
}
@ -256,22 +261,22 @@ async fn advance_cosign_protocol_inner(
// Using the keys of the prior block ensures this deadlock isn't reached
let serai = serai.as_of(actual_block.header.parent_hash.into());
for network in serai_client::primitives::NETWORKS {
for network in serai_client::primitives::EXTERNAL_NETWORKS {
// Get the latest session to have set keys
let set_with_keys = {
let Some(latest_session) = serai.validator_sets().session(network).await? else {
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
continue;
};
let prior_session = Session(latest_session.0.saturating_sub(1));
if serai
.validator_sets()
.keys(ValidatorSet { network, session: prior_session })
.keys(ExternalValidatorSet { network, session: prior_session })
.await?
.is_some()
{
ValidatorSet { network, session: prior_session }
ExternalValidatorSet { network, session: prior_session }
} else {
let set = ValidatorSet { network, session: latest_session };
let set = ExternalValidatorSet { network, session: latest_session };
if serai.validator_sets().keys(set).await?.is_none() {
continue;
}
@ -280,7 +285,7 @@ async fn advance_cosign_protocol_inner(
};
log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys).await?.unwrap()));
cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));
}
break;


@ -1,4 +1,4 @@
use serai_client::primitives::NetworkId;
use serai_client::primitives::ExternalNetworkId;
pub use serai_db::*;
@ -9,7 +9,7 @@ mod inner_db {
SubstrateDb {
NextBlock: () -> u64,
HandledEvent: (block: [u8; 32]) -> u32,
BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32]
BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]
}
);
}


@ -9,11 +9,14 @@ use zeroize::Zeroizing;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::{
SeraiError, Block, Serai, TemporalSerai,
primitives::{BlockHash, NetworkId},
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
in_instructions::InInstructionsEvent,
coins::CoinsEvent,
in_instructions::InInstructionsEvent,
primitives::{BlockHash, ExternalNetworkId},
validator_sets::{
primitives::{ExternalValidatorSet, ValidatorSet},
ValidatorSetsEvent,
},
Block, Serai, SeraiError, TemporalSerai,
};
use serai_db::DbTxn;
@ -52,9 +55,9 @@ async fn handle_new_set<D: Db>(
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
serai: &Serai,
block: &Block,
set: ValidatorSet,
set: ExternalValidatorSet,
) -> Result<(), SeraiError> {
if in_set(key, &serai.as_of(block.hash()), set)
if in_set(key, &serai.as_of(block.hash()), set.into())
.await?
.expect("NewSet for set which doesn't exist")
{
@ -64,7 +67,7 @@ async fn handle_new_set<D: Db>(
let serai = serai.as_of(block.hash());
let serai = serai.validator_sets();
let set_participants =
serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");
serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist");
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
};
@ -131,7 +134,7 @@ async fn handle_batch_and_burns<Pro: Processors>(
};
let mut batch_block = HashMap::new();
let mut batches = HashMap::<NetworkId, Vec<u32>>::new();
let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();
let mut burns = HashMap::new();
let serai = serai.as_of(block.hash());
@ -205,8 +208,8 @@ async fn handle_block<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
processors: &Pro,
serai: &Serai,
block: Block,
@ -226,12 +229,8 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("NewSet event wasn't NewSet: {new_set:?}");
};
// If this is Serai, do nothing
// We only coordinate/process external networks
if set.network == NetworkId::Serai {
continue;
}
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh new set event {:?}", new_set);
let mut txn = db.txn();
@ -286,10 +285,7 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
};
if set.network == NetworkId::Serai {
continue;
}
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh accepted handover event {:?}", accepted_handover);
// TODO: This isn't atomic with the event handling
@ -307,10 +303,7 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
};
if set.network == NetworkId::Serai {
continue;
}
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh set retired event {:?}", retired_set);
let mut txn = db.txn();
@ -340,8 +333,8 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
processors: &Pro,
serai: &Serai,
next_block: &mut u64,
@ -395,8 +388,8 @@ pub async fn scan_task<D: Db, Pro: Processors>(
processors: Pro,
serai: Arc<Serai>,
new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: mpsc::UnboundedSender<ValidatorSet>,
perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,
) {
log::info!("scanning substrate");
let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();
@ -494,9 +487,12 @@ pub async fn scan_task<D: Db, Pro: Processors>(
/// retry.
pub(crate) async fn expected_next_batch(
serai: &Serai,
network: NetworkId,
network: ExternalNetworkId,
) -> Result<u32, SeraiError> {
async fn expected_next_batch_inner(serai: &Serai, network: NetworkId) -> Result<u32, SeraiError> {
async fn expected_next_batch_inner(
serai: &Serai,
network: ExternalNetworkId,
) -> Result<u32, SeraiError> {
let serai = serai.as_of_latest_finalized_block().await?;
let last = serai.in_instructions().last_batch_for_network(network).await?;
Ok(if let Some(last) = last { last + 1 } else { 0 })
@ -519,7 +515,7 @@ pub(crate) async fn expected_next_batch(
/// This is deemed fine.
pub(crate) async fn verify_published_batches<D: Db>(
txn: &mut D::Transaction<'_>,
network: NetworkId,
network: ExternalNetworkId,
optimistic_up_to: u32,
) -> Option<u32> {
// TODO: Localize from MainDb to SubstrateDb


@ -4,7 +4,7 @@ use std::{
collections::{VecDeque, HashSet, HashMap},
};
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
use processor_messages::CoordinatorMessage;
@ -20,7 +20,7 @@ use crate::{
pub mod tributary;
#[derive(Clone)]
pub struct MemProcessors(pub Arc<RwLock<HashMap<NetworkId, VecDeque<CoordinatorMessage>>>>);
pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);
impl MemProcessors {
#[allow(clippy::new_without_default)]
pub fn new() -> MemProcessors {
@ -30,12 +30,12 @@ impl MemProcessors {
#[async_trait::async_trait]
impl Processors for MemProcessors {
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
let mut processors = self.0.write().await;
let processor = processors.entry(network).or_insert_with(VecDeque::new);
processor.push_back(msg.into());
}
async fn recv(&self, _: NetworkId) -> Message {
async fn recv(&self, _: ExternalNetworkId) -> Message {
todo!()
}
async fn ack(&self, _: Message) {
@ -65,8 +65,8 @@ impl LocalP2p {
impl P2p for LocalP2p {
type Id = usize;
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
let mut msg_ref = msg.as_slice();


@ -15,8 +15,8 @@ use ciphersuite::{
use sp_application_crypto::sr25519;
use borsh::BorshDeserialize;
use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet},
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
};
use tokio::time::sleep;
@ -50,7 +50,7 @@ pub fn new_spec<R: RngCore + CryptoRng>(
let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };
let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin };
let set_participants = keys
.iter()


@ -10,7 +10,7 @@ use frost::Participant;
use sp_runtime::traits::Verify;
use serai_client::{
primitives::{SeraiAddress, Signature},
validator_sets::primitives::{ValidatorSet, KeyPair},
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
};
use tokio::time::sleep;
@ -350,7 +350,7 @@ async fn dkg_test() {
async fn publish_set_keys(
&self,
_db: &(impl Sync + Get),
set: ValidatorSet,
set: ExternalValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,
signature: Signature,
@ -362,7 +362,7 @@ async fn dkg_test() {
&*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),
&serai_client::Public(
frost::dkg::musig::musig_key::<Ristretto>(
&serai_client::validator_sets::primitives::musig_context(set),
&serai_client::validator_sets::primitives::musig_context(set.into()),
&self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
)
.unwrap()


@ -7,7 +7,7 @@ use ciphersuite::{group::Group, Ciphersuite, Ristretto};
use scale::{Encode, Decode};
use serai_client::{
primitives::{SeraiAddress, Signature},
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET},
};
use processor_messages::coordinator::SubstrateSignableId;
@ -31,7 +31,7 @@ impl PublishSeraiTransaction for () {
async fn publish_set_keys(
&self,
_db: &(impl Sync + serai_db::Get),
_set: ValidatorSet,
_set: ExternalValidatorSet,
_removed: Vec<SeraiAddress>,
_key_pair: KeyPair,
_signature: Signature,


@ -6,7 +6,7 @@ use borsh::{BorshSerialize, BorshDeserialize};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;
use serai_client::validator_sets::primitives::{KeyPair, ValidatorSet};
use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet};
use processor_messages::coordinator::SubstrateSignableId;
@ -46,7 +46,7 @@ pub enum Accumulation {
create_db!(
Tributary {
SeraiBlockNumber: (hash: [u8; 32]) -> u64,
SeraiDkgCompleted: (spec: ValidatorSet) -> [u8; 32],
SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32],
TributaryBlockNumber: (block: [u8; 32]) -> u32,
LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
@ -80,7 +80,7 @@ create_db!(
SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
SlashReported: (genesis: [u8; 32]) -> u16,
SlashReportCutOff: (genesis: [u8; 32]) -> u64,
SlashReport: (set: ValidatorSet) -> Vec<([u8; 32], u32)>,
SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>,
}
);


@ -1,6 +1,6 @@
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::validator_sets::primitives::ValidatorSet;
use serai_client::validator_sets::primitives::ExternalValidatorSet;
use tributary::{
ReadWrite,
@ -40,7 +40,7 @@ pub fn removed_as_of_dkg_attempt(
pub fn removed_as_of_set_keys(
getter: &impl Get,
set: ValidatorSet,
set: ExternalValidatorSet,
genesis: [u8; 32],
) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
// SeraiDkgCompleted has the key placed on-chain.


@ -10,7 +10,7 @@ use tokio::sync::broadcast;
use scale::{Encode, Decode};
use serai_client::{
primitives::{SeraiAddress, Signature},
validator_sets::primitives::{KeyPair, ValidatorSet},
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
Serai,
};
@ -38,7 +38,7 @@ pub enum RecognizedIdType {
pub trait RIDTrait {
async fn recognized_id(
&self,
set: ValidatorSet,
set: ExternalValidatorSet,
genesis: [u8; 32],
kind: RecognizedIdType,
id: Vec<u8>,
@ -47,12 +47,12 @@ pub trait RIDTrait {
#[async_trait::async_trait]
impl<
FRid: Send + Future<Output = ()>,
F: Sync + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
F: Sync + Fn(ExternalValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
> RIDTrait for F
{
async fn recognized_id(
&self,
set: ValidatorSet,
set: ExternalValidatorSet,
genesis: [u8; 32],
kind: RecognizedIdType,
id: Vec<u8>,
@ -66,7 +66,7 @@ pub trait PublishSeraiTransaction {
async fn publish_set_keys(
&self,
db: &(impl Sync + Get),
set: ValidatorSet,
set: ExternalValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,
signature: Signature,
@ -86,7 +86,7 @@ mod impl_pst_for_serai {
async fn publish(
serai: &Serai,
db: &impl Get,
set: ValidatorSet,
set: ExternalValidatorSet,
tx: serai_client::Transaction,
meta: $Meta,
) -> bool {
@ -128,7 +128,7 @@ mod impl_pst_for_serai {
async fn publish_set_keys(
&self,
db: &(impl Sync + Get),
set: ValidatorSet,
set: ExternalValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,
signature: Signature,
@ -140,7 +140,7 @@ mod impl_pst_for_serai {
key_pair,
signature,
);
async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool {
async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool {
if matches!(serai.keys(set).await, Ok(Some(_))) {
log::info!("another coordinator set key pair for {:?}", set);
return true;


@ -119,7 +119,7 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
let algorithm = Schnorrkel::new(b"substrate");
let keys: ThresholdKeys<Ristretto> =
musig(&musig_context(self.spec.set()), self.key, participants)
musig(&musig_context(self.spec.set().into()), self.key, participants)
.expect("signing for a set we aren't in/validator present multiple times")
.into();


@ -9,7 +9,7 @@ use frost::Participant;
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{primitives::PublicKey, validator_sets::primitives::ValidatorSet};
use serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet};
fn borsh_serialize_validators<W: io::Write>(
validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
@ -43,7 +43,7 @@ fn borsh_deserialize_validators<R: io::Read>(
pub struct TributarySpec {
serai_block: [u8; 32],
start_time: u64,
set: ValidatorSet,
set: ExternalValidatorSet,
#[borsh(
serialize_with = "borsh_serialize_validators",
deserialize_with = "borsh_deserialize_validators"
@ -55,7 +55,7 @@ impl TributarySpec {
pub fn new(
serai_block: [u8; 32],
start_time: u64,
set: ValidatorSet,
set: ExternalValidatorSet,
set_participants: Vec<(PublicKey, u16)>,
) -> TributarySpec {
let mut validators = vec![];
@ -68,7 +68,7 @@ impl TributarySpec {
Self { serai_block, start_time, set, validators }
}
pub fn set(&self) -> ValidatorSet {
pub fn set(&self) -> ExternalValidatorSet {
self.set
}


@ -6,7 +6,7 @@ pub(crate) use std::{
pub(crate) use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
pub(crate) use schnorr_signatures::SchnorrSignature;
pub(crate) use serai_primitives::NetworkId;
pub(crate) use serai_primitives::ExternalNetworkId;
pub(crate) use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
@ -193,10 +193,7 @@ async fn main() {
KEYS.write().unwrap().insert(service, key);
let mut queues = QUEUES.write().unwrap();
if service == Service::Coordinator {
for network in serai_primitives::NETWORKS {
if network == NetworkId::Serai {
continue;
}
for network in serai_primitives::EXTERNAL_NETWORKS {
queues.insert(
(service, Service::Processor(network)),
RwLock::new(Queue(db.clone(), service, Service::Processor(network))),
@ -210,17 +207,13 @@ async fn main() {
}
};
// Make queues for each NetworkId, other than Serai
for network in serai_primitives::NETWORKS {
if network == NetworkId::Serai {
continue;
}
// Make queues for each ExternalNetworkId
for network in serai_primitives::EXTERNAL_NETWORKS {
// Use a match so we error if the list of NetworkIds changes
let Some(key) = read_key(match network {
NetworkId::Serai => unreachable!(),
NetworkId::Bitcoin => "BITCOIN_KEY",
NetworkId::Ethereum => "ETHEREUM_KEY",
NetworkId::Monero => "MONERO_KEY",
ExternalNetworkId::Bitcoin => "BITCOIN_KEY",
ExternalNetworkId::Ethereum => "ETHEREUM_KEY",
ExternalNetworkId::Monero => "MONERO_KEY",
}) els