Mirror of https://github.com/serai-dex/serai.git, synced 2025-01-03 09:29:46 +00:00
Remove the "expected" next ID
It's an unnecessary extra layer better handled locally.

parent 90318d7214
commit 269db1c4be
6 changed files with 12 additions and 18 deletions
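
The local handling the commit message refers to is visible in the hunks below: each consumer remembers the last message ID it handled and skips redeliveries itself, instead of passing an "expected" next ID for the message-queue to cross-check. A minimal sketch of that pattern, using simplified stand-in types rather than the repo's own:

// Simplified stand-ins; the real types live in the message-queue crate.
#[derive(Clone, Debug)]
struct QueuedMessage {
  id: u64,
  msg: Vec<u8>,
}

// Stand-in for MessageQueue::next(), which in the repo blocks on the queue service
// over JSON-RPC. A Vec-backed feed is used here only to keep the sketch runnable.
fn next_message(feed: &mut Vec<QueuedMessage>) -> Option<QueuedMessage> {
  if feed.is_empty() {
    None
  } else {
    Some(feed.remove(0))
  }
}

fn main() {
  // The queue may redeliver a message until it's acknowledged.
  let mut feed = vec![
    QueuedMessage { id: 0, msg: b"a".to_vec() },
    QueuedMessage { id: 0, msg: b"a".to_vec() },
    QueuedMessage { id: 1, msg: b"b".to_vec() },
  ];

  let mut last_handled: Option<u64> = None;
  while let Some(msg) = next_message(&mut feed) {
    // Local delineation of handled messages: a redelivery of the last handled ID
    // is skipped here instead of being rejected server-side via an "expected" ID.
    if last_handled == Some(msg.id) {
      continue;
    }
    // ... process and acknowledge the message ...
    last_handled = Some(msg.id);
    println!("handled message {} ({} bytes)", msg.id, msg.msg.len());
  }
}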

@@ -870,6 +870,7 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
     // Alternatively, a peek method with local delineation of handled messages would work.

     let msg = processors.recv().await;
+    // TODO: Check this ID is sane (last handled ID or expected next ID)
     if last_msg == Some(msg.id) {
       sleep(Duration::from_secs(1)).await;
       continue;

@@ -28,8 +28,7 @@ impl Processors for Arc<MessageQueue> {
     self.queue(metadata, msg.into_bytes()).await;
   }
   async fn recv(&mut self) -> Message {
-    // TODO: Use a proper expected next ID
-    let msg = self.next(0).await;
+    let msg = self.next().await;

     let network = match msg.from {
       Service::Processor(network) => network,

@@ -140,9 +140,9 @@ impl MessageQueue {
     }
   }

-  pub async fn next(&self, expected: u64) -> QueuedMessage {
+  pub async fn next(&self) -> QueuedMessage {
     loop {
-      let json = self.json_call("next", serde_json::json!([self.service, expected])).await;
+      let json = self.json_call("next", serde_json::json!([self.service])).await;

       // Convert from a Value to a type via reserialization
       let msg: Option<QueuedMessage> = serde_json::from_str(

@@ -174,7 +174,6 @@ impl MessageQueue {
       );
       }
       // TODO: Verify the sender's signature
-      // TODO: Check the ID is sane

       return msg;
     }

@@ -107,16 +107,11 @@ mod binaries {
   /*
     Gets the next message in queue for this service.

-    This is not authenticated due to the fact every nonce would have to be saved to prevent replays,
-    or a challenge-response protocol implemented. Neither are worth doing when there should be no
-    sensitive data on this server.
-
-    The expected index is used to ensure a service didn't fall out of sync with this service. It
-    should always be either the next message's ID or *TODO*.
+    This is not authenticated due to the fact every nonce would have to be saved to prevent
+    replays, or a challenge-response protocol implemented. Neither are worth doing when there
+    should be no sensitive data on this server.
   */
-  pub(crate) fn get_next_message(service: Service, _expected: u64) -> Option<QueuedMessage> {
-    // TODO: Verify the expected next message ID matches
-
+  pub(crate) fn get_next_message(service: Service) -> Option<QueuedMessage> {
     let queue_outer = (*QUEUES).read().unwrap();
     let queue = queue_outer[&service].read().unwrap();
     let next = queue.last_acknowledged().map(|i| i + 1).unwrap_or(0);
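
Server-side, the selection get_next_message now performs entirely on its own reduces to "the message after the last acknowledged ID", as the last_acknowledged().map(|i| i + 1).unwrap_or(0) context line above shows. A runnable sketch of that rule with a simplified stand-in Queue (not the repo's type):

// Simplified stand-in for the message-queue's per-service queue.
struct Queue {
  messages: Vec<Vec<u8>>, // the message with ID i is stored at index i
  last_acked: Option<u64>,
}

impl Queue {
  fn last_acknowledged(&self) -> Option<u64> {
    self.last_acked
  }

  fn get_message(&self, id: u64) -> Option<Vec<u8>> {
    self.messages.get(usize::try_from(id).ok()?).cloned()
  }

  // Mirrors get_next_message: the next message is simply the one after the last
  // acknowledged ID, with no client-supplied "expected" ID to cross-check.
  fn get_next_message(&self) -> Option<Vec<u8>> {
    let next = self.last_acknowledged().map(|i| i + 1).unwrap_or(0);
    self.get_message(next)
  }
}

fn main() {
  let queue = Queue { messages: vec![b"a".to_vec(), b"b".to_vec()], last_acked: Some(0) };
  assert_eq!(queue.get_next_message(), Some(b"b".to_vec()));

  let empty = Queue { messages: vec![], last_acked: None };
  assert_eq!(empty.get_next_message(), None);
}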

@@ -229,8 +224,8 @@ async fn main() {
     .unwrap();
   module
     .register_method("next", |args, _| {
-      let args = args.parse::<(Service, u64)>().unwrap();
-      Ok(get_next_message(args.0, args.1))
+      let args = args.parse::<Service>().unwrap();
+      Ok(get_next_message(args))
     })
     .unwrap();
   module

@@ -25,8 +25,7 @@ impl Coordinator for MessageQueue {
   }

   async fn recv(&mut self) -> Message {
-    // TODO2: Use a proper expected next ID
-    let msg = self.next(0).await;
+    let msg = self.next().await;

     let id = msg.id;


@@ -471,6 +471,7 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
   let (main_db, mut tributary_mutable, mut substrate_mutable) = boot(&mut raw_db, &network).await;

   // We can't load this from the DB as we can't guarantee atomic increments with the ack function
+  // TODO: Load with a slight tolerance
   let mut last_coordinator_msg = None;

   loop {
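
One possible reading of the "load with a slight tolerance" TODO above, purely as an illustration (the constant, function, and window size below are assumptions, not code from the repo): persist the last handled ID, but because it cannot be incremented atomically with the queue's acknowledgement, only treat IDs well behind the persisted value as already handled and let anything within a small window through for re-handling.

// Hypothetical sketch of the TODO; TOLERANCE and assume_handled are illustrative only.
const TOLERANCE: u64 = 5;

// Returns true when a message can be assumed already handled, given the ID that was
// last persisted. IDs within TOLERANCE of it are let through and re-handled, since
// the persisted value may lag the queue's acknowledgements.
fn assume_handled(persisted_last: Option<u64>, incoming_id: u64) -> bool {
  match persisted_last {
    Some(last) => incoming_id.saturating_add(TOLERANCE) <= last,
    None => false,
  }
}

fn main() {
  assert!(assume_handled(Some(100), 90));  // far behind the persisted mark: skip
  assert!(!assume_handled(Some(100), 97)); // within the tolerance window: re-handle
  assert!(!assume_handled(None, 0));       // nothing persisted: handle everything
}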