Mirror of https://github.com/hinto-janai/cuprate.git (synced 2025-01-10 12:54:44 +00:00)
Commit: 266c71f4a9 ("fixes")
Parent: fec033a460
3 changed files with 8 additions and 9 deletions
@@ -179,7 +179,7 @@ where
             Some(res) => {
                 // res has already been set, replace it if this peer claims higher cumulative difficulty
                 if res.0.cumulative_difficulty() < task_res.0.cumulative_difficulty() {
-                    let _unused = mem::replace(res, task_res);
+                    drop(mem::replace(res, task_res));
                 }
             }
             None => {
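The hunk above swaps `let _unused = …` for `drop(…)` when discarding the `#[must_use]` value returned by `mem::replace`. A minimal sketch of the same pattern, with illustrative types that are not from the cuprate codebase:

```rust
use std::mem;

/// Illustrative stand-in for a chain-entry response; not a cuprate type.
struct ChainResponse {
    cumulative_difficulty: u128,
}

/// Keep whichever response claims the higher cumulative difficulty.
fn keep_best(current: &mut ChainResponse, candidate: ChainResponse) {
    if current.cumulative_difficulty < candidate.cumulative_difficulty {
        // `mem::replace` returns the old value and is `#[must_use]`;
        // wrapping it in `drop` states explicitly that the old response
        // is being thrown away.
        drop(mem::replace(current, candidate));
    }
}

fn main() {
    let mut best = ChainResponse { cumulative_difficulty: 10 };
    keep_best(&mut best, ChainResponse { cumulative_difficulty: 42 });
    assert_eq!(best.cumulative_difficulty, 42);
}
```

Wrapping the call in `drop` reads as a deliberate discard, whereas `let _unused = …` looks like a binding that simply never got used.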
@@ -193,7 +193,7 @@ impl<N: NetworkZone> Service<BroadcastRequest<N>> for BroadcastSvc<N> {
                };
 
                // An error here means _all_ receivers were dropped which we assume will never happen.
-                let _unused = match direction {
+                drop(match direction {
                    Some(ConnectionDirection::Inbound) => {
                        self.tx_broadcast_channel_inbound.send(nex_tx_info)
                    }
@@ -201,10 +201,10 @@ impl<N: NetworkZone> Service<BroadcastRequest<N>> for BroadcastSvc<N> {
                        self.tx_broadcast_channel_outbound.send(nex_tx_info)
                    }
                    None => {
-                        let _unused = self.tx_broadcast_channel_outbound.send(nex_tx_info.clone());
+                        drop(self.tx_broadcast_channel_outbound.send(nex_tx_info.clone()));
                        self.tx_broadcast_channel_inbound.send(nex_tx_info)
                    }
-                };
+                });
            }
        }
 
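Both broadcast-service hunks follow the same shape: the `Result` returned by the channel's `send` is `#[must_use]`, but an error only means every receiver was dropped, which the code comment treats as unreachable, so the value is discarded with `drop(…)` instead of being bound to `_unused`. A small sketch of the idea, assuming a `tokio::sync::broadcast` channel purely for illustration:

```rust
use tokio::sync::broadcast;

/// Send a transaction blob on a broadcast channel and deliberately ignore
/// the `#[must_use]` result: `send` only errors when every receiver has
/// been dropped, which (as the code comment in the diff assumes) should
/// never happen while the service is running.
fn broadcast_tx(tx_channel: &broadcast::Sender<Vec<u8>>, tx_blob: Vec<u8>) {
    drop(tx_channel.send(tx_blob));
}

fn main() {
    let (tx_channel, _rx) = broadcast::channel(16);
    broadcast_tx(&tx_channel, vec![1, 2, 3]);
}
```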
@@ -336,10 +336,9 @@ impl<N: NetworkZone> Stream for BroadcastMessageStream<N> {
            Poll::Ready(Some(BroadcastMessage::NewTransaction(txs)))
        } else {
            tracing::trace!("Diffusion flush timer expired but no txs to diffuse");
-            #[expect(
-                clippy::let_underscore_must_use,
-                reason = "poll next_flush now to register the waker with it. the waker will already be registered with the block broadcast channel."
-            )]
+            // poll next_flush now to register the waker with it.
+            // the waker will already be registered with the block broadcast channel."
+            #[expect(clippy::let_underscore_must_use)]
            let _ = this.next_flush.poll(cx);
            Poll::Pending
        }
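This hunk keeps the `let _ = this.next_flush.poll(cx);` pattern (polling a future once purely to register the current task's waker) and only moves the justification out of the `reason = "…"` field into ordinary comments above a one-line `#[expect(clippy::let_underscore_must_use)]`. A standalone sketch of the annotated pattern, assuming Rust 1.81+ for `#[expect]` and that the lint is enabled crate- or workspace-wide (otherwise the expectation is reported as unfulfilled); the function name and signature are illustrative, not cuprate's `BroadcastMessageStream`:

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Poll `flush` once so the current task's waker is registered with it,
/// then report `Pending`. The `Poll` value `flush` returns is discarded
/// on purpose, which is what the lint expectation annotates.
fn register_waker_and_pend<F: Future<Output = ()>>(
    flush: Pin<&mut F>,
    cx: &mut Context<'_>,
) -> Poll<()> {
    #[expect(clippy::let_underscore_must_use)]
    let _ = flush.poll(cx);
    Poll::Pending
}
```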
@@ -8,7 +8,7 @@
 //! returns the peer to the pool when it is dropped.
 //!
 //! Internally the pool is a [`DashMap`] which means care should be taken in `async` code
-//! as internally this uses blocking `RwLocks`.
+//! as internally this uses blocking `RwLock`s.
 use std::sync::Arc;
 
 use dashmap::DashMap;
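The last hunk is a doc-comment typo fix ("RwLocks" becomes "`RwLock`s") in the peer-pool module. The caveat it documents, that `DashMap` uses blocking `RwLock`s internally, is the reason guards from the map should not be held across an `.await`. A sketch of the safe shape, using hypothetical names rather than the pool's real API:

```rust
use dashmap::DashMap;

/// Hypothetical helper, not the pool's real API: read a value without
/// holding the shard guard across an `.await`.
async fn with_copied_value(map: &DashMap<u64, String>, key: u64) {
    // `DashMap::get` returns a guard backed by a blocking `RwLock`.
    // Clone the value out and let the guard drop *before* awaiting,
    // otherwise other tasks touching the same shard can stall or deadlock.
    let value = map.get(&key).map(|entry| entry.value().clone());

    if let Some(value) = value {
        // The shard lock is already released here, so awaiting is fine.
        do_async_work(value).await;
    }
}

async fn do_async_work(_value: String) {
    // Stand-in for real async work.
}
```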