finish incoming tx handler

commit b6d94cf780 (parent d2c7e49e80)
mirror of https://github.com/hinto-janai/cuprate.git
14 changed files with 328 additions and 239 deletions
@@ -20,7 +20,7 @@ cuprate-levin = { path = "../../net/levin" }
 cuprate-wire = { path = "../../net/wire" }
 cuprate-p2p = { path = "../../p2p/p2p" }
 cuprate-p2p-core = { path = "../../p2p/p2p-core" }
-cuprate-dandelion-tower = { path = "../../p2p/dandelion-tower" }
+cuprate-dandelion-tower = { path = "../../p2p/dandelion-tower", features = ["txpool"] }
 cuprate-async-buffer = { path = "../../p2p/async-buffer" }
 cuprate-address-book = { path = "../../p2p/address-book" }
 cuprate-blockchain = { path = "../../storage/blockchain", features = ["service"] }
@@ -4,5 +4,4 @@
 
 mod dandelion;
 mod incoming_tx;
-mod manager;
 mod txs_being_handled;
@@ -1,6 +1,8 @@
+use std::time::Duration;
+
 use bytes::Bytes;
 use cuprate_dandelion_tower::pool::DandelionPoolService;
-use cuprate_dandelion_tower::{DandelionConfig, DandelionRouter};
+use cuprate_dandelion_tower::{DandelionConfig, DandelionRouter, Graph};
 use cuprate_p2p::NetworkInterface;
 use cuprate_p2p_core::ClearNet;
 use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};
@@ -11,10 +13,17 @@ mod stem_service;
 mod tx_store;
 
 #[derive(Clone)]
-struct DandelionTx(Bytes);
+pub struct DandelionTx(Bytes);
 
 type TxId = [u8; 32];
 
+const DANDELION_CONFIG: DandelionConfig = DandelionConfig {
+    time_between_hop: Duration::from_millis(175),
+    epoch_duration: Duration::from_secs(10 * 60),
+    fluff_probability: 0.12,
+    graph: Graph::FourRegular,
+};
+
 type ConcreteDandelionRouter = DandelionRouter<
     stem_service::OutboundPeerStream,
     diffuse_service::DiffuseService,
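A quick sanity check on these constants (back-of-the-envelope arithmetic, not from the commit): with fluff_probability = 0.12 each stem hop fluffs independently with probability 0.12, so the stem length is geometric with mean 1/0.12 ≈ 8.3 hops; at 175 ms per hop that is roughly 1.5 s of stem routing per transaction on average, comfortably inside the 10-minute epoch.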
@@ -35,12 +44,7 @@ pub fn start_dandelion_pool_manager(
             txpool_read_handle,
             txpool_write_handle,
         },
-        DandelionConfig {
-            time_between_hop: Default::default(),
-            epoch_duration: Default::default(),
-            fluff_probability: 0.0,
-            graph: Default::default(),
-        },
+        DANDELION_CONFIG,
     )
 }
 
@@ -49,14 +53,7 @@ pub fn dandelion_router(clear_net: NetworkInterface<ClearNet>) -> ConcreteDandel
         diffuse_service::DiffuseService {
             clear_net_broadcast_service: clear_net.broadcast_svc(),
         },
-        stem_service::OutboundPeerStream {
-            clear_net: clear_net.clone(),
-        },
-        DandelionConfig {
-            time_between_hop: Default::default(),
-            epoch_duration: Default::default(),
-            fluff_probability: 0.0,
-            graph: Default::default(),
-        },
+        stem_service::OutboundPeerStream { clear_net },
+        DANDELION_CONFIG,
     )
 }
@@ -1,19 +1,26 @@
-use std::task::{Context, Poll};
+use std::{
+    future::{ready, Ready},
+    task::{Context, Poll},
+};
 
+use futures::FutureExt;
 use tower::Service;
 
-use crate::txpool::dandelion::DandelionTx;
 use cuprate_dandelion_tower::traits::DiffuseRequest;
 use cuprate_p2p::{BroadcastRequest, BroadcastSvc, NetworkInterface};
 use cuprate_p2p_core::ClearNet;
 
+use super::DandelionTx;
+
+/// The dandelion diffusion service.
 pub struct DiffuseService {
     pub clear_net_broadcast_service: BroadcastSvc<ClearNet>,
 }
 
 impl Service<DiffuseRequest<DandelionTx>> for DiffuseService {
-    type Response = BroadcastSvc::Response;
+    type Response = ();
     type Error = tower::BoxError;
-    type Future = BroadcastSvc::Future;
+    type Future = Ready<Result<Self::Response, Self::Error>>;
 
     fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
         self.clear_net_broadcast_service
@@ -22,11 +29,17 @@ impl Service<DiffuseRequest<DandelionTx>> for DiffuseService {
     }
 
     fn call(&mut self, req: DiffuseRequest<DandelionTx>) -> Self::Future {
+        // TODO: Call `into_inner` when 1.82.0 stabilizes
         self.clear_net_broadcast_service
             .call(BroadcastRequest::Transaction {
                 tx_bytes: req.0 .0,
                 direction: None,
                 received_from: None,
             })
+            .now_or_never()
+            .unwrap()
+            .expect("Broadcast service is Infallible");
+
+        ready(Ok(()))
     }
 }
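The hunk above swaps a boxed broadcast future for a synchronous resolution. A minimal sketch of the same pattern, assuming only that the inner service (a stand-in for BroadcastSvc here) is always ready and infallible; all names below are illustrative:

    use std::{
        convert::Infallible,
        future::{ready, Ready},
        task::{Context, Poll},
    };

    use futures::FutureExt;
    use tower::Service;

    /// Stand-in for the inner broadcast service: always ready, never fails.
    struct Inner;

    impl Service<&'static str> for Inner {
        type Response = ();
        type Error = Infallible;
        type Future = Ready<Result<(), Infallible>>;

        fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }

        fn call(&mut self, _req: &'static str) -> Self::Future {
            ready(Ok(()))
        }
    }

    /// Stand-in for the wrapping diffuse service.
    struct Wrapper(Inner);

    impl Service<&'static str> for Wrapper {
        type Response = ();
        type Error = tower::BoxError;
        // No boxing needed: the response is produced before `call` returns.
        type Future = Ready<Result<Self::Response, Self::Error>>;

        fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            self.0.poll_ready(cx).map_err(Into::into)
        }

        fn call(&mut self, req: &'static str) -> Self::Future {
            // `now_or_never` polls once; `unwrap` is safe because the inner
            // future resolves immediately, and `expect` is safe because the
            // inner error type is `Infallible`.
            self.0
                .call(req)
                .now_or_never()
                .unwrap()
                .expect("inner service is infallible");

            ready(Ok(()))
        }
    }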
@@ -1,17 +1,23 @@
-use super::DandelionTx;
-use bytes::Bytes;
-use cuprate_dandelion_tower::traits::StemRequest;
-use cuprate_dandelion_tower::OutboundPeer;
+use std::{
+    pin::Pin,
+    task::{Context, Poll},
+};
+
+use cuprate_dandelion_tower::{traits::StemRequest, OutboundPeer};
 use cuprate_p2p::NetworkInterface;
-use cuprate_p2p_core::client::Client;
-use cuprate_p2p_core::{ClearNet, NetworkZone, PeerRequest, ProtocolRequest};
-use cuprate_wire::protocol::NewTransactions;
-use cuprate_wire::NetworkAddress;
+use cuprate_p2p_core::{
+    client::{Client, InternalPeerID},
+    ClearNet, NetworkZone, PeerRequest, ProtocolRequest,
+};
+use cuprate_wire::{protocol::NewTransactions, NetworkAddress};
+
+use bytes::Bytes;
 use futures::Stream;
-use std::pin::Pin;
-use std::task::{Context, Poll};
 use tower::Service;
 
+use super::DandelionTx;
+
+/// The dandelion outbound peer stream.
 pub struct OutboundPeerStream {
     pub clear_net: NetworkInterface<ClearNet>,
 }
@@ -20,22 +26,29 @@ impl Stream for OutboundPeerStream {
     type Item = Result<OutboundPeer<NetworkAddress, StemPeerService<ClearNet>>, tower::BoxError>;
 
     fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        // TODO: make the outbound peer choice random.
         Poll::Ready(Some(Ok(self
             .clear_net
             .client_pool()
             .outbound_client()
             .map_or(OutboundPeer::Exhausted, |client| {
-                OutboundPeer::Peer(client.info.id.into(), StemPeerService(client))
+                let addr = match client.info.id {
+                    InternalPeerID::KnownAddr(addr) => addr,
+                    InternalPeerID::Unknown(_) => panic!("Outbound peer had an unknown address"),
+                };
+
+                OutboundPeer::Peer(addr.into(), StemPeerService(client))
             }))))
     }
 }
 
-pub struct StemPeerService<N>(Client<N>);
+/// The stem service, used to send stem txs.
+pub struct StemPeerService<N: NetworkZone>(Client<N>);
 
 impl<N: NetworkZone> Service<StemRequest<DandelionTx>> for StemPeerService<N> {
-    type Response = ();
+    type Response = <Client<N> as Service<PeerRequest>>::Response;
     type Error = tower::BoxError;
-    type Future = Client::Future;
+    type Future = <Client<N> as Service<PeerRequest>>::Future;
 
     fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
         self.0.poll_ready(cx)
@@ -1,15 +1,27 @@
-use crate::txpool::dandelion::{DandelionTx, TxId};
-use bytes::Bytes;
-use cuprate_dandelion_tower::traits::{TxStoreRequest, TxStoreResponse};
-use cuprate_database::RuntimeError;
-use cuprate_txpool::service::interface::{TxpoolReadRequest, TxpoolReadResponse};
-use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};
-use futures::future::BoxFuture;
-use futures::{FutureExt, StreamExt, TryFutureExt};
-use std::task::{Context, Poll};
-use tower::util::Oneshot;
-use tower::{Service, ServiceExt};
+use std::{
+    f32::consts::E,
+    task::{Context, Poll},
+};
+
+use bytes::Bytes;
+use futures::{future::BoxFuture, FutureExt, StreamExt, TryFutureExt};
+use tower::{util::Oneshot, Service, ServiceExt};
+
+use cuprate_dandelion_tower::{
+    traits::{TxStoreRequest, TxStoreResponse},
+    State,
+};
+use cuprate_database::RuntimeError;
+use cuprate_txpool::service::{
+    interface::{TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest, TxpoolWriteResponse},
+    TxpoolReadHandle, TxpoolWriteHandle,
+};
+
+use super::{DandelionTx, TxId};
+
+/// The dandelion tx-store service.
+///
+/// This is just mapping the interface [`cuprate_dandelion_tower`] wants to what [`cuprate_txpool`] provides.
 pub struct TxStoreService {
     pub txpool_read_handle: TxpoolReadHandle,
     pub txpool_write_handle: TxpoolWriteHandle,
@@ -31,17 +43,34 @@ impl Service<TxStoreRequest<TxId>> for TxStoreService {
                 .clone()
                 .oneshot(TxpoolReadRequest::TxBlob(tx_id))
                 .map(|res| match res {
-                    Ok(TxpoolReadResponse::TxBlob(blob)) => Ok(TxStoreResponse::Transaction(Some(
-                        (DandelionTx(Bytes::from(blob)), todo!()),
-                    ))),
+                    Ok(TxpoolReadResponse::TxBlob {
+                        tx_blob,
+                        state_stem,
+                    }) => {
+                        let state = if state_stem {
+                            State::Stem
+                        } else {
+                            State::Fluff
+                        };
+
+                        Ok(TxStoreResponse::Transaction(Some((
+                            DandelionTx(Bytes::from(tx_blob)),
+                            state,
+                        ))))
+                    }
                     Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Transaction(None)),
                     Err(e) => Err(e.into()),
                     Ok(_) => unreachable!(),
                 })
                 .boxed(),
-            TxStoreRequest::Promote(tx_id) => {
-                todo!()
-            }
+            TxStoreRequest::Promote(tx_id) => self
+                .txpool_write_handle
+                .clone()
+                .oneshot(TxpoolWriteRequest::Promote(tx_id))
+                .map(|res| match res {
+                    Ok(_) | Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Ok),
+                    Err(e) => Err(e.into()),
+                })
+                .boxed(),
         }
     }
 }
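Both arms above lean on tower's ServiceExt::oneshot, which waits for readiness and issues a single call as one consuming future (hence the .clone() on the handle first). A minimal sketch of that usage with an illustrative service, not the commit's types:

    use tower::{service_fn, ServiceExt};

    async fn demo() -> Result<u32, tower::BoxError> {
        // A toy service; in the code above this is a cloned txpool handle.
        let svc = service_fn(|x: u32| async move { Ok::<_, tower::BoxError>(x + 1) });

        // `oneshot` consumes the service: wait for readiness, then call once.
        svc.oneshot(41).await
    }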
@@ -1,58 +1,76 @@
-use std::collections::HashSet;
-use std::future::ready;
-use std::sync::Arc;
-use std::task::{Context, Poll};
+use std::{
+    collections::HashSet,
+    future::ready,
+    sync::Arc,
+    task::{Context, Poll},
+};
 
-use crate::blockchain::ConcreteTxVerifierService;
-use crate::txpool::txs_being_handled::{tx_blob_hash, TxBeingHandledLocally, TxsBeingHandled};
 use bytes::Bytes;
-use cuprate_consensus::transactions::new_tx_verification_data;
-use cuprate_consensus::{
-    BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService,
-    ExtendedConsensusError, TxVerifierService, VerifyTxRequest, VerifyTxResponse,
-};
-use cuprate_dandelion_tower::pool::{DandelionPoolService, IncomingTx, IncomingTxBuilder};
-use cuprate_dandelion_tower::TxState;
-use cuprate_helper::asynch::rayon_spawn_async;
-use cuprate_txpool::service::interface::{
-    TxpoolReadRequest, TxpoolWriteRequest, TxpoolWriteResponse,
-};
-use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle};
-use cuprate_wire::NetworkAddress;
 use dashmap::DashSet;
-use futures::future::BoxFuture;
-use futures::FutureExt;
+use futures::{future::BoxFuture, FutureExt};
 use monero_serai::transaction::Transaction;
 use sha3::{Digest, Sha3_256};
 use tower::{Service, ServiceExt};
 
+use cuprate_consensus::{
+    transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse,
+    BlockChainContextService, ExtendedConsensusError, TxVerifierService, VerifyTxRequest,
+    VerifyTxResponse,
+};
+use cuprate_dandelion_tower::{
+    pool::{DandelionPoolService, IncomingTx, IncomingTxBuilder},
+    TxState,
+};
+use cuprate_helper::asynch::rayon_spawn_async;
+use cuprate_txpool::service::{
+    interface::{TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest, TxpoolWriteResponse},
+    TxpoolReadHandle, TxpoolWriteHandle,
+};
+use cuprate_types::TransactionVerificationData;
+use cuprate_wire::NetworkAddress;
+
+use crate::{
+    blockchain::ConcreteTxVerifierService,
+    constants::PANIC_CRITICAL_SERVICE_ERROR,
+    signals::REORG_LOCK,
+    txpool::txs_being_handled::{tx_blob_hash, TxBeingHandledLocally, TxsBeingHandled},
+};
+
+/// An error that can happen handling an incoming tx.
 pub enum IncomingTxError {
     Parse(std::io::Error),
     Consensus(ExtendedConsensusError),
     DuplicateTransaction,
 }
 
-pub enum IncomingTxs {
-    Bytes {
-        txs: Vec<Bytes>,
-        state: TxState<NetworkAddress>,
-    },
+/// Incoming transactions.
+pub struct IncomingTxs {
+    pub txs: Vec<Bytes>,
+    pub state: TxState<NetworkAddress>,
 }
 
+/// The transaction type used for dandelion++.
+#[derive(Clone)]
 struct DandelionTx(Bytes);
 
+/// A transaction ID/hash.
 type TxId = [u8; 32];
 
+/// The service that handles incoming transaction pool transactions.
+///
+/// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes.
 pub struct IncomingTxHandler {
-    txs_being_added: Arc<TxsBeingHandled>,
+    /// A store of txs currently being handled in incoming tx requests.
+    txs_being_handled: TxsBeingHandled,
+    /// The blockchain context cache.
     blockchain_context_cache: BlockChainContextService,
+    /// The dandelion txpool manager.
     dandelion_pool_manager: DandelionPoolService<DandelionTx, TxId, NetworkAddress>,
+    /// The transaction verifier service.
     tx_verifier_service: ConcreteTxVerifierService,
+    /// The txpool write handle.
    txpool_write_handle: TxpoolWriteHandle,
+    /// The txpool read handle.
     txpool_read_handle: TxpoolReadHandle,
 }
@@ -66,41 +84,44 @@ impl Service<IncomingTxs> for IncomingTxHandler {
     }
 
     fn call(&mut self, req: IncomingTxs) -> Self::Future {
-        let IncomingTxs::Bytes { mut txs, state } = req;
-
-        let mut local_tracker = self.txs_being_added.local_tracker();
-
-        txs.retain(|bytes| local_tracker.try_add_tx(bytes.as_ref()));
-
-        if txs.is_empty() {
-            return ready(Ok(())).boxed();
-        }
-
-        let mut blockchain_context_cache = self.blockchain_context_cache.clone();
-        let mut tx_verifier_service = self.tx_verifier_service.clone();
-        let mut txpool_write_handle = self.txpool_write_handle.clone();
-
-        async move {
-            let txs = rayon_spawn_async(move || {
-                txs.into_iter()
-                    .map(|bytes| {
-                        let tx = Transaction::read(&mut bytes.as_ref())
-                            .map_err(IncomingTxError::Parse)?;
-
-                        let tx = new_tx_verification_data(tx)
-                            .map_err(|e| IncomingTxError::Consensus(e.into()))?;
-
-                        Ok(Arc::new(tx))
-                    })
-                    .collect::<Result<Vec<_>, IncomingTxError>>()
-            })
-            .await?;
+        let IncomingTxs { txs, state } = req;
+
+        handle_incoming_txs(
+            txs,
+            state,
+            self.txs_being_handled.clone(),
+            self.blockchain_context_cache.clone(),
+            self.tx_verifier_service.clone(),
+            self.txpool_write_handle.clone(),
+            self.txpool_read_handle.clone(),
+            self.dandelion_pool_manager.clone(),
+        )
+        .boxed()
+    }
+}
+
+async fn handle_incoming_txs(
+    txs: Vec<Bytes>,
+    state: TxState<NetworkAddress>,
+    txs_being_handled: TxsBeingHandled,
+    mut blockchain_context_cache: BlockChainContextService,
+    mut tx_verifier_service: ConcreteTxVerifierService,
+    mut txpool_write_handle: TxpoolWriteHandle,
+    mut txpool_read_handle: TxpoolReadHandle,
+    mut dandelion_pool_manager: DandelionPoolService<DandelionTx, TxId, NetworkAddress>,
+) -> Result<(), IncomingTxError> {
+    let reorg_guard = REORG_LOCK.read().await;
+
+    let (txs, txs_being_handled_guard) =
+        prepare_incoming_txs(txs, txs_being_handled, &mut txpool_read_handle).await?;
 
     let BlockChainContextResponse::Context(context) = blockchain_context_cache
         .ready()
-        .await?
+        .await
+        .expect(PANIC_CRITICAL_SERVICE_ERROR)
         .call(BlockChainContextRequest::GetContext)
-        .await?
+        .await
+        .expect(PANIC_CRITICAL_SERVICE_ERROR)
     else {
         unreachable!()
     };
@@ -109,7 +130,8 @@ impl Service<IncomingTxs> for IncomingTxHandler {
 
     tx_verifier_service
         .ready()
-        .await?
+        .await
+        .expect(PANIC_CRITICAL_SERVICE_ERROR)
         .call(VerifyTxRequest::Prepped {
             txs: txs.clone(),
             current_chain_height: context.chain_height,
@@ -117,58 +139,70 @@ impl Service<IncomingTxs> for IncomingTxHandler {
             time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
             hf: context.current_hf,
         })
-        .await?;
+        .await
+        .map_err(IncomingTxError::Consensus)?;
 
-    txpool_write_handle
-        .ready()
-        .await?
-        .call(TxpoolWriteRequest::AddTransaction {
+    for tx in txs {
+        handle_valid_tx(
             tx,
-            state_stem: state.state_stem(),
-        })
-        .await;
+            state.clone(),
+            &mut txpool_write_handle,
+            &mut dandelion_pool_manager,
+        )
+        .await
+    }
 
-    todo!()
-    }
-    .boxed()
-}
+    Ok(())
 }
 
-async fn handle_incoming_txs(
-    txs: Vec<Bytes>,
-    state: TxState<NetworkAddress>,
-    tx_being_handled_locally: TxBeingHandledLocally,
-    mut blockchain_context_cache: BlockChainContextService,
-    mut tx_verifier_service: ConcreteTxVerifierService,
-    mut txpool_write_handle: TxpoolWriteHandle,
-    mut txpool_read_handle: TxpoolReadHandle,
-    mut dandelion_pool_manager: DandelionPoolService<DandelionTx, TxId, NetworkAddress>,
-) -> Result<(), IncomingTxError> {
+/// Prepares the incoming transactions for verification.
+///
+/// This will filter out all transactions already in the pool or txs already being handled in another request.
+async fn prepare_incoming_txs(
+    tx_blobs: Vec<Bytes>,
+    txs_being_handled: TxsBeingHandled,
+    txpool_read_handle: &mut TxpoolReadHandle,
+) -> Result<(Vec<Arc<TransactionVerificationData>>, TxBeingHandledLocally), IncomingTxError> {
     let mut tx_blob_hashes = HashSet::new();
+    let mut txs_being_handled_loacally = txs_being_handled.local_tracker();
 
-    let txs = txs
+    // Compute the blob hash for each tx and filter out the txs currently being handled by another incoming tx batch.
+    let txs = tx_blobs
         .into_iter()
-        .map(|tx_blob| {
+        .filter_map(|tx_blob| {
             let tx_blob_hash = tx_blob_hash(tx_blob.as_ref());
 
+            // If a duplicate is in here the incoming tx batch contained the same tx twice.
             if !tx_blob_hashes.insert(tx_blob_hash) {
-                return Err(IncomingTxError::DuplicateTransaction);
+                return Some(Err(IncomingTxError::DuplicateTransaction));
             }
 
-            Ok((tx_blob_hash, tx_blob))
+            // If a duplicate is here it is being handled in another batch.
+            if !txs_being_handled_loacally.try_add_tx(tx_blob_hash) {
+                return None;
+            }
+
+            Some(Ok((tx_blob_hash, tx_blob)))
         })
         .collect::<Result<Vec<_>, _>>()?;
 
-    let TxpoolReadRequest::FilterKnownTxBlobHashes(tx_blob_hashes) = txpool_read_handle
+    // Filter the txs already in the txpool out.
+    // This will leave the txs already in the pool in [`TxBeingHandledLocally`] but that shouldn't be an issue.
+    let TxpoolReadResponse::FilterKnownTxBlobHashes(tx_blob_hashes) = txpool_read_handle
         .ready()
-        .await?
+        .await
+        .expect(PANIC_CRITICAL_SERVICE_ERROR)
         .call(TxpoolReadRequest::FilterKnownTxBlobHashes(tx_blob_hashes))
-        .await?
+        .await
+        .expect(PANIC_CRITICAL_SERVICE_ERROR)
     else {
         unreachable!()
     };
 
-    let txs = rayon_spawn_async(move || {
-        txs.into_iter()
+    // Now prepare the txs for verification.
+    rayon_spawn_async(move || {
+        let txs = txs
+            .into_iter()
             .filter_map(|(tx_blob_hash, tx_blob)| {
                 if tx_blob_hashes.contains(&tx_blob_hash) {
                     Some(tx_blob)
@@ -184,66 +218,55 @@ async fn handle_incoming_txs(
 
                 Ok(Arc::new(tx))
             })
-            .collect::<Result<Vec<_>, IncomingTxError>>()
+            .collect::<Result<Vec<_>, IncomingTxError>>()?;
+
+        Ok((txs, txs_being_handled_loacally))
     })
-    .await?;
+    .await
+}
 
-    let BlockChainContextResponse::Context(context) = blockchain_context_cache
-        .ready()
-        .await?
-        .call(BlockChainContextRequest::GetContext)
-        .await?
-    else {
-        unreachable!()
-    };
-
-    let context = context.unchecked_blockchain_context();
-
-    tx_verifier_service
-        .ready()
-        .await?
-        .call(VerifyTxRequest::Prepped {
-            txs: txs.clone(),
-            current_chain_height: context.chain_height,
-            top_hash: context.top_hash,
-            time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
-            hf: context.current_hf,
-        })
-        .await?;
-
-    for tx in txs {
-        let incoming_tx = IncomingTxBuilder::new(Bytes::copy_from_slice(&tx.tx_blob), tx.tx_hash);
+async fn handle_valid_tx(
+    tx: Arc<TransactionVerificationData>,
+    state: TxState<NetworkAddress>,
+    txpool_write_handle: &mut TxpoolWriteHandle,
+    dandelion_pool_manager: &mut DandelionPoolService<DandelionTx, TxId, NetworkAddress>,
+) {
+    let incoming_tx =
+        IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx.tx_blob)), tx.tx_hash);
 
     let TxpoolWriteResponse::AddTransaction(double_spend) = txpool_write_handle
         .ready()
-        .await?
+        .await
+        .expect(PANIC_CRITICAL_SERVICE_ERROR)
         .call(TxpoolWriteRequest::AddTransaction {
             tx,
             state_stem: state.state_stem(),
         })
-        .await?
+        .await
+        .expect("TODO")
     else {
         unreachable!()
     };
 
     // TODO: track double spends to quickly ignore them from their blob hash.
     if let Some(tx_hash) = double_spend {
-        continue;
+        return;
     };
 
     // TODO: check blockchain for double spends to prevent a race condition.
 
     // TODO: fill this in properly.
     let incoming_tx = incoming_tx
-        .with_routing_state(state.clone())
+        .with_routing_state(state)
         .with_state_in_db(None)
         .build()
         .unwrap();
 
     dandelion_pool_manager
         .ready()
-        .await?
+        .await
+        .expect(PANIC_CRITICAL_SERVICE_ERROR)
         .call(incoming_tx)
-        .await?;
-}
+        .await
+        .expect(PANIC_CRITICAL_SERVICE_ERROR);
 }
@@ -1 +0,0 @@
@@ -7,6 +7,7 @@ pub fn tx_blob_hash(tx_bytes: &[u8]) -> [u8; 32] {
     hasher.update(tx_bytes);
     hasher.finalize().into()
 }
 
 #[derive(Clone)]
 pub struct TxsBeingHandled(Arc<DashSet<[u8; 32]>>);
|
@ -25,11 +26,7 @@ pub struct TxBeingHandledLocally {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TxBeingHandledLocally {
|
impl TxBeingHandledLocally {
|
||||||
pub fn try_add_tx(&mut self, tx_bytes: &[u8]) -> bool {
|
pub fn try_add_tx(&mut self, tx_blob_hash: [u8; 32]) -> bool {
|
||||||
let mut hasher = Sha3_256::new();
|
|
||||||
hasher.update(tx_bytes);
|
|
||||||
let tx_blob_hash = hasher.finalize().into();
|
|
||||||
|
|
||||||
if !self.txs_being_handled.0.insert(tx_blob_hash) {
|
if !self.txs_being_handled.0.insert(tx_blob_hash) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
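The hunk above moves hashing out of try_add_tx so callers pass a precomputed blob hash. A minimal sketch of the overall in-flight-set pattern, assuming a DashSet shared between batches plus a per-batch tracker; the names and the Drop cleanup are assumptions for illustration, not shown on this page:

    use std::sync::Arc;

    use dashmap::DashSet;

    #[derive(Clone)]
    pub struct InFlightTxs(Arc<DashSet<[u8; 32]>>);

    impl InFlightTxs {
        pub fn new() -> Self {
            Self(Arc::new(DashSet::new()))
        }

        pub fn local_tracker(&self) -> LocalTracker {
            LocalTracker {
                set: self.clone(),
                claimed: Vec::new(),
            }
        }
    }

    pub struct LocalTracker {
        set: InFlightTxs,
        claimed: Vec<[u8; 32]>,
    }

    impl LocalTracker {
        /// Returns `false` if this blob hash is already being handled.
        pub fn try_add_tx(&mut self, tx_blob_hash: [u8; 32]) -> bool {
            if !self.set.0.insert(tx_blob_hash) {
                return false;
            }
            self.claimed.push(tx_blob_hash);
            true
        }
    }

    impl Drop for LocalTracker {
        fn drop(&mut self) {
            // Release this batch's claims so a failed tx can be retried later.
            for hash in &self.claimed {
                self.set.0.remove(hash);
            }
        }
    }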
|
@ -74,7 +74,7 @@ pub enum TxState<Id> {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Id> TxState<Id> {
|
impl<Id> TxState<Id> {
|
||||||
pub const fn state_stem(&self) -> bool {
|
pub fn state_stem(&self) -> bool {
|
||||||
matches!(self, Self::Local | Self::Stem { .. })
|
matches!(self, Self::Local | Self::Stem { .. })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -21,7 +21,7 @@ const WRITER_THREAD_NAME: &str = concat!(module_path!(), "::DatabaseWriter");
|
||||||
/// Calling [`tower::Service::call`] with a [`DatabaseWriteHandle`]
|
/// Calling [`tower::Service::call`] with a [`DatabaseWriteHandle`]
|
||||||
/// will return an `async`hronous channel that can be `.await`ed upon
|
/// will return an `async`hronous channel that can be `.await`ed upon
|
||||||
/// to receive the corresponding response.
|
/// to receive the corresponding response.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug)]
|
||||||
pub struct DatabaseWriteHandle<Req, Res> {
|
pub struct DatabaseWriteHandle<Req, Res> {
|
||||||
/// Sender channel to the database write thread-pool.
|
/// Sender channel to the database write thread-pool.
|
||||||
///
|
///
|
||||||
|
@ -30,6 +30,14 @@ pub struct DatabaseWriteHandle<Req, Res> {
|
||||||
crossbeam::channel::Sender<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>,
|
crossbeam::channel::Sender<(Req, oneshot::Sender<Result<Res, RuntimeError>>)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<Req, Res> Clone for DatabaseWriteHandle<Req, Res> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
sender: self.sender.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<Req, Res> DatabaseWriteHandle<Req, Res>
|
impl<Req, Res> DatabaseWriteHandle<Req, Res>
|
||||||
where
|
where
|
||||||
Req: Send + 'static,
|
Req: Send + 'static,
|
||||||
|
|
|
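The manual Clone impl added above matters for a generic handle: #[derive(Clone)] bounds every type parameter with Clone, while a channel sender is cloneable for any payload type. A small illustration of that (standard Rust behavior; the reasoning is inferred and the names are hypothetical):

    use std::sync::mpsc::Sender;

    struct NotClone;

    // With `#[derive(Clone)]` this would expand to
    // `impl<T: Clone> Clone for Handle<T>`, so `Handle<NotClone>` could never
    // be cloned. The manual impl below drops the unneeded bound.
    struct Handle<T> {
        sender: Sender<T>,
    }

    impl<T> Clone for Handle<T> {
        fn clone(&self) -> Self {
            Self {
                sender: self.sender.clone(),
            }
        }
    }

    fn demo(h: &Handle<NotClone>) -> Handle<NotClone> {
        h.clone()
    }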
@@ -1,13 +1,11 @@
 //! Tx-pool [`service`](super) interface.
 //!
 //! This module contains `cuprate_txpool`'s [`tower::Service`] request and response enums.
-use std::collections::HashSet;
-use std::sync::Arc;
+use std::{collections::HashSet, sync::Arc};
 
 use cuprate_types::TransactionVerificationData;
 
-use crate::types::TransactionHash;
+use crate::types::{TransactionBlobHash, TransactionHash};
 
 //---------------------------------------------------------------------------------------------------- TxpoolReadRequest
 /// The transaction pool [`tower::Service`] read request type.

@@ -17,8 +15,10 @@ pub enum TxpoolReadRequest {
     TxBlob(TransactionHash),
     /// A request for the [`TransactionVerificationData`] of a transaction in the tx pool.
     TxVerificationData(TransactionHash),
-    FilterKnownTxBlobHashes(HashSet<TransactionHash>),
+    /// A request to filter (remove) all **known** transactions from the set.
+    ///
+    /// The hash is **not** the transaction hash, it is the hash of the serialized tx-blob.
+    FilterKnownTxBlobHashes(HashSet<TransactionBlobHash>),
 }

@@ -26,12 +26,14 @@ pub enum TxpoolReadRequest {
 #[expect(clippy::large_enum_variant)]
 pub enum TxpoolReadResponse {
     /// A response containing the raw bytes of a transaction.
-    // TODO: use bytes::Bytes.
-    TxBlob(Vec<u8>),
+    TxBlob {
+        tx_blob: Vec<u8>,
+        state_stem: bool,
+    },
     /// A response of [`TransactionVerificationData`].
     TxVerificationData(TransactionVerificationData),
-    FilterKnownTxBlobHashes(HashSet<TransactionHash>),
+    /// The response for [`TxpoolReadRequest::FilterKnownTxBlobHashes`].
+    FilterKnownTxBlobHashes(HashSet<TransactionBlobHash>),
 }

@@ -53,6 +55,11 @@ pub enum TxpoolWriteRequest {
     ///
     /// Returns [`TxpoolWriteResponse::Ok`].
     RemoveTransaction(TransactionHash),
+    /// Promote a transaction from the stem pool to the fluff pool.
+    /// If the tx is already in the fluff pool this does nothing.
+    ///
+    /// Returns [`TxpoolWriteResponse::Ok`].
+    Promote(TransactionHash),
 }
@@ -58,6 +58,7 @@ fn map_request(
     match request {
         TxpoolReadRequest::TxBlob(tx_hash) => tx_blob(env, &tx_hash),
         TxpoolReadRequest::TxVerificationData(tx_hash) => tx_verification_data(env, &tx_hash),
+        _ => todo!(),
     }
 }
@@ -17,6 +17,9 @@ pub type KeyImage = [u8; 32];
 /// A transaction hash.
 pub type TransactionHash = [u8; 32];
 
+/// A transaction blob hash.
+pub type TransactionBlobHash = [u8; 32];
+
 bitflags::bitflags! {
     /// Flags representing the state of the transaction in the pool.
     #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]