This commit is contained in:
Boog900 2024-12-04 18:58:50 +00:00
parent 3c751a6406
commit 8ad5983e98
No known key found for this signature in database
GPG key ID: 42AB1287CB0041C2
9 changed files with 54 additions and 75 deletions

View file

@ -1,67 +0,0 @@
# ____ _
# / ___| _ _ __ _ __ __ _| |_ ___
# | | | | | | '_ \| '__/ _` | __/ _ \
# | |__| |_| | |_) | | | (_| | || __/
# \____\__,_| .__/|_| \__,_|\__\___|
# |_|
#
## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet".
network = "Mainnet"
## Tracing config.
[tracing.stdout]
## The minimum level for log events to be displayed.
level = "info"
## Clear-net config.
[p2p.clear_net]
## The number of outbound connections we should make and maintain.
outbound_connections = 64
## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
extra_outbound_connections = 8
## The maximum number of incoming connections we should allow.
max_inbound_connections = 128
## The percent of outbound connections that should be to nodes we have not connected to before.
gray_peers_percent = 0.7
## The port to accept connections on, if left `0` no connections will be accepted.
p2p_port = 0
## The IP address to listen to connections on.
listen_on = "0.0.0.0"
## The Clear-net addressbook config.
[p2p.clear_net.address_book_config]
## The size of the white peer list, which contains peers we have made a connection to before.
max_white_list_length = 1_000
## The size of the gray peer list, which contains peers we have not made a connection to before.
max_gray_list_length = 5_000
## The amount of time between address book saves.
peer_save_period = { secs = 90, nanos = 0 }
## The block downloader config.
[p2p.block_downloader]
## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
buffer_size = 50_000_000
## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
in_progress_queue_size = 50_000_000
## The target size of a batch of blocks (bytes), must not exceed 100MB.
target_batch_size = 5_000_000
## The amount of time between checking the pool of connected peers for free peers to download blocks.
check_client_pool_interval = { secs = 30, nanos = 0 }
## Storage config
[storage]
## The number of reader threads to spawn.
reader_threads = "OnePerThread"
## Txpool storage config.
[storage.txpool]
## The database sync mode for the txpool.
sync_mode = "Async"
## The maximum size of all the txs in the pool (bytes).
max_txpool_byte_size = 100_000_000
## Blockchain storage config.
[storage.blockchain]
## The database sync mode for the blockchain.
sync_mode = "Async"

View file

@ -17,7 +17,7 @@ level = "info"
## Clear-net config. ## Clear-net config.
[p2p.clear_net] [p2p.clear_net]
## The number of outbound connections we should make and maintain. ## The number of outbound connections we should make and maintain.
outbound_connections = 64 outbound_connections = 32
## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing. ## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
extra_outbound_connections = 8 extra_outbound_connections = 8
## The maximum number of incoming we should allow. ## The maximum number of incoming we should allow.

View file

@ -133,11 +133,15 @@ impl BlockchainManager {
) { ) {
loop { loop {
tokio::select! { tokio::select! {
/*
Some(batch) = block_batch_rx.recv() => { Some(batch) = block_batch_rx.recv() => {
self.handle_incoming_block_batch( self.handle_incoming_block_batch(
batch, batch,
).await; ).await;
} }
*/
Some(incoming_command) = command_rx.recv() => { Some(incoming_command) = command_rx.recv() => {
self.handle_command(incoming_command).await; self.handle_command(incoming_command).await;
} }

View file

@ -47,10 +47,12 @@ fn main() {
let rt = init_tokio_rt(); let rt = init_tokio_rt();
let db_thread_pool = cuprate_database_service::init_thread_pool(cuprate_database_service::ReaderThreads::Percent(0.3));
let (mut blockchain_read_handle, mut blockchain_write_handle, _) = let (mut blockchain_read_handle, mut blockchain_write_handle, _) =
cuprate_blockchain::service::init(config.blockchain_config()).unwrap(); cuprate_blockchain::service::init_with_pool(config.blockchain_config(), db_thread_pool.clone()).unwrap();
let (txpool_read_handle, txpool_write_handle, _) = let (txpool_read_handle, txpool_write_handle, _) =
cuprate_txpool::service::init(config.txpool_config()).unwrap(); cuprate_txpool::service::init_with_pool(config.txpool_config(), db_thread_pool).unwrap();
rt.block_on(async move { rt.block_on(async move {
blockchain::check_add_genesis( blockchain::check_add_genesis(
@ -105,6 +107,7 @@ fn main() {
fn init_tokio_rt() -> tokio::runtime::Runtime { fn init_tokio_rt() -> tokio::runtime::Runtime {
tokio::runtime::Builder::new_multi_thread() tokio::runtime::Builder::new_multi_thread()
.worker_threads(13)
.enable_all() .enable_all()
.build() .build()
.unwrap() .unwrap()

View file

@ -2,14 +2,14 @@
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use std::sync::Arc; use std::sync::Arc;
use rayon::ThreadPool;
use cuprate_database::{ConcreteEnv, InitError}; use cuprate_database::{ConcreteEnv, InitError};
use cuprate_types::{AltBlockInformation, VerifiedBlockInformation}; use cuprate_types::{AltBlockInformation, VerifiedBlockInformation};
use crate::{ use crate::{
config::Config, config::Config,
service::{ service::{
init_read_service, init_write_service, init_read_service, init_write_service, init_read_service_with_pool,
types::{BlockchainReadHandle, BlockchainWriteHandle}, types::{BlockchainReadHandle, BlockchainWriteHandle},
}, },
}; };
@ -46,6 +46,27 @@ pub fn init(
Ok((readers, writer, db)) Ok((readers, writer, db))
} }
/// Initialize the blockchain database service, using an existing Rayon
/// thread pool for the readers instead of spawning a new one.
///
/// This behaves like `init`, except the reader service is backed by the
/// caller-provided `pool`, allowing one thread pool to be shared across
/// multiple database services.
///
/// # Errors
/// Returns [`InitError`] if the database environment could not be opened.
#[cold]
#[inline(never)] // Only called once; matches the sibling `init` functions.
pub fn init_with_pool(
    config: Config,
    pool: Arc<ThreadPool>,
) -> Result<
    (
        BlockchainReadHandle,
        BlockchainWriteHandle,
        Arc<ConcreteEnv>,
    ),
    InitError,
> {
    // Initialize the database itself.
    let db = Arc::new(crate::open(config)?);

    // Spawn the Reader thread pool (on the shared `pool`) and the Writer.
    let readers = init_read_service_with_pool(Arc::clone(&db), pool);
    let writer = init_write_service(Arc::clone(&db));

    Ok((readers, writer, db))
}
//---------------------------------------------------------------------------------------------------- Compact history //---------------------------------------------------------------------------------------------------- Compact history
/// Given a position in the compact history, returns the height offset that should be in that position. /// Given a position in the compact history, returns the height offset that should be in that position.
/// ///

View file

@ -128,7 +128,7 @@ mod write;
pub use write::init_write_service; pub use write::init_write_service;
mod free; mod free;
pub use free::init; pub use free::{init, init_with_pool};
mod types; mod types;
pub use types::{BlockchainReadHandle, BlockchainWriteHandle}; pub use types::{BlockchainReadHandle, BlockchainWriteHandle};

View file

@ -128,5 +128,5 @@ mod read;
mod types; mod types;
mod write; mod write;
pub use free::init; pub use free::{init, init_with_pool};
pub use types::{TxpoolReadHandle, TxpoolWriteHandle}; pub use types::{TxpoolReadHandle, TxpoolWriteHandle};

View file

@ -1,5 +1,7 @@
use std::sync::Arc; use std::sync::Arc;
use rayon::ThreadPool;
use cuprate_database::{ConcreteEnv, InitError}; use cuprate_database::{ConcreteEnv, InitError};
use crate::{ use crate::{
@ -10,6 +12,7 @@ use crate::{
}, },
Config, Config,
}; };
use crate::service::read::init_read_service_with_pool;
//---------------------------------------------------------------------------------------------------- Init //---------------------------------------------------------------------------------------------------- Init
#[cold] #[cold]
@ -35,3 +38,18 @@ pub fn init(
Ok((readers, writer, db)) Ok((readers, writer, db))
} }
/// Initialize the transaction-pool database service, using an existing
/// Rayon thread pool for the readers instead of spawning a new one.
///
/// This behaves like `init`, except the reader service is backed by the
/// caller-provided `pool`, allowing one thread pool to be shared across
/// multiple database services.
///
/// # Errors
/// Returns [`InitError`] if the database environment could not be opened.
#[cold]
#[inline(never)] // Only called once; matches the sibling `init` functions.
pub fn init_with_pool(
    config: Config,
    pool: Arc<ThreadPool>,
) -> Result<(TxpoolReadHandle, TxpoolWriteHandle, Arc<ConcreteEnv>), InitError> {
    // Initialize the database itself.
    let db = Arc::new(crate::open(config)?);

    // Spawn the Reader thread pool (on the shared `pool`) and the Writer.
    let readers = init_read_service_with_pool(Arc::clone(&db), pool);
    let writer = init_write_service(Arc::clone(&db));

    Ok((readers, writer, db))
}

View file

@ -44,7 +44,7 @@ pub(super) fn init_read_service(env: Arc<ConcreteEnv>, threads: ReaderThreads) -
/// Should be called _once_ per actual database. /// Should be called _once_ per actual database.
#[cold] #[cold]
#[inline(never)] // Only called once. #[inline(never)] // Only called once.
fn init_read_service_with_pool(env: Arc<ConcreteEnv>, pool: Arc<ThreadPool>) -> TxpoolReadHandle { pub(super) fn init_read_service_with_pool(env: Arc<ConcreteEnv>, pool: Arc<ThreadPool>) -> TxpoolReadHandle {
DatabaseReadService::new(env, pool, map_request) DatabaseReadService::new(env, pool, map_request)
} }