Provide a way to create the machine
The BasicQueue returned obscures the TendermintImport struct. Accordingly, a Future scoped with access to it is returned upwards, which, when awaited, will create the machine. This makes creating the machine optional while maintaining scope boundaries. It is sufficient to create a 1-node net which produces and finalizes blocks.
parent 39984bd07b
commit 9b0dca06d0

8 changed files with 104 additions and 89 deletions
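As a rough sketch of the pattern this introduces (hypothetical names, not the actual serai-consensus items), a constructor can hand back a deferred setup future alongside the otherwise opaque queue, and only a caller that wants the machine ever awaits it. The sketch drives the future with futures::executor::block_on, from the same futures crate this commit adds as a dependency:

use std::{
  future::Future,
  sync::{Arc, RwLock},
};

// Hypothetical stand-ins for the real BasicQueue and TendermintMachine.
struct Queue;
struct Machine;

// Returns the queue plus a future which, when awaited, creates the machine and
// stores it in the shared slot. Dropping the future skips creation entirely.
fn build(machine: Arc<RwLock<Option<Machine>>>) -> (impl Future<Output = ()>, Queue) {
  let setup = async move {
    *machine.write().unwrap() = Some(Machine);
  };
  (setup, Queue)
}

fn main() {
  let machine = Arc::new(RwLock::new(None));
  let (setup, _queue) = build(machine.clone());
  // Only a node which should run the machine (an authority) awaits the future.
  futures::executor::block_on(setup);
  assert!(machine.read().unwrap().is_some());
}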
Cargo.lock (generated)

@@ -7387,6 +7387,7 @@ name = "serai-consensus"
 version = "0.1.0"
 dependencies = [
  "async-trait",
+ "futures",
  "log",
  "sc-basic-authorship",
  "sc-client-api",
@@ -17,6 +17,7 @@ async-trait = "0.1"
 log = "0.4"

+futures = "0.3"
 tokio = { version = "1", features = ["sync", "rt"] }

 sp-core = { git = "https://github.com/serai-dex/substrate" }
@@ -1,16 +1,15 @@
 use std::{
   pin::Pin,
   sync::{Arc, RwLock},
-  task::{Poll, Context},
+  task::{Poll, /* Wake, Waker, */ Context},
   future::Future,
+  time::SystemTime,
 };

-use tokio::runtime::Handle;
-
 use sp_inherents::CreateInherentDataProviders;
 use sp_runtime::traits::{Header, Block};
 use sp_blockchain::HeaderBackend;
-use sp_api::{TransactionFor, ProvideRuntimeApi};
+use sp_api::{BlockId, TransactionFor, ProvideRuntimeApi};

 use sp_consensus::{Error, Environment};
 use sc_consensus::{BlockImport, BlockImportStatus, BlockImportError, Link, BasicQueue};
@@ -20,6 +19,8 @@ use sc_client_api::{Backend, Finalizer};

 use substrate_prometheus_endpoint::Registry;

+use tendermint_machine::{ext::BlockNumber, TendermintMachine};
+
 use crate::tendermint::TendermintImport;

 pub type TendermintImportQueue<Block, Transaction> = BasicQueue<Block, Transaction>;
@@ -84,16 +85,33 @@ pub fn import_queue<
   env: E,
   spawner: &impl sp_core::traits::SpawnEssentialNamed,
   registry: Option<&Registry>,
-) -> TendermintImportQueue<B, TransactionFor<C, B>>
+) -> (impl Future<Output = ()>, TendermintImportQueue<B, TransactionFor<C, B>>)
 where
   I::Error: Into<Error>,
   TransactionFor<C, B>: Send + Sync + 'static,
 {
   let import = TendermintImport::new(client, inner, providers, env);

+  let authority = {
+    let machine_clone = import.machine.clone();
+    let mut import_clone = import.clone();
+    async move {
+      *machine_clone.write().unwrap() = Some(TendermintMachine::new(
+        import_clone.clone(),
+        // TODO
+        0,
+        (BlockNumber(1), SystemTime::now()),
+        import_clone
+          .get_proposal(&import_clone.client.header(BlockId::Number(0u8.into())).unwrap().unwrap())
+          .await,
+      ));
+    }
+  };
+
   let boxed = Box::new(import.clone());

   let queue =
     || BasicQueue::new(import.clone(), boxed.clone(), Some(boxed.clone()), spawner, registry);
-  *Handle::current().block_on(import.queue.write()) = Some(queue());
-  queue()
+  *futures::executor::block_on(import.queue.write()) = Some(queue());
+  (authority, queue())
 }
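Worth illustrating separately: the queue slot is now filled via futures::executor::block_on instead of tokio's Handle::current().block_on, so import_queue no longer has to run inside a Tokio runtime (Handle::current() panics when no runtime has been entered). A minimal sketch of that difference, with a toy tokio::sync::RwLock standing in for the real import.queue field (hypothetical names):

use tokio::sync::RwLock;

// A toy async write, shaped like `*block_on(import.queue.write()) = Some(queue())`.
async fn fill(slot: &RwLock<Option<u8>>) {
  *slot.write().await = Some(1);
}

fn main() {
  let slot = RwLock::new(None);
  // futures::executor::block_on drives the future on the current thread and needs
  // no ambient Tokio runtime; tokio::runtime::Handle::current() would panic here.
  futures::executor::block_on(fill(&slot));
  assert!(futures::executor::block_on(slot.read()).is_some());
}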
@@ -9,14 +9,12 @@ use sp_blockchain::HeaderBackend;
 use sp_api::{TransactionFor, ProvideRuntimeApi};

 use sp_consensus::{Error, Environment};
-use sc_consensus::{BlockImport, JustificationImport, BasicQueue};
+use sc_consensus::{BlockImport, JustificationImport};

 use sc_client_api::{Backend, Finalizer};

 use crate::tendermint::TendermintImport;

-pub type TendermintImportQueue<Block, Transaction> = BasicQueue<Block, Transaction>;
-
 #[async_trait]
 impl<
   B: Block,
@@ -1,7 +1,6 @@
-use std::sync::Arc;
+use std::{sync::Arc, future::Future};

 use sp_api::TransactionFor;
-use sp_consensus::Error;

 use sc_executor::{NativeVersion, NativeExecutionDispatch, NativeElseWasmExecutor};
 use sc_transaction_pool::FullPool;
@@ -50,8 +49,8 @@ pub fn import_queue(
   client: Arc<FullClient>,
   pool: Arc<FullPool<Block, FullClient>>,
   registry: Option<&Registry>,
-) -> Result<TendermintImportQueue<Block, TransactionFor<FullClient, Block>>, Error> {
-  Ok(import_queue::import_queue(
+) -> (impl Future<Output = ()>, TendermintImportQueue<Block, TransactionFor<FullClient, Block>>) {
+  import_queue::import_queue(
     client.clone(),
     client.clone(),
     Arc::new(|_, _| async { Ok(sp_timestamp::InherentDataProvider::from_system_time()) }),
@@ -64,18 +63,7 @@ pub fn import_queue(
     ),
     &task_manager.spawn_essential_handle(),
     registry,
-  ))
-}
-
-// If we're an authority, produce blocks
-pub fn authority(
-  task_manager: &TaskManager,
-  client: Arc<FullClient>,
-  network: Arc<sc_network::NetworkService<Block, <Block as sp_runtime::traits::Block>::Hash>>,
-  pool: Arc<FullPool<Block, FullClient>>,
-  registry: Option<&Registry>,
-) {
-  todo!()
+  )
 }

 /*
@@ -28,7 +28,7 @@ use sc_client_api::{Backend, Finalizer};

 use tendermint_machine::{
   ext::{BlockError, Commit, Network},
-  SignedMessage,
+  SignedMessage, TendermintHandle,
 };

 use crate::{
@@ -45,13 +45,16 @@ pub(crate) struct TendermintImport<
   I: Send + Sync + BlockImport<B, Transaction = TransactionFor<C, B>> + 'static,
   CIDP: CreateInherentDataProviders<B, ()> + 'static,
   E: Send + Sync + Environment<B> + 'static,
-> {
+> where
+  TransactionFor<C, B>: Send + Sync + 'static,
+{
   _block: PhantomData<B>,
   _backend: PhantomData<Be>,

   importing_block: Arc<RwLock<Option<B::Hash>>>,
+  pub(crate) machine: Arc<RwLock<Option<TendermintHandle<Self>>>>,

-  client: Arc<C>,
+  pub(crate) client: Arc<C>,
   pub(crate) inner: Arc<AsyncRwLock<I>>,
   providers: Arc<CIDP>,
@@ -67,6 +70,8 @@ impl<
   CIDP: CreateInherentDataProviders<B, ()> + 'static,
   E: Send + Sync + Environment<B> + 'static,
 > Clone for TendermintImport<B, Be, C, I, CIDP, E>
+where
+  TransactionFor<C, B>: Send + Sync + 'static,
 {
   fn clone(&self) -> Self {
     TendermintImport {
@@ -74,6 +79,7 @@ impl<
       _backend: PhantomData,

       importing_block: self.importing_block.clone(),
+      machine: self.machine.clone(),

       client: self.client.clone(),
       inner: self.inner.clone(),
@@ -107,6 +113,7 @@ where
       _backend: PhantomData,

       importing_block: Arc::new(RwLock::new(None)),
+      machine: Arc::new(RwLock::new(None)),

       client,
       inner: Arc::new(AsyncRwLock::new(inner)),
@@ -233,28 +240,28 @@ where
     Ok(())
   }

-  async fn get_proposal(&mut self, block: &B) -> B {
-    let inherent_data = match self.providers.create_inherent_data_providers(block.hash(), ()).await
-    {
-      Ok(providers) => match providers.create_inherent_data() {
-        Ok(data) => Some(data),
-        Err(err) => {
-          warn!(target: "tendermint", "Failed to create inherent data: {}", err);
-          None
-        }
-      },
-      Err(err) => {
-        warn!(target: "tendermint", "Failed to create inherent data providers: {}", err);
-        None
-      }
-    }
-    .unwrap_or_else(InherentData::new);
+  pub(crate) async fn get_proposal(&mut self, header: &B::Header) -> B {
+    let inherent_data =
+      match self.providers.create_inherent_data_providers(header.hash(), ()).await {
+        Ok(providers) => match providers.create_inherent_data() {
+          Ok(data) => Some(data),
+          Err(err) => {
+            warn!(target: "tendermint", "Failed to create inherent data: {}", err);
+            None
+          }
+        },
+        Err(err) => {
+          warn!(target: "tendermint", "Failed to create inherent data providers: {}", err);
+          None
+        }
+      }
+      .unwrap_or_else(InherentData::new);

     let proposer = self
       .env
       .write()
       .await
-      .init(block.header())
+      .init(header)
       .await
       .expect("Failed to create a proposer for the new block");
     // TODO: Production time, size limit
@@ -355,6 +362,6 @@ where

   async fn add_block(&mut self, block: B, commit: Commit<TendermintSigner>) -> B {
     self.import_justification_actual(block.hash(), (CONSENSUS_ID, commit.encode())).unwrap();
-    self.get_proposal(&block).await
+    self.get_proposal(block.header()).await
   }
 }
@@ -60,23 +60,23 @@ pub fn run() -> sc_cli::Result<()> {

     Some(Subcommand::CheckBlock(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
       let PartialComponents { client, task_manager, import_queue, .. } =
-        service::new_partial(&config)?;
+        service::new_partial(&config)?.1;
       Ok((cmd.run(client, import_queue), task_manager))
     }),

     Some(Subcommand::ExportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
-      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
+      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.1;
       Ok((cmd.run(client, config.database), task_manager))
     }),

     Some(Subcommand::ExportState(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
-      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
+      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.1;
       Ok((cmd.run(client, config.chain_spec), task_manager))
     }),

     Some(Subcommand::ImportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
       let PartialComponents { client, task_manager, import_queue, .. } =
-        service::new_partial(&config)?;
+        service::new_partial(&config)?.1;
       Ok((cmd.run(client, import_queue), task_manager))
     }),

@@ -85,14 +85,15 @@ pub fn run() -> sc_cli::Result<()> {
     }

     Some(Subcommand::Revert(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
-      let PartialComponents { client, task_manager, backend, .. } = service::new_partial(&config)?;
+      let PartialComponents { client, task_manager, backend, .. } =
+        service::new_partial(&config)?.1;
       Ok((cmd.run(client, backend, None), task_manager))
     }),

     Some(Subcommand::Benchmark(cmd)) => cli.create_runner(cmd)?.sync_run(|config| match cmd {
       BenchmarkCmd::Pallet(cmd) => cmd.run::<Block, service::ExecutorDispatch>(config),

-      BenchmarkCmd::Block(cmd) => cmd.run(service::new_partial(&config)?.client),
+      BenchmarkCmd::Block(cmd) => cmd.run(service::new_partial(&config)?.1.client),

       #[cfg(not(feature = "runtime-benchmarks"))]
       BenchmarkCmd::Storage(_) => {
@@ -101,12 +102,12 @@ pub fn run() -> sc_cli::Result<()> {

       #[cfg(feature = "runtime-benchmarks")]
       BenchmarkCmd::Storage(cmd) => {
-        let PartialComponents { client, backend, .. } = service::new_partial(&config)?;
+        let PartialComponents { client, backend, .. } = service::new_partial(&config)?.1;
         cmd.run(config, client, backend.expose_db(), backend.expose_storage())
       }

       BenchmarkCmd::Overhead(cmd) => {
-        let client = service::new_partial(&config)?.client;
+        let client = service::new_partial(&config)?.1.client;
         cmd.run(
           config,
           client.clone(),
@@ -117,7 +118,7 @@ pub fn run() -> sc_cli::Result<()> {
       }

       BenchmarkCmd::Extrinsic(cmd) => {
-        let client = service::new_partial(&config)?.client;
+        let client = service::new_partial(&config)?.1.client;
         cmd.run(
           client.clone(),
           inherent_benchmark_data()?,
@@ -134,7 +135,7 @@ pub fn run() -> sc_cli::Result<()> {
     }

     None => cli.create_runner(&cli.run)?.run_node_until_exit(|config| async {
-      service::new_full(config).map_err(sc_cli::Error::Service)
+      service::new_full(config).await.map_err(sc_cli::Error::Service)
     }),
   }
 }
@@ -1,4 +1,4 @@
-use std::sync::Arc;
+use std::{sync::Arc, future::Future};

 use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
 use sc_executor::NativeElseWasmExecutor;
@@ -19,7 +19,9 @@ type PartialComponents = sc_service::PartialComponents<
   Option<Telemetry>,
 >;

-pub fn new_partial(config: &Configuration) -> Result<PartialComponents, ServiceError> {
+pub fn new_partial(
+  config: &Configuration,
+) -> Result<(impl Future<Output = ()>, PartialComponents), ServiceError> {
   if config.keystore_remote.is_some() {
     return Err(ServiceError::Other("Remote Keystores are not supported".to_string()));
   }
@@ -63,38 +65,44 @@ pub fn new_partial(config: &Configuration) -> Result<PartialComponents, ServiceE
     client.clone(),
   );

-  let import_queue = serai_consensus::import_queue(
+  let (authority, import_queue) = serai_consensus::import_queue(
     &task_manager,
     client.clone(),
     transaction_pool.clone(),
     config.prometheus_registry(),
-  )?;
+  );

   let select_chain = serai_consensus::TendermintSelectChain::new(backend.clone());

-  Ok(sc_service::PartialComponents {
-    client,
-    backend,
-    task_manager,
-    import_queue,
-    keystore_container,
-    select_chain,
-    transaction_pool,
-    other: telemetry,
-  })
+  Ok((
+    authority,
+    sc_service::PartialComponents {
+      client,
+      backend,
+      task_manager,
+      import_queue,
+      keystore_container,
+      select_chain,
+      transaction_pool,
+      other: telemetry,
+    },
+  ))
 }

-pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
-  let sc_service::PartialComponents {
-    client,
-    backend,
-    mut task_manager,
-    import_queue,
-    keystore_container,
-    select_chain: _,
-    other: mut telemetry,
-    transaction_pool,
-  } = new_partial(&config)?;
+pub async fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
+  let (
+    authority,
+    sc_service::PartialComponents {
+      client,
+      backend,
+      mut task_manager,
+      import_queue,
+      keystore_container,
+      select_chain: _,
+      other: mut telemetry,
+      transaction_pool,
+    },
+  ) = new_partial(&config)?;

   let (network, system_rpc_tx, tx_handler_controller, network_starter) =
     sc_service::build_network(sc_service::BuildNetworkParams {
@@ -116,9 +124,6 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
     );
   }

-  let role = config.role.clone();
-  let prometheus_registry = config.prometheus_registry().cloned();
-
   let rpc_extensions_builder = {
     let client = client.clone();
     let pool = transaction_pool.clone();
@@ -133,6 +138,8 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
     })
   };

+  let is_authority = config.role.is_authority();
+
   sc_service::spawn_tasks(sc_service::SpawnTasksParams {
     network: network.clone(),
     client: client.clone(),
@@ -147,14 +154,8 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
     telemetry: telemetry.as_mut(),
   })?;

-  if role.is_authority() {
-    serai_consensus::authority(
-      &task_manager,
-      client,
-      network,
-      transaction_pool,
-      prometheus_registry.as_ref(),
-    );
+  if is_authority {
+    authority.await;
   }

   network_starter.start_network();