Mirror of https://github.com/serai-dex/serai.git (synced 2025-01-24 19:46:12 +00:00)
Remove Tendermint for GRANDPA
Updates to polkadot-v0.9.40, with a variety of dependency updates accordingly. Substrate thankfully now uses k256 0.13, paving the way for #256. We couldn't upgrade to polkadot-v0.9.40 without this removal, as polkadot-v0.9.40 makes fundamental changes to syncing. While we could have updated our Tendermint client instead, it isn't worth the continued development effort given its inability to work with multiple validator sets.

Purges sc-tendermint. Keeps tendermint-machine for #163.

Closes #137, #148, #157, #171. #96 and #99 should be re-scoped/clarified. #134 and #159 should also be clarified. #169 is no longer a priority since we're only considering temporal deployments of Tendermint. #170 also isn't, since we're looking at effectively sharded validator sets, so there should be no single large set needing high performance.
Commit aea6ac104f (parent 534e1bb11d)
42 changed files with 1089 additions and 2143 deletions
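The substantive change in the diffs below is swapping the runtime's and node's consensus wiring from the in-house Tendermint client to BABE (block production), GRANDPA (finality), and authority discovery. As a quick orientation, here is a minimal, self-contained Rust sketch of the BABE timing constants the new runtime declares; the constant values mirror the diff, while the derived slot count and the printout are illustrative additions, not part of the commit:

// Sketch only: values taken from the runtime diff below, arithmetic added for clarity.
const TARGET_BLOCK_TIME: u64 = 6; // seconds, as defined in the runtime
const MINUTES: u64 = 60 / TARGET_BLOCK_TIME; // blocks (slots) per minute = 10
const HOURS: u64 = MINUTES * 60; // 600
const DAYS: u64 = HOURS * 24; // 14_400

// BABE's `c` parameter: roughly a 1-in-4 target probability that a slot is claimed by a
// primary (VRF-selected) author; the rest fall back to secondary plain slots.
const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);

fn main() {
  // The runtime sets `EpochDuration = 1 * DAYS`, i.e. one epoch per day of 6-second slots.
  println!("slots per epoch: {}", DAYS);
  println!("primary slot probability: {}/{}", PRIMARY_PROBABILITY.0, PRIMARY_PROBABILITY.1);
}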
Cargo.lock (generated): 837 changes. File diff suppressed because it is too large.
@@ -23,6 +23,8 @@ members = [
   "processor/messages",
   "processor",
 
+  "tendermint",
+
   "substrate/serai/primitives",
   "substrate/serai/client",
 
@@ -35,11 +37,6 @@ members = [
   "substrate/validator-sets/primitives",
   "substrate/validator-sets/pallet",
 
-  "substrate/tendermint/machine",
-  "substrate/tendermint/primitives",
-  "substrate/tendermint/client",
-  "substrate/tendermint/pallet",
-
   "substrate/runtime",
   "substrate/node",
 ]
@@ -28,6 +28,11 @@ wallet.
 - `processor`: A generic chain processor to process data for Serai and process
   events from Serai, executing transactions as expected and needed.
 
+- `tendermint`: An abstracted implementation of the Tendermint BFT algorithm,
+  planned to be used in our own micro-blockchain for temporal data needed to
+  coordinate the Serai validators, yet irrelevant to the Serai network as a
+  whole.
+
 - `substrate`: Substrate crates used to instantiate the Serai network.
 
 - `deploy`: Scripts to deploy a Serai node/test environment.
@@ -56,10 +56,6 @@ exceptions = [
 
   { allow = ["AGPL-3.0"], name = "validator-sets-pallet" },
 
-  { allow = ["AGPL-3.0"], name = "sp-tendermint" },
-  { allow = ["AGPL-3.0"], name = "pallet-tendermint" },
-  { allow = ["AGPL-3.0"], name = "sc-tendermint" },
-
   { allow = ["AGPL-3.0"], name = "serai-runtime" },
   { allow = ["AGPL-3.0"], name = "serai-node" },
 
@@ -6,7 +6,7 @@ ADD common /serai/common
 ADD crypto /serai/crypto
 ADD coins /serai/coins
 ADD processor /serai/processor
-ADD contracts /serai/contracts
+ADD tendermint /serai/tendermint
 ADD substrate /serai/substrate
 ADD Cargo.toml /serai
 ADD Cargo.lock /serai
@@ -2,7 +2,7 @@
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use scale::{Encode, Decode};
+use scale::Encode;
 
 use sp_runtime::RuntimeDebug;
 
@@ -12,7 +12,7 @@ pub use in_instructions_primitives as primitives;
 use primitives::{InInstruction, InInstructionWithBalance, SignedBatch};
 
 #[derive(Clone, Copy, Encode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Decode, thiserror::Error))]
+#[cfg_attr(feature = "std", derive(scale::Decode, thiserror::Error))]
 pub enum PalletError {
   #[cfg_attr(feature = "std", error("batch for unrecognized network"))]
   UnrecognizedNetwork,
@@ -42,7 +42,6 @@ pub mod pallet {
   }
 
   #[pallet::pallet]
-  #[pallet::generate_store(pub(crate) trait Store)]
   pub struct Pallet<T>(PhantomData<T>);
 
   // Latest block number agreed upon for a coin
@@ -16,16 +16,19 @@ async-trait = "0.1"
 
 clap = { version = "4", features = ["derive"] }
 
+futures = "0.3"
 jsonrpsee = { version = "0.16", features = ["server"] }
 
 sp-core = { git = "https://github.com/serai-dex/substrate" }
 sp-keyring = { git = "https://github.com/serai-dex/substrate" }
 sp-inherents = { git = "https://github.com/serai-dex/substrate" }
+sp-timestamp = { git = "https://github.com/serai-dex/substrate" }
 sp-runtime = { git = "https://github.com/serai-dex/substrate" }
 sp-blockchain = { git = "https://github.com/serai-dex/substrate" }
 sp-api = { git = "https://github.com/serai-dex/substrate" }
 sp-block-builder = { git = "https://github.com/serai-dex/substrate" }
 sp-consensus = { git = "https://github.com/serai-dex/substrate" }
+sp-consensus-babe = { git = "https://github.com/serai-dex/substrate" }
 
 frame-benchmarking = { git = "https://github.com/serai-dex/substrate" }
 frame-benchmarking-cli = { git = "https://github.com/serai-dex/substrate" }
@@ -39,9 +42,18 @@ sc-executor = { git = "https://github.com/serai-dex/substrate" }
 sc-service = { git = "https://github.com/serai-dex/substrate" }
 sc-client-db = { git = "https://github.com/serai-dex/substrate" }
 sc-client-api = { git = "https://github.com/serai-dex/substrate" }
+sc-network-common = { git = "https://github.com/serai-dex/substrate" }
 sc-network = { git = "https://github.com/serai-dex/substrate" }
 sc-consensus = { git = "https://github.com/serai-dex/substrate" }
 
+sc-consensus-babe = { git = "https://github.com/serai-dex/substrate" }
+sc-consensus-babe-rpc = { git = "https://github.com/serai-dex/substrate" }
+
+sc-consensus-grandpa = { git = "https://github.com/serai-dex/substrate" }
+sc-consensus-grandpa-rpc = { git = "https://github.com/serai-dex/substrate" }
+
+sc-authority-discovery = { git = "https://github.com/serai-dex/substrate" }
+
 sc-telemetry = { git = "https://github.com/serai-dex/substrate" }
 sc-cli = { git = "https://github.com/serai-dex/substrate" }
 
@@ -50,8 +62,6 @@ sc-rpc-api = { git = "https://github.com/serai-dex/substrate" }
 substrate-frame-rpc-system = { git = "https://github.com/serai-dex/substrate" }
 pallet-transaction-payment-rpc = { git = "https://github.com/serai-dex/substrate" }
 
-sc-tendermint = { path = "../tendermint/client" }
-
 [build-dependencies]
 substrate-build-script-utils = { git = "https://github.com/serai-dex/substrate.git" }
 
@@ -3,9 +3,9 @@ use sp_core::Pair as PairTrait;
 use sc_service::ChainType;
 
 use serai_runtime::{
-  primitives::*, tokens::primitives::ADDRESS as TOKENS_ADDRESS, tendermint::crypto::Public,
-  WASM_BINARY, opaque::SessionKeys, GenesisConfig, SystemConfig, BalancesConfig, AssetsConfig,
-  ValidatorSetsConfig, SessionConfig,
+  primitives::*, tokens::primitives::ADDRESS as TOKENS_ADDRESS, WASM_BINARY, opaque::SessionKeys,
+  BABE_GENESIS_EPOCH_CONFIG, GenesisConfig, SystemConfig, BalancesConfig, AssetsConfig,
+  ValidatorSetsConfig, SessionConfig, BabeConfig, GrandpaConfig, AuthorityDiscoveryConfig,
 };
 
 pub type ChainSpec = sc_service::GenericChainSpec<GenesisConfig>;
@@ -21,7 +21,11 @@ fn testnet_genesis(
 ) -> GenesisConfig {
   let session_key = |name| {
     let key = account_from_name(name);
-    (key, key, SessionKeys { tendermint: Public::from(key) })
+    (
+      key,
+      key,
+      SessionKeys { babe: key.into(), grandpa: key.into(), authority_discovery: key.into() },
+    )
   };
 
   GenesisConfig {
@@ -47,7 +51,6 @@ fn testnet_genesis(
       accounts: vec![],
     },
 
-    session: SessionConfig { keys: validators.iter().map(|name| session_key(*name)).collect() },
     validator_sets: ValidatorSetsConfig {
       bond: Amount(1_000_000 * 10_u64.pow(8)),
       networks: vec![
@@ -57,6 +60,11 @@ fn testnet_genesis(
       ],
       participants: validators.iter().map(|name| account_from_name(name)).collect(),
     },
+    session: SessionConfig { keys: validators.iter().map(|name| session_key(*name)).collect() },
+    babe: BabeConfig { authorities: vec![], epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG) },
+    grandpa: GrandpaConfig { authorities: vec![] },
+
+    authority_discovery: AuthorityDiscoveryConfig { keys: vec![] },
   }
 }
 
@@ -1,3 +1,5 @@
+use std::sync::Arc;
+
 use serai_runtime::Block;
 
 use sc_service::{PruningMode, PartialComponents};
@@ -9,7 +11,7 @@ use crate::{
   chain_spec,
   cli::{Cli, Subcommand},
   command_helper::{RemarkBuilder, inherent_benchmark_data},
-  service,
+  service::{self, FullClient},
 };
 
 impl SubstrateCli for Cli {
@@ -62,23 +64,23 @@ pub fn run() -> sc_cli::Result<()> {
 
     Some(Subcommand::CheckBlock(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
       let PartialComponents { client, task_manager, import_queue, .. } =
-        service::new_partial(&config)?.1;
+        service::new_partial(&config)?;
       Ok((cmd.run(client, import_queue), task_manager))
     }),
 
     Some(Subcommand::ExportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
-      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.1;
+      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
       Ok((cmd.run(client, config.database), task_manager))
     }),
 
     Some(Subcommand::ExportState(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
-      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.1;
+      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
       Ok((cmd.run(client, config.chain_spec), task_manager))
     }),
 
     Some(Subcommand::ImportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
       let PartialComponents { client, task_manager, import_queue, .. } =
-        service::new_partial(&config)?.1;
+        service::new_partial(&config)?;
       Ok((cmd.run(client, import_queue), task_manager))
     }),
 
@@ -87,15 +89,19 @@ pub fn run() -> sc_cli::Result<()> {
     }
 
     Some(Subcommand::Revert(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
-      let PartialComponents { client, task_manager, backend, .. } =
-        service::new_partial(&config)?.1;
-      Ok((cmd.run(client, backend, None), task_manager))
+      let PartialComponents { client, task_manager, backend, .. } = service::new_partial(&config)?;
+      let aux_revert = Box::new(|client: Arc<FullClient>, backend, blocks| {
+        sc_consensus_babe::revert(client.clone(), backend, blocks)?;
+        sc_consensus_grandpa::revert(client, blocks)?;
+        Ok(())
+      });
+      Ok((cmd.run(client, backend, Some(aux_revert)), task_manager))
     }),
 
     Some(Subcommand::Benchmark(cmd)) => cli.create_runner(cmd)?.sync_run(|config| match cmd {
       BenchmarkCmd::Pallet(cmd) => cmd.run::<Block, service::ExecutorDispatch>(config),
 
-      BenchmarkCmd::Block(cmd) => cmd.run(service::new_partial(&config)?.1.client),
+      BenchmarkCmd::Block(cmd) => cmd.run(service::new_partial(&config)?.client),
 
       #[cfg(not(feature = "runtime-benchmarks"))]
       BenchmarkCmd::Storage(_) => {
@@ -104,12 +110,12 @@ pub fn run() -> sc_cli::Result<()> {
 
       #[cfg(feature = "runtime-benchmarks")]
       BenchmarkCmd::Storage(cmd) => {
-        let PartialComponents { client, backend, .. } = service::new_partial(&config)?.1;
+        let PartialComponents { client, backend, .. } = service::new_partial(&config)?;
         cmd.run(config, client, backend.expose_db(), backend.expose_storage())
       }
 
       BenchmarkCmd::Overhead(cmd) => {
-        let client = service::new_partial(&config)?.1.client;
+        let client = service::new_partial(&config)?.client;
        cmd.run(
           config,
           client.clone(),
@@ -120,7 +126,7 @@ pub fn run() -> sc_cli::Result<()> {
       }
 
       BenchmarkCmd::Extrinsic(cmd) => {
-        let client = service::new_partial(&config)?.1.client;
+        let client = service::new_partial(&config)?.client;
         cmd.run(
           client.clone(),
           inherent_benchmark_data()?,
@@ -1,42 +1,24 @@
-use std::{
-  error::Error,
-  boxed::Box,
-  sync::Arc,
-  time::{UNIX_EPOCH, SystemTime, Duration},
-  str::FromStr,
-};
+use std::{boxed::Box, sync::Arc};
 
-use sp_runtime::traits::{Block as BlockTrait};
-use sp_inherents::CreateInherentDataProviders;
-use sp_consensus::DisableProofRecording;
-use sp_api::ProvideRuntimeApi;
+use futures::stream::StreamExt;
+
+use sp_timestamp::InherentDataProvider as TimestampInherent;
+use sp_consensus_babe::{SlotDuration, inherents::InherentDataProvider as BabeInherent};
 
 use sc_executor::{NativeVersion, NativeExecutionDispatch, NativeElseWasmExecutor};
-use sc_transaction_pool::FullPool;
-use sc_network::NetworkService;
+
+use sc_network_common::sync::warp::WarpSyncParams;
+use sc_network::{Event, NetworkEventStream};
 use sc_service::{error::Error as ServiceError, Configuration, TaskManager, TFullClient};
 
 use sc_client_api::BlockBackend;
 
 use sc_telemetry::{Telemetry, TelemetryWorker};
 
-pub(crate) use sc_tendermint::{
-  TendermintClientMinimal, TendermintValidator, TendermintImport, TendermintAuthority,
-  TendermintSelectChain, import_queue,
-};
-use serai_runtime::{self as runtime, BLOCK_SIZE, TARGET_BLOCK_TIME, opaque::Block, RuntimeApi};
+use serai_runtime::{self as runtime, opaque::Block, RuntimeApi};
 
-type FullBackend = sc_service::TFullBackend<Block>;
-pub type FullClient = TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
+use sc_consensus_babe::{self, SlotProportion};
+use sc_consensus_grandpa as grandpa;
 
-type PartialComponents = sc_service::PartialComponents<
-  FullClient,
-  FullBackend,
-  TendermintSelectChain<Block, FullBackend>,
-  sc_consensus::DefaultImportQueue<Block, FullClient>,
-  sc_transaction_pool::FullPool<Block, FullClient>,
-  Option<Telemetry>,
->;
-
 pub struct ExecutorDispatch;
 impl NativeExecutionDispatch for ExecutorDispatch {
@@ -54,57 +36,36 @@ impl NativeExecutionDispatch for ExecutorDispatch {
   }
 }
 
-pub struct Cidp;
-#[async_trait::async_trait]
-impl CreateInherentDataProviders<Block, ()> for Cidp {
-  type InherentDataProviders = ();
-  async fn create_inherent_data_providers(
-    &self,
-    _: <Block as BlockTrait>::Hash,
-    _: (),
-  ) -> Result<Self::InherentDataProviders, Box<dyn Send + Sync + Error>> {
-    Ok(())
-  }
+type FullBackend = sc_service::TFullBackend<Block>;
+pub type FullClient = TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
+
+type SelectChain = sc_consensus::LongestChain<FullBackend, Block>;
+type GrandpaBlockImport = grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, SelectChain>;
+type BabeBlockImport = sc_consensus_babe::BabeBlockImport<Block, FullClient, GrandpaBlockImport>;
+
+type PartialComponents = sc_service::PartialComponents<
+  FullClient,
+  FullBackend,
+  SelectChain,
+  sc_consensus::DefaultImportQueue<Block, FullClient>,
+  sc_transaction_pool::FullPool<Block, FullClient>,
+  (
+    BabeBlockImport,
+    sc_consensus_babe::BabeLink<Block>,
+    grandpa::LinkHalf<Block, FullClient, SelectChain>,
+    grandpa::SharedVoterState,
+    Option<Telemetry>,
+  ),
+>;
+
+fn create_inherent_data_providers(
+  slot_duration: SlotDuration,
+) -> (BabeInherent, TimestampInherent) {
+  let timestamp = TimestampInherent::from_system_time();
+  (BabeInherent::from_timestamp_and_slot_duration(*timestamp, slot_duration), timestamp)
 }
 
-pub struct TendermintValidatorFirm;
-impl TendermintClientMinimal for TendermintValidatorFirm {
-  // TODO: This is passed directly to propose, which warns not to use the hard limit as finalize
-  // may grow the block. We don't use storage proofs and use the Executive finalize_block. Is that
-  // guaranteed not to grow the block?
-  const PROPOSED_BLOCK_SIZE_LIMIT: usize = { BLOCK_SIZE as usize };
-  // 3 seconds
-  const BLOCK_PROCESSING_TIME_IN_SECONDS: u32 = { (TARGET_BLOCK_TIME / 2) as u32 };
-  // 1 second
-  const LATENCY_TIME_IN_SECONDS: u32 = { (TARGET_BLOCK_TIME / 2 / 3) as u32 };
-
-  type Block = Block;
-  type Backend = sc_client_db::Backend<Block>;
-  type Api = <FullClient as ProvideRuntimeApi<Block>>::Api;
-  type Client = FullClient;
-}
-
-impl TendermintValidator for TendermintValidatorFirm {
-  type CIDP = Cidp;
-  type Environment = sc_basic_authorship::ProposerFactory<
-    FullPool<Block, FullClient>,
-    Self::Backend,
-    Self::Client,
-    DisableProofRecording,
-  >;
-
-  type Network = Arc<NetworkService<Block, <Block as BlockTrait>::Hash>>;
-}
-
-pub fn new_partial(
-  config: &Configuration,
-) -> Result<(TendermintImport<TendermintValidatorFirm>, PartialComponents), ServiceError> {
-  debug_assert_eq!(TARGET_BLOCK_TIME, 6);
-
-  if config.keystore_remote.is_some() {
-    return Err(ServiceError::Other("Remote Keystores are not supported".to_string()));
-  }
-
+pub fn new_partial(config: &Configuration) -> Result<PartialComponents, ServiceError> {
   let telemetry = config
     .telemetry_endpoints
     .clone()
@@ -136,6 +97,8 @@ pub fn new_partial(
     telemetry
   });
 
+  let select_chain = sc_consensus::LongestChain::new(backend.clone());
+
   let transaction_pool = sc_transaction_pool::BasicPool::new_full(
     config.transaction_pool.clone(),
     config.role.is_authority().into(),
@@ -144,55 +107,69 @@ pub fn new_partial(
     client.clone(),
   );
 
-  let (authority, import_queue) = import_queue(
-    &task_manager.spawn_essential_handle(),
+  let (grandpa_block_import, grandpa_link) = grandpa::block_import(
     client.clone(),
+    &client,
+    select_chain.clone(),
+    telemetry.as_ref().map(Telemetry::handle),
+  )?;
+  let justification_import = grandpa_block_import.clone();
+
+  let (block_import, babe_link) = sc_consensus_babe::block_import(
+    sc_consensus_babe::configuration(&*client)?,
+    grandpa_block_import,
+    client.clone(),
+  )?;
+
+  let slot_duration = babe_link.config().slot_duration();
+  let import_queue = sc_consensus_babe::import_queue(
+    babe_link.clone(),
+    block_import.clone(),
+    Some(Box::new(justification_import)),
+    client.clone(),
+    select_chain.clone(),
+    move |_, _| async move { Ok(create_inherent_data_providers(slot_duration)) },
+    &task_manager.spawn_essential_handle(),
     config.prometheus_registry(),
-  );
+    telemetry.as_ref().map(Telemetry::handle),
+  )?;
 
-  let select_chain = TendermintSelectChain::new(backend.clone());
-
-  Ok((
-    authority,
-    sc_service::PartialComponents {
-      client,
-      backend,
-      task_manager,
-      import_queue,
-      keystore_container,
-      select_chain,
-      transaction_pool,
-      other: telemetry,
-    },
-  ))
+  Ok(sc_service::PartialComponents {
+    client,
+    backend,
+    task_manager,
+    keystore_container,
+    select_chain,
+    import_queue,
+    transaction_pool,
+    other: (block_import, babe_link, grandpa_link, grandpa::SharedVoterState::empty(), telemetry),
+  })
 }
 
 pub async fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
-  let (
-    authority,
-    sc_service::PartialComponents {
-      client,
-      backend,
-      mut task_manager,
-      import_queue,
-      keystore_container,
-      select_chain: _,
-      other: mut telemetry,
-      transaction_pool,
-    },
-  ) = new_partial(&config)?;
+  let sc_service::PartialComponents {
+    client,
+    backend,
+    mut task_manager,
+    import_queue,
+    keystore_container,
+    select_chain,
+    transaction_pool,
+    other: (block_import, babe_link, grandpa_link, shared_voter_state, mut telemetry),
+  } = new_partial(&config)?;
 
-  let is_authority = config.role.is_authority();
-  let genesis = client.block_hash(0).unwrap().unwrap();
-  let tendermint_protocol = sc_tendermint::protocol_name(genesis, config.chain_spec.fork_id());
-  if is_authority {
-    config
-      .network
-      .extra_sets
-      .push(sc_tendermint::set_config(tendermint_protocol.clone(), BLOCK_SIZE.into()));
-  }
+  let publish_non_global_ips = config.network.allow_non_globals_in_dht;
+  let grandpa_protocol_name =
+    grandpa::protocol_standard_name(&client.block_hash(0).unwrap().unwrap(), &config.chain_spec);
 
-  let (network, system_rpc_tx, tx_handler_controller, network_starter) =
+  config.network.extra_sets.push(grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()));
+  let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new(
+    backend.clone(),
+    grandpa_link.shared_authority_set().clone(),
+    vec![],
+  ));
+
+  let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
     sc_service::build_network(sc_service::BuildNetworkParams {
       config: &config,
       client: client.clone(),
@@ -200,7 +177,7 @@ pub async fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
       spawn_handle: task_manager.spawn_handle(),
       import_queue,
       block_announce_validator_builder: None,
-      warp_sync_params: None,
+      warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
     })?;
 
   if config.offchain_worker.enabled {
@@ -212,7 +189,7 @@ pub async fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
    );
  }
 
-  let rpc_extensions_builder = {
+  let rpc_builder = {
     let client = client.clone();
     let pool = transaction_pool.clone();
 
@@ -226,48 +203,113 @@ pub async fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
     })
   };
 
-  let genesis_time = if config.chain_spec.id() != "devnet" {
-    UNIX_EPOCH + Duration::from_secs(u64::from_str(&std::env::var("GENESIS").unwrap()).unwrap())
-  } else {
-    SystemTime::now()
-  };
+  let enable_grandpa = !config.disable_grandpa;
+  let role = config.role.clone();
+  let force_authoring = config.force_authoring;
+  let name = config.network.node_name.clone();
+  let prometheus_registry = config.prometheus_registry().cloned();
 
-  let registry = config.prometheus_registry().cloned();
+  let keystore = keystore_container.keystore();
+
   sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-    network: network.clone(),
-    client: client.clone(),
-    keystore: keystore_container.sync_keystore(),
-    task_manager: &mut task_manager,
-    transaction_pool: transaction_pool.clone(),
-    rpc_builder: rpc_extensions_builder,
+    config,
     backend,
+    client: client.clone(),
+    keystore: keystore.clone(),
+    network: network.clone(),
+    rpc_builder,
+    transaction_pool: transaction_pool.clone(),
+    task_manager: &mut task_manager,
     system_rpc_tx,
     tx_handler_controller,
-    config,
+    sync_service: sync_service.clone(),
     telemetry: telemetry.as_mut(),
   })?;
 
-  if is_authority {
-    task_manager.spawn_essential_handle().spawn(
-      "tendermint",
-      None,
-      TendermintAuthority::new(
-        genesis_time,
-        tendermint_protocol,
-        authority,
-        keystore_container.keystore(),
-        Cidp,
-        task_manager.spawn_essential_handle(),
-        sc_basic_authorship::ProposerFactory::new(
-          task_manager.spawn_handle(),
-          client,
-          transaction_pool,
-          registry.as_ref(),
-          telemetry.map(|telemtry| telemtry.handle()),
-        ),
-        network,
-        None,
+  if let sc_service::config::Role::Authority { .. } = &role {
+    let slot_duration = babe_link.config().slot_duration();
+    let babe_config = sc_consensus_babe::BabeParams {
+      keystore: keystore.clone(),
+      client: client.clone(),
+      select_chain,
+      env: sc_basic_authorship::ProposerFactory::new(
+        task_manager.spawn_handle(),
+        client.clone(),
+        transaction_pool,
+        prometheus_registry.as_ref(),
+        telemetry.as_ref().map(Telemetry::handle),
       ),
+      block_import,
+      sync_oracle: sync_service.clone(),
+      justification_sync_link: sync_service.clone(),
+      create_inherent_data_providers: move |_, _| async move {
+        Ok(create_inherent_data_providers(slot_duration))
+      },
+      force_authoring,
+      backoff_authoring_blocks: None::<()>,
+      babe_link,
+      block_proposal_slot_portion: SlotProportion::new(0.5),
+      max_block_proposal_slot_portion: None,
+      telemetry: telemetry.as_ref().map(Telemetry::handle),
+    };
+
+    task_manager.spawn_essential_handle().spawn_blocking(
+      "babe-proposer",
+      Some("block-authoring"),
+      sc_consensus_babe::start_babe(babe_config)?,
+    );
+  }
+
+  if role.is_authority() {
+    task_manager.spawn_handle().spawn(
+      "authority-discovery-worker",
+      Some("networking"),
+      sc_authority_discovery::new_worker_and_service_with_config(
+        #[allow(clippy::field_reassign_with_default)]
+        {
+          let mut worker = sc_authority_discovery::WorkerConfig::default();
+          worker.publish_non_global_ips = publish_non_global_ips;
+          worker
+        },
+        client,
+        network.clone(),
+        Box::pin(network.event_stream("authority-discovery").filter_map(|e| async move {
+          match e {
+            Event::Dht(e) => Some(e),
+            _ => None,
+          }
+        })),
+        sc_authority_discovery::Role::PublishAndDiscover(keystore.clone()),
+        prometheus_registry.clone(),
+      )
+      .0
+      .run(),
+    );
+  }
+
+  if enable_grandpa {
+    task_manager.spawn_essential_handle().spawn_blocking(
+      "grandpa-voter",
+      None,
+      grandpa::run_grandpa_voter(grandpa::GrandpaParams {
+        config: grandpa::Config {
+          gossip_duration: std::time::Duration::from_millis(333),
+          justification_period: 512,
+          name: Some(name),
+          observer_enabled: false,
+          keystore: if role.is_authority() { Some(keystore) } else { None },
+          local_role: role,
+          telemetry: telemetry.as_ref().map(Telemetry::handle),
+          protocol_name: grandpa_protocol_name,
+        },
+        link: grandpa_link,
+        network,
+        sync: Arc::new(sync_service),
+        telemetry: telemetry.as_ref().map(Telemetry::handle),
+        voting_rule: grandpa::VotingRulesBuilder::default().build(),
+        prometheus_registry,
+        shared_voter_state,
+      })?,
     );
   }
 
@@ -19,17 +19,23 @@ scale-info = { version = "2", default-features = false, features = ["derive"] }
 
 sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false }
 sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
 
+sp-offchain = { git = "https://github.com/serai-dex/substrate", default-features = false }
 sp-version = { git = "https://github.com/serai-dex/substrate", default-features = false }
 sp-inherents = { git = "https://github.com/serai-dex/substrate", default-features = false }
-sp-offchain = { git = "https://github.com/serai-dex/substrate", default-features = false }
 sp-session = { git = "https://github.com/serai-dex/substrate", default-features = false }
+sp-consensus-babe = { git = "https://github.com/serai-dex/substrate", default-features = false }
+sp-consensus-grandpa = { git = "https://github.com/serai-dex/substrate", default-features = false }
+
+sp-authority-discovery = { git = "https://github.com/serai-dex/substrate", default-features = false }
 
 sp-transaction-pool = { git = "https://github.com/serai-dex/substrate", default-features = false }
 sp-block-builder = { git = "https://github.com/serai-dex/substrate", default-features = false }
 
 sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false }
 sp-api = { git = "https://github.com/serai-dex/substrate", default-features = false }
 
-sp-tendermint = { path = "../tendermint/primitives", default-features = false }
-
 frame-system = { git = "https://github.com/serai-dex/substrate", default-features = false }
 frame-support = { git = "https://github.com/serai-dex/substrate", default-features = false }
 frame-executive = { git = "https://github.com/serai-dex/substrate", default-features = false }
@@ -37,6 +43,8 @@ frame-benchmarking = { git = "https://github.com/serai-dex/substrate", default-features = false }
 
 serai-primitives = { path = "../serai/primitives", default-features = false }
 
+pallet-timestamp = { git = "https://github.com/serai-dex/substrate", default-features = false }
+
 pallet-balances = { git = "https://github.com/serai-dex/substrate", default-features = false }
 pallet-assets = { git = "https://github.com/serai-dex/substrate", default-features = false }
 pallet-transaction-payment = { git = "https://github.com/serai-dex/substrate", default-features = false }
@@ -46,7 +54,10 @@ in-instructions-pallet = { path = "../in-instructions/pallet", default-features = false }
 
 validator-sets-pallet = { path = "../validator-sets/pallet", default-features = false }
 pallet-session = { git = "https://github.com/serai-dex/substrate", default-features = false }
-pallet-tendermint = { path = "../tendermint/pallet", default-features = false }
+pallet-babe = { git = "https://github.com/serai-dex/substrate", default-features = false }
+pallet-grandpa = { git = "https://github.com/serai-dex/substrate", default-features = false }
+
+pallet-authority-discovery = { git = "https://github.com/serai-dex/substrate", default-features = false }
 
 frame-system-rpc-runtime-api = { git = "https://github.com/serai-dex/substrate", default-features = false }
 pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/serai-dex/substrate", default-features = false }
@@ -61,23 +72,31 @@ std = [
 
   "sp-core/std",
   "sp-std/std",
 
+  "sp-offchain/std",
   "sp-version/std",
   "sp-inherents/std",
-  "sp-offchain/std",
   "sp-session/std",
+  "sp-consensus-babe/std",
+  "sp-consensus-grandpa/std",
+
+  "sp-authority-discovery/std",
 
   "sp-transaction-pool/std",
   "sp-block-builder/std",
 
   "sp-runtime/std",
   "sp-api/std",
 
-  "sp-tendermint/std",
-
   "frame-system/std",
   "frame-support/std",
   "frame-executive/std",
 
   "serai-primitives/std",
 
+  "pallet-timestamp/std",
+
   "pallet-balances/std",
   "pallet-transaction-payment/std",
 
@@ -87,7 +106,10 @@ std = [
 
   "validator-sets-pallet/std",
   "pallet-session/std",
-  "pallet-tendermint/std",
+  "pallet-babe/std",
+  "pallet-grandpa/std",
+
+  "pallet-authority-discovery/std",
 
   "frame-system-rpc-runtime-api/std",
   "pallet-transaction-payment-rpc-runtime-api/std",
@@ -102,10 +124,13 @@ runtime-benchmarks = [
   "frame-support/runtime-benchmarks",
   "frame-benchmarking/runtime-benchmarks",
 
+  "pallet-timestamp/runtime-benchmarks",
+
   "pallet-balances/runtime-benchmarks",
   "pallet-assets/runtime-benchmarks",
 
-  "pallet-tendermint/runtime-benchmarks",
+  "pallet-babe/runtime-benchmarks",
+  "pallet-grandpa/runtime-benchmarks",
 ]
 
 default = ["std"]
@@ -12,6 +12,8 @@ pub use serai_primitives as primitives;
 pub use frame_system as system;
 pub use frame_support as support;
 
+pub use pallet_timestamp as timestamp;
+
 pub use pallet_balances as balances;
 pub use pallet_transaction_payment as transaction_payment;
 
@@ -22,7 +24,10 @@ pub use in_instructions_pallet as in_instructions;
 pub use validator_sets_pallet as validator_sets;
 
 pub use pallet_session as session;
-pub use pallet_tendermint as tendermint;
+pub use pallet_babe as babe;
+pub use pallet_grandpa as grandpa;
+
+pub use pallet_authority_discovery as authority_discovery;
 
 // Actually used by the runtime
 use sp_core::OpaqueMetadata;
@@ -52,7 +57,9 @@ use support::{
 
 use transaction_payment::CurrencyAdapter;
 
-use session::PeriodicSessions;
+use babe::AuthorityId as BabeId;
+use grandpa::AuthorityId as GrandpaId;
+use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
 
 /// An index to a block.
 pub type BlockNumber = u64;
@@ -74,7 +81,9 @@ pub mod opaque {
 
   impl_opaque_keys! {
     pub struct SessionKeys {
-      pub tendermint: Tendermint,
+      pub babe: Babe,
+      pub grandpa: Grandpa,
+      pub authority_discovery: AuthorityDiscovery,
     }
   }
 }
@@ -94,6 +103,11 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
   state_version: 1,
 };
 
+#[cfg(feature = "std")]
+pub fn native_version() -> NativeVersion {
+  NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
+}
+
 // 1 MB
 pub const BLOCK_SIZE: u32 = 1024 * 1024;
 // 6 seconds
@@ -104,10 +118,13 @@ pub const MINUTES: BlockNumber = 60 / TARGET_BLOCK_TIME;
 pub const HOURS: BlockNumber = MINUTES * 60;
 pub const DAYS: BlockNumber = HOURS * 24;
 
-#[cfg(feature = "std")]
-pub fn native_version() -> NativeVersion {
-  NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
-}
+pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
+
+pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration =
+  sp_consensus_babe::BabeEpochConfiguration {
+    c: PRIMARY_PROBABILITY,
+    allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots,
+  };
 
 const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
@@ -122,14 +139,20 @@ parameter_types! {
     system::limits::BlockLength::max_with_normal_ratio(BLOCK_SIZE, NORMAL_DISPATCH_RATIO);
   pub BlockWeights: system::limits::BlockWeights =
     system::limits::BlockWeights::with_sensible_defaults(
-      Weight::from_ref_time(2u64 * WEIGHT_REF_TIME_PER_SECOND).set_proof_size(u64::MAX),
+      Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX),
       NORMAL_DISPATCH_RATIO,
     );
 
+  pub const MaxAuthorities: u32 = 100;
 }
 
 pub struct CallFilter;
 impl Contains<RuntimeCall> for CallFilter {
   fn contains(call: &RuntimeCall) -> bool {
+    if let RuntimeCall::Timestamp(call) = call {
+      return matches!(call, timestamp::Call::set { .. });
+    }
+
     if let RuntimeCall::Balances(call) = call {
       return matches!(call, balances::Call::transfer { .. } | balances::Call::transfer_all { .. });
     }
@@ -188,14 +211,29 @@ impl system::Config for Runtime {
   type MaxConsumers = support::traits::ConstU32<16>;
 }
 
+impl timestamp::Config for Runtime {
+  type Moment = u64;
+  type OnTimestampSet = Babe;
+  type MinimumPeriod = ConstU64<{ (TARGET_BLOCK_TIME * 1000) / 2 }>;
+  type WeightInfo = ();
+}
+
 impl balances::Config for Runtime {
-  type MaxLocks = ConstU32<50>;
-  type MaxReserves = ();
-  type ReserveIdentifier = [u8; 8];
-  type Balance = SubstrateAmount;
   type RuntimeEvent = RuntimeEvent;
+
+  type Balance = SubstrateAmount;
+
+  type ReserveIdentifier = ();
+  type HoldIdentifier = ();
+  type FreezeIdentifier = ();
+
+  type MaxLocks = ();
+  type MaxReserves = ();
+  type MaxHolds = ();
+  type MaxFreezes = ();
+
   type DustRemoval = ();
-  type ExistentialDeposit = ConstU64<500>;
+  type ExistentialDeposit = ConstU64<1>;
   type AccountStore = System;
   type WeightInfo = balances::weights::SubstrateWeight<Runtime>;
 }
@@ -248,8 +286,9 @@ impl in_instructions::Config for Runtime {
   type RuntimeEvent = RuntimeEvent;
 }
 
-const SESSION_LENGTH: BlockNumber = 5 * DAYS;
-type Sessions = PeriodicSessions<ConstU64<{ SESSION_LENGTH }>, ConstU64<{ SESSION_LENGTH }>>;
+impl validator_sets::Config for Runtime {
+  type RuntimeEvent = RuntimeEvent;
+}
 
 pub struct IdentityValidatorIdOf;
 impl Convert<PublicKey, Option<PublicKey>> for IdentityValidatorIdOf {
@@ -258,23 +297,49 @@ impl Convert<PublicKey, Option<PublicKey>> for IdentityValidatorIdOf {
   }
 }
 
-impl validator_sets::Config for Runtime {
-  type RuntimeEvent = RuntimeEvent;
-}
-
 impl session::Config for Runtime {
   type RuntimeEvent = RuntimeEvent;
   type ValidatorId = PublicKey;
   type ValidatorIdOf = IdentityValidatorIdOf;
-  type ShouldEndSession = Sessions;
-  type NextSessionRotation = Sessions;
-  type SessionManager = ();
+  type ShouldEndSession = Babe;
+  type NextSessionRotation = Babe;
+  type SessionManager = (); // TODO?
   type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
   type Keys = SessionKeys;
   type WeightInfo = session::weights::SubstrateWeight<Runtime>;
 }
 
-impl tendermint::Config for Runtime {}
+impl babe::Config for Runtime {
+  #[allow(clippy::identity_op)]
+  type EpochDuration = ConstU64<{ 1 * DAYS }>;
+  type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>;
+  type EpochChangeTrigger = pallet_babe::ExternalTrigger;
+  type DisabledValidators = Session;
+
+  type WeightInfo = ();
+
+  type MaxAuthorities = MaxAuthorities;
+
+  // TODO: Handle equivocation reports
+  type KeyOwnerProof = sp_core::Void;
+  type EquivocationReportSystem = ();
+}
+
+impl grandpa::Config for Runtime {
+  type RuntimeEvent = RuntimeEvent;
+
+  type WeightInfo = ();
+  type MaxAuthorities = MaxAuthorities;
+
+  // TODO: Handle equivocation reports
+  type MaxSetIdSessionEntries = ConstU64<0>;
+  type KeyOwnerProof = sp_core::Void;
+  type EquivocationReportSystem = ();
+}
+
+impl authority_discovery::Config for Runtime {
+  type MaxAuthorities = MaxAuthorities;
+}
 
 pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
 pub type Block = generic::Block<Header, UncheckedExtrinsic>;
@@ -307,6 +372,8 @@ construct_runtime!(
   {
     System: system,
 
+    Timestamp: timestamp,
+
     Balances: balances,
     TransactionPayment: transaction_payment,
 
@@ -317,7 +384,10 @@ construct_runtime!(
     ValidatorSets: validator_sets,
 
     Session: session,
-    Tendermint: tendermint,
+    Babe: babe,
+    Grandpa: grandpa,
+
+    AuthorityDiscovery: authority_discovery,
   }
 );
 
@@ -329,8 +399,15 @@ extern crate frame_benchmarking;
 mod benches {
   define_benchmarks!(
     [frame_benchmarking, BaselineBench::<Runtime>]
+
     [system, SystemBench::<Runtime>]
+
+    [pallet_timestamp, Timestamp]
+
     [balances, Balances]
+
+    [babe, Babe]
+    [grandpa, Grandpa]
   );
 }
 
@@ -353,6 +430,14 @@ sp_api::impl_runtime_apis! {
     fn metadata() -> OpaqueMetadata {
       OpaqueMetadata::new(Runtime::metadata().into())
     }
+
+    fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
+      Runtime::metadata_at_version(version)
+    }
+
+    fn metadata_versions() -> sp_std::vec::Vec<u32> {
+      Runtime::metadata_versions()
+    }
   }
 
   impl sp_block_builder::BlockBuilder<Block> for Runtime {
@@ -404,13 +489,69 @@ sp_api::impl_runtime_apis! {
     }
   }
 
-  impl sp_tendermint::TendermintApi<Block> for Runtime {
-    fn current_session() -> u32 {
-      Tendermint::session()
+  impl sp_consensus_babe::BabeApi<Block> for Runtime {
+    fn configuration() -> sp_consensus_babe::BabeConfiguration {
+      use support::traits::Get;
+
+      let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG);
+      sp_consensus_babe::BabeConfiguration {
+        slot_duration: Babe::slot_duration(),
+        epoch_length: <Runtime as babe::Config>::EpochDuration::get(),
+        c: epoch_config.c,
+        authorities: Babe::authorities().to_vec(),
+        randomness: Babe::randomness(),
+        allowed_slots: epoch_config.allowed_slots,
+      }
     }
 
-    fn validators() -> Vec<PublicKey> {
-      Session::validators().drain(..).map(Into::into).collect()
+    fn current_epoch_start() -> sp_consensus_babe::Slot {
+      Babe::current_epoch_start()
+    }
+
+    fn current_epoch() -> sp_consensus_babe::Epoch {
+      Babe::current_epoch()
+    }
+
+    fn next_epoch() -> sp_consensus_babe::Epoch {
+      Babe::next_epoch()
+    }
+
+    fn generate_key_ownership_proof(
+      _: sp_consensus_babe::Slot,
+      _: BabeId,
+    ) -> Option<sp_consensus_babe::OpaqueKeyOwnershipProof> {
+      None
+    }
+
+    fn submit_report_equivocation_unsigned_extrinsic(
+      _: sp_consensus_babe::EquivocationProof<<Block as BlockT>::Header>,
+      _: sp_consensus_babe::OpaqueKeyOwnershipProof,
+    ) -> Option<()> {
+      None
+    }
+  }
+
+  impl sp_consensus_grandpa::GrandpaApi<Block> for Runtime {
+    fn grandpa_authorities() -> sp_consensus_grandpa::AuthorityList {
+      Grandpa::grandpa_authorities()
+    }
+
+    fn current_set_id() -> sp_consensus_grandpa::SetId {
+      Grandpa::current_set_id()
+    }
+
+    fn submit_report_equivocation_unsigned_extrinsic(
+      _: sp_consensus_grandpa::EquivocationProof<<Block as BlockT>::Hash, u64>,
+      _: sp_consensus_grandpa::OpaqueKeyOwnershipProof,
+    ) -> Option<()> {
+      None
+    }
+
+    fn generate_key_ownership_proof(
+      _set_id: sp_consensus_grandpa::SetId,
+      _authority_id: GrandpaId,
+    ) -> Option<sp_consensus_grandpa::OpaqueKeyOwnershipProof> {
+      None
     }
   }
 
@ -446,4 +587,10 @@ sp_api::impl_runtime_apis! {
|
||||||
TransactionPayment::length_to_fee(length)
|
TransactionPayment::length_to_fee(length)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl sp_authority_discovery::AuthorityDiscoveryApi<Block> for Runtime {
|
||||||
|
fn authorities() -> Vec<AuthorityDiscoveryId> {
|
||||||
|
AuthorityDiscovery::authorities()
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
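The BabeApi implementation above simply reports the chain's slot and epoch parameters. As a minimal sketch (hypothetical helper names, not from the runtime): `slot_duration` is a duration in milliseconds and `epoch_length` a count of slots, so an epoch's wall-clock length is their product, and the epoch that begins at `current_epoch_start` ends `epoch_length` slots later.

fn epoch_wall_clock_ms(slot_duration_ms: u64, epoch_length_slots: u64) -> u64 {
  // Each epoch is epoch_length slots of slot_duration milliseconds each
  slot_duration_ms * epoch_length_slots
}

fn epoch_end_slot(epoch_start_slot: u64, epoch_length_slots: u64) -> u64 {
  // The first slot of the following epoch
  epoch_start_slot + epoch_length_slots
}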
@@ -12,6 +12,7 @@ use subxt::{
   error::Error as SubxtError,
   utils::Encoded,
   config::{
+    Header as HeaderTrait,
     substrate::{BlakeTwo256, SubstrateHeader},
     extrinsic_params::{BaseExtrinsicParams, BaseExtrinsicParamsBuilder},
   },

@@ -66,6 +67,8 @@ pub enum SeraiError {
   RpcError(SubxtError),
   #[error("serai-client library was intended for a different runtime version")]
   InvalidRuntime,
+  #[error("node is faulty")]
+  InvalidNode,
 }

 #[derive(Clone)]

@@ -123,6 +126,44 @@ impl Serai {
     Ok(self.0.rpc().finalized_head().await.map_err(SeraiError::RpcError)?.into())
   }

+  // There is no provided method for this
+  // TODO: Add one to Serai
+  pub async fn is_finalized(&self, header: &Header) -> Result<Option<bool>, SeraiError> {
+    // Get the latest finalized block
+    let finalized = self.get_latest_block_hash().await?.into();
+    // If the latest finalized block is this block, return true
+    if finalized == header.hash() {
+      return Ok(Some(true));
+    }
+
+    let Some(finalized) =
+      self.0.rpc().header(Some(finalized)).await.map_err(SeraiError::RpcError)? else {
+      return Ok(None);
+    };
+
+    // If the finalized block has a lower number, this block can't be finalized
+    if finalized.number() < header.number() {
+      return Ok(Some(false));
+    }
+
+    // This block, if finalized, comes before the finalized block
+    // If we request the hash of this block's number, Substrate will return the hash on the main
+    // chain
+    // If that hash is this hash, this block is finalized
+    let Some(hash) =
+      self
+        .0
+        .rpc()
+        .block_hash(Some(header.number().into()))
+        .await
+        .map_err(SeraiError::RpcError)? else {
+      // This is an error since there is a block at this index
+      return Err(SeraiError::InvalidNode);
+    };
+
+    Ok(Some(header.hash() == hash))
+  }
+
   pub async fn get_block(&self, hash: [u8; 32]) -> Result<Option<Block>, SeraiError> {
     let Some(res) =
       self.0.rpc().block(Some(hash.into())).await.map_err(SeraiError::RpcError)? else {

@@ -130,8 +171,7 @@ impl Serai {
     };

     // Only return finalized blocks
-    let Some(justifications) = res.justifications.as_ref() else { return Ok(None); };
-    if justifications.is_empty() {
+    if self.is_finalized(&res.block.header).await? != Some(true) {
       return Ok(None);
     }

@@ -140,9 +180,9 @@ impl Serai {

   // Ideally, this would be get_block_hash, not get_block_by_number
   // Unfortunately, in order to only operate over only finalized data, we have to check the
-  // returned hash is for a finalized block. We can only do that by calling subxt's `block`, which
-  // will return the block and any justifications
-  // If we're already putting in all the work to get the block, we may as well just return it here
+  // returned hash is for a finalized block. We can only do that by calling the extensive
+  // is_finalized method, which at least requires the header
+  // In practice, the block is likely more useful than the header
   pub async fn get_block_by_number(&self, number: u64) -> Result<Option<Block>, SeraiError> {
     let Some(hash) =
       self.0.rpc().block_hash(Some(number.into())).await.map_err(SeraiError::RpcError)? else {
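The new `is_finalized` helper reduces to a three-way comparison against the finalized tip. A standalone sketch of that decision logic over plain data (assumed inputs; `canonical_hash_at` stands in for the node's hash-by-number RPC, and the real method treats a missing canonical hash as a faulty node rather than returning None):

fn finality_status(
  finalized_number: u64,
  finalized_hash: [u8; 32],
  header_number: u64,
  header_hash: [u8; 32],
  canonical_hash_at: impl Fn(u64) -> Option<[u8; 32]>,
) -> Option<bool> {
  // The header is the finalized tip itself
  if header_hash == finalized_hash {
    return Some(true);
  }
  // The header is above the finalized tip, so it can't be finalized yet
  if finalized_number < header_number {
    return Some(false);
  }
  // At or below the finalized height: finalized iff it's the canonical block at its height
  canonical_hash_at(header_number).map(|hash| hash == header_hash)
}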
@@ -25,7 +25,7 @@ pub async fn provide_batch(batch: SignedBatch) -> [u8; 32] {
     .await
     .unwrap()
     .unwrap()
-    .header()
+    .header
     .number();

   let execution = serai.execute_batch(batch.clone()).unwrap();

@@ -10,7 +10,6 @@ use serde::{Serialize, Deserialize};
 use sp_core::H256;

 /// The type used to identify block numbers.
-// Doesn't re-export tendermint-machine's due to traits.
 #[derive(
   Clone, Copy, Default, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo,
 )]
@@ -1,46 +0,0 @@
[package]
name = "sc-tendermint"
version = "0.1.0"
description = "Tendermint client for Substrate"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/tendermint/client"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
async-trait = "0.1"

hex = "0.4"
log = "0.4"

futures = "0.3"
tokio = { version = "1", features = ["sync", "rt"] }

sp-core = { git = "https://github.com/serai-dex/substrate" }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate" }
sp-keystore = { git = "https://github.com/serai-dex/substrate" }
sp-inherents = { git = "https://github.com/serai-dex/substrate" }
sp-staking = { git = "https://github.com/serai-dex/substrate" }
sp-blockchain = { git = "https://github.com/serai-dex/substrate" }
sp-runtime = { git = "https://github.com/serai-dex/substrate" }
sp-api = { git = "https://github.com/serai-dex/substrate" }
sp-consensus = { git = "https://github.com/serai-dex/substrate" }

sp-tendermint = { path = "../primitives" }

sc-network-common = { git = "https://github.com/serai-dex/substrate" }
sc-network = { git = "https://github.com/serai-dex/substrate" }
sc-network-gossip = { git = "https://github.com/serai-dex/substrate" }
sc-service = { git = "https://github.com/serai-dex/substrate" }
sc-client-api = { git = "https://github.com/serai-dex/substrate" }
sc-block-builder = { git = "https://github.com/serai-dex/substrate" }
sc-consensus = { git = "https://github.com/serai-dex/substrate" }

substrate-prometheus-endpoint = { git = "https://github.com/serai-dex/substrate" }

tendermint-machine = { path = "../machine", features = ["substrate"] }
@@ -1,15 +0,0 @@
AGPL-3.0-only license

Copyright (c) 2022-2023 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@@ -1,67 +0,0 @@
use std::sync::{Arc, RwLock};

use sp_core::Decode;
use sp_runtime::traits::{Hash, Header, Block};

use sc_network::PeerId;
use sc_network_gossip::{Validator, ValidatorContext, ValidationResult};

use tendermint_machine::{ext::SignatureScheme, SignedMessage};

use crate::{TendermintValidator, validators::TendermintValidators};

#[derive(Clone)]
pub(crate) struct TendermintGossip<T: TendermintValidator> {
  number: Arc<RwLock<u64>>,
  signature_scheme: TendermintValidators<T>,
}

impl<T: TendermintValidator> TendermintGossip<T> {
  pub(crate) fn new(number: Arc<RwLock<u64>>, signature_scheme: TendermintValidators<T>) -> Self {
    TendermintGossip { number, signature_scheme }
  }

  pub(crate) fn topic(number: u64) -> <T::Block as Block>::Hash {
    <<<T::Block as Block>::Header as Header>::Hashing as Hash>::hash(
      &[b"Tendermint Block Topic".as_ref(), &number.to_le_bytes()].concat(),
    )
  }
}

impl<T: TendermintValidator> Validator<T::Block> for TendermintGossip<T> {
  fn validate(
    &self,
    _: &mut dyn ValidatorContext<T::Block>,
    _: &PeerId,
    data: &[u8],
  ) -> ValidationResult<<T::Block as Block>::Hash> {
    let msg = match SignedMessage::<
      u16,
      T::Block,
      <TendermintValidators<T> as SignatureScheme>::Signature,
    >::decode(&mut &*data)
    {
      Ok(msg) => msg,
      Err(_) => return ValidationResult::Discard,
    };

    if msg.block().0 < *self.number.read().unwrap() {
      return ValidationResult::Discard;
    }

    // Verify the signature here so we don't carry invalid messages in our gossip layer
    // This will cause double verification of the signature, yet that's a minimal cost
    if !msg.verify_signature(&self.signature_scheme) {
      return ValidationResult::Discard;
    }

    ValidationResult::ProcessAndKeep(Self::topic(msg.block().0))
  }

  fn message_expired<'a>(
    &'a self,
  ) -> Box<dyn FnMut(<T::Block as Block>::Hash, &[u8]) -> bool + 'a> {
    let number = self.number.clone();
    Box::new(move |topic, _| topic != Self::topic(*number.read().unwrap()))
  }
}
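The removed gossip validator keys every message to a topic derived from its block number and expires anything not on the topic of the block currently in progress. A simplified sketch of that policy (the real code hashes the preimage with the chain's `Header::Hashing`; here the preimage itself stands in for the topic):

fn topic_preimage(number: u64) -> Vec<u8> {
  // One topic per block number, so peers only relay messages for the height they're on
  [b"Tendermint Block Topic".as_ref(), number.to_le_bytes().as_ref()].concat()
}

fn expired(current_number: u64, message_topic: &[u8]) -> bool {
  // Everything not addressed to the current block's topic is dropped from the gossip pool
  message_topic != topic_preimage(current_number).as_slice()
}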
@@ -1,72 +0,0 @@
use std::{
  pin::Pin,
  sync::RwLock,
  task::{Poll, Context},
  future::Future,
};

use sp_runtime::traits::{Header, Block};

use sp_consensus::Error;
use sc_consensus::{BlockImportStatus, BlockImportError, Link};

use sc_service::ImportQueue;

use tendermint_machine::ext::BlockError;

use crate::TendermintImportQueue;

// Custom helpers for ImportQueue in order to obtain the result of a block's importing
struct ValidateLink<B: Block>(Option<(B::Hash, Result<(), BlockError>)>);
impl<B: Block> Link<B> for ValidateLink<B> {
  fn blocks_processed(
    &mut self,
    imported: usize,
    count: usize,
    mut results: Vec<(
      Result<BlockImportStatus<<B::Header as Header>::Number>, BlockImportError>,
      B::Hash,
    )>,
  ) {
    assert!(imported <= 1);
    assert_eq!(count, 1);
    self.0 = Some((
      results[0].1,
      match results.swap_remove(0).0 {
        Ok(_) => Ok(()),
        Err(BlockImportError::Other(Error::Other(err))) => Err(
          err.downcast::<BlockError>().map(|boxed| *boxed.as_ref()).unwrap_or(BlockError::Fatal),
        ),
        _ => Err(BlockError::Fatal),
      },
    ));
  }
}

pub(crate) struct ImportFuture<'a, B: Block, T: Send>(
  B::Hash,
  RwLock<&'a mut TendermintImportQueue<B, T>>,
);
impl<'a, B: Block, T: Send> ImportFuture<'a, B, T> {
  pub(crate) fn new(
    hash: B::Hash,
    queue: &'a mut TendermintImportQueue<B, T>,
  ) -> ImportFuture<B, T> {
    ImportFuture(hash, RwLock::new(queue))
  }
}

impl<'a, B: Block, T: Send> Future for ImportFuture<'a, B, T> {
  type Output = Result<(), BlockError>;

  fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
    let mut link = ValidateLink(None);
    self.1.write().unwrap().poll_actions(ctx, &mut link);
    if let Some(res) = link.0 {
      assert_eq!(res.0, self.0);
      Poll::Ready(res.1)
    } else {
      Poll::Pending
    }
  }
}
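ImportFuture adapts the callback-style `Link` reporting of Substrate's import queue into a future. A simplified, self-contained analogue of the pattern (assumed types; the real future drives the queue's `poll_actions` with a `Link` that records the outcome for the one block it cares about, rather than reading a shared slot):

use std::{
  future::Future,
  pin::Pin,
  sync::{Arc, Mutex},
  task::{Context, Poll},
};

// A future which completes once some other component fills the shared slot with a result
struct ResultFuture<T>(Arc<Mutex<Option<T>>>);

impl<T> Future for ResultFuture<T> {
  type Output = T;
  fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<T> {
    if let Some(res) = self.0.lock().unwrap().take() {
      Poll::Ready(res)
    } else {
      // Busy-poll for simplicity; a production version would store the waker and wake it when
      // the slot is filled (the original instead relies on the queue scheduling re-polls)
      ctx.waker().wake_by_ref();
      Poll::Pending
    }
  }
}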
@@ -1,494 +0,0 @@
use std::{
  sync::{Arc, RwLock},
  time::{UNIX_EPOCH, SystemTime, Duration},
  collections::HashSet,
};

use async_trait::async_trait;

use log::{debug, warn, error};

use futures::{
  SinkExt, StreamExt,
  lock::Mutex,
  channel::mpsc::{self, UnboundedSender},
};

use sp_core::{Encode, Decode, traits::SpawnEssentialNamed};
use sp_keystore::CryptoStore;
use sp_runtime::{
  traits::{Header, Block},
  Digest,
};
use sp_blockchain::HeaderBackend;

use sp_consensus::{Error, BlockOrigin, BlockStatus, Proposer, Environment};
use sc_consensus::import_queue::IncomingBlock;

use sc_service::ImportQueue;
use sc_client_api::{BlockBackend, Finalizer, BlockchainEvents};
use sc_network::{ProtocolName, NetworkBlock};
use sc_network_gossip::GossipEngine;

use substrate_prometheus_endpoint::Registry;

use tendermint_machine::{
  ext::{BlockError, BlockNumber, Commit, SignatureScheme, Network},
  SignedMessage, TendermintMachine, TendermintHandle,
};

use crate::{
  CONSENSUS_ID, TendermintValidator,
  validators::{TendermintSigner, TendermintValidators},
  tendermint::TendermintImport,
};

mod gossip;
use gossip::TendermintGossip;

mod import_future;
use import_future::ImportFuture;

// Data for an active validator
// This is distinct as even when we aren't an authority, we still create stubbed Authority objects
// as it's only Authority which implements tendermint_machine::ext::Network. Network has
// verify_commit provided, and even non-authorities have to verify commits
struct ActiveAuthority<T: TendermintValidator> {
  signer: TendermintSigner<T>,

  // The number of the Block we're working on producing
  block_in_progress: Arc<RwLock<u64>>,
  // Notification channel for when we start on a new block
  new_block_event: UnboundedSender<()>,
  // Outgoing message queue, placed here as the GossipEngine itself can't be
  gossip: UnboundedSender<
    SignedMessage<u16, T::Block, <TendermintValidators<T> as SignatureScheme>::Signature>,
  >,

  // Block producer
  env: Arc<Mutex<T::Environment>>,
  announce: T::Network,
}

/// Tendermint Authority. Participates in the block proposal and voting process.
pub struct TendermintAuthority<T: TendermintValidator> {
  import: TendermintImport<T>,
  active: Option<ActiveAuthority<T>>,
}

// Get a block to propose after the specified header
// If stub is true, no time will be spent adding transactions to it (beyond what's required),
// making it as minimal as possible (a stub)
// This is so we can create proposals when syncing, respecting tendermint-machine's API boundaries,
// without spending the entire block processing time trying to include transactions (since we know
// our proposal is meaningless and we'll just be syncing a new block anyways)
async fn get_proposal<T: TendermintValidator>(
  env: &Arc<Mutex<T::Environment>>,
  import: &TendermintImport<T>,
  header: &<T::Block as Block>::Header,
) -> T::Block {
  let proposer =
    env.lock().await.init(header).await.expect("Failed to create a proposer for the new block");

  proposer
    .propose(
      import.inherent_data(*header.parent_hash()).await,
      Digest::default(),
      // The first processing time is to build the block
      // The second is for it to be downloaded (assumes a block won't take longer to download
      // than it'll take to process)
      // The third is for it to actually be processed
      Duration::from_secs((T::BLOCK_PROCESSING_TIME_IN_SECONDS / 3).into()),
      Some(T::PROPOSED_BLOCK_SIZE_LIMIT),
    )
    .await
    .expect("Failed to crate a new block proposal")
    .block
}

impl<T: TendermintValidator> TendermintAuthority<T> {
  // Authority which is capable of verifying commits
  pub(crate) fn stub(import: TendermintImport<T>) -> Self {
    Self { import, active: None }
  }

  async fn get_proposal(&self, header: &<T::Block as Block>::Header) -> T::Block {
    get_proposal(&self.active.as_ref().unwrap().env, &self.import, header).await
  }

  /// Create and run a new Tendermint Authority, proposing and voting on blocks.
  /// This should be spawned on a task as it will not return until the P2P stack shuts down.
  #[allow(clippy::too_many_arguments, clippy::new_ret_no_self)]
  pub async fn new(
    genesis: SystemTime,
    protocol: ProtocolName,
    import: TendermintImport<T>,
    keys: Arc<dyn CryptoStore>,
    providers: T::CIDP,
    spawner: impl SpawnEssentialNamed,
    env: T::Environment,
    network: T::Network,
    registry: Option<&Registry>,
  ) {
    // This should only have a single value, yet a bounded channel with a capacity of 1 would cause
    // a firm bound. It's not worth having a backlog crash the node since we aren't constrained
    let (new_block_event_send, mut new_block_event_recv) = mpsc::unbounded();
    let (msg_send, mut msg_recv) = mpsc::unbounded();

    // Move the env into an Arc
    let env = Arc::new(Mutex::new(env));

    // Scoped so the temporary variables used here don't leak
    let (block_in_progress, mut gossip, TendermintHandle { mut step, mut messages, machine }) = {
      // Get the info necessary to spawn the machine
      let info = import.client.info();

      // Header::Number: TryInto<u64> doesn't implement Debug and can't be unwrapped
      let last_block: u64 = match info.finalized_number.try_into() {
        Ok(best) => best,
        Err(_) => panic!("BlockNumber exceeded u64"),
      };
      let last_hash = info.finalized_hash;

      let last_time = {
        // Convert into a Unix timestamp
        let genesis = genesis.duration_since(UNIX_EPOCH).unwrap().as_secs();

        // Get the last block's time by grabbing its commit and reading the time from that
        Commit::<TendermintValidators<T>>::decode(
          &mut import
            .client
            .justifications(last_hash)
            .unwrap()
            .map(|justifications| justifications.get(CONSENSUS_ID).cloned().unwrap())
            .unwrap_or_default()
            .as_ref(),
        )
        .map(|commit| commit.end_time)
        // The commit provides the time its block ended at
        // The genesis time is when the network starts
        // Accordingly, the end of the genesis block is a block time after the genesis time
        .unwrap_or_else(|_| genesis + u64::from(Self::block_time()))
      };

      let next_block = last_block + 1;
      // Shared references between us and the Tendermint machine (and its actions via its Network
      // trait)
      let block_in_progress = Arc::new(RwLock::new(next_block));

      // Write the providers into the import so it can verify inherents
      *import.providers.write().await = Some(providers);

      let authority = Self {
        import: import.clone(),
        active: Some(ActiveAuthority {
          signer: TendermintSigner(keys, import.validators.clone()),

          block_in_progress: block_in_progress.clone(),
          new_block_event: new_block_event_send,
          gossip: msg_send,

          env: env.clone(),
          announce: network.clone(),
        }),
      };

      // Get our first proposal
      let proposal =
        authority.get_proposal(&import.client.header(last_hash).unwrap().unwrap()).await;

      // Create the gossip network
      // This has to be spawning the machine, else gossip fails for some reason
      let gossip = GossipEngine::new(
        network,
        protocol,
        Arc::new(TendermintGossip::new(block_in_progress.clone(), import.validators.clone())),
        registry,
      );

      (
        block_in_progress,
        gossip,
        TendermintMachine::new(authority, BlockNumber(last_block), last_time, proposal).await,
      )
    };
    spawner.spawn_essential("machine", Some("tendermint"), Box::pin(machine.run()));

    // Start receiving messages about the Tendermint process for this block
    let mut gossip_recv =
      gossip.messages_for(TendermintGossip::<T>::topic(*block_in_progress.read().unwrap()));

    // Get finality events from Substrate
    let mut finality = import.client.finality_notification_stream();

    loop {
      futures::select_biased! {
        // GossipEngine closed down
        _ = gossip => {
          debug!(
            target: "tendermint",
            "GossipEngine shut down. {}",
            "Is the node shutting down?"
          );
          break;
        },

        // Synced a block from the network
        notif = finality.next() => {
          if let Some(notif) = notif {
            let number = match (*notif.header.number()).try_into() {
              Ok(number) => number,
              Err(_) => panic!("BlockNumber exceeded u64"),
            };

            // There's a race condition between the machine add_block and this
            // Both wait for a write lock on this ref and don't release it until after updating it
            // accordingly
            {
              let mut block_in_progress = block_in_progress.write().unwrap();
              if number < *block_in_progress {
                continue;
              }
              let next_block = number + 1;
              *block_in_progress = next_block;
              gossip_recv = gossip.messages_for(TendermintGossip::<T>::topic(next_block));
            }

            let justifications = import.client.justifications(notif.hash).unwrap().unwrap();
            step.send((
              BlockNumber(number),
              Commit::decode(&mut justifications.get(CONSENSUS_ID).unwrap().as_ref()).unwrap(),
              // Creating a proposal will fail if syncing occurs radically faster than machine
              // stepping takes
              // Don't create proposals when stepping accordingly
              None
            )).await.unwrap();
          } else {
            debug!(
              target: "tendermint",
              "Finality notification stream closed down. {}",
              "Is the node shutting down?"
            );
            break;
          }
        },

        // Machine accomplished a new block
        new_block = new_block_event_recv.next() => {
          if new_block.is_some() {
            gossip_recv = gossip.messages_for(
              TendermintGossip::<T>::topic(*block_in_progress.read().unwrap())
            );
          } else {
            debug!(
              target: "tendermint",
              "Block notification stream shut down. {}",
              "Is the node shutting down?"
            );
            break;
          }
        },

        // Message to broadcast
        msg = msg_recv.next() => {
          if let Some(msg) = msg {
            let topic = TendermintGossip::<T>::topic(msg.block().0);
            gossip.gossip_message(topic, msg.encode(), false);
          } else {
            debug!(
              target: "tendermint",
              "Machine's message channel shut down. {}",
              "Is the node shutting down?"
            );
            break;
          }
        },

        // Received a message
        msg = gossip_recv.next() => {
          if let Some(msg) = msg {
            messages.send(
              match SignedMessage::decode(&mut msg.message.as_ref()) {
                Ok(msg) => msg,
                Err(e) => {
                  // This is guaranteed to be valid thanks to to the gossip validator, assuming
                  // that pipeline is correct. This doesn't panic as a hedge
                  error!(target: "tendermint", "Couldn't decode valid message: {}", e);
                  continue;
                }
              }
            ).await.unwrap();
          } else {
            debug!(
              target: "tendermint",
              "Gossip channel shut down. {}",
              "Is the node shutting down?"
            );
            break;
          }
        }
      }
    }
  }
}

#[async_trait]
impl<T: TendermintValidator> Network for TendermintAuthority<T> {
  type ValidatorId = u16;
  type SignatureScheme = TendermintValidators<T>;
  type Weights = TendermintValidators<T>;
  type Block = T::Block;

  const BLOCK_PROCESSING_TIME: u32 = T::BLOCK_PROCESSING_TIME_IN_SECONDS;
  const LATENCY_TIME: u32 = T::LATENCY_TIME_IN_SECONDS;

  fn signer(&self) -> TendermintSigner<T> {
    self.active.as_ref().unwrap().signer.clone()
  }

  fn signature_scheme(&self) -> TendermintValidators<T> {
    self.import.validators.clone()
  }

  fn weights(&self) -> TendermintValidators<T> {
    self.import.validators.clone()
  }

  async fn broadcast(
    &mut self,
    msg: SignedMessage<u16, Self::Block, <TendermintValidators<T> as SignatureScheme>::Signature>,
  ) {
    if self.active.as_mut().unwrap().gossip.unbounded_send(msg).is_err() {
      warn!(
        target: "tendermint",
        "Attempted to broadcast a message except the gossip channel is closed. {}",
        "Is the node shutting down?"
      );
    }
  }

  async fn slash(&mut self, validator: u16) {
    // TODO
    error!("slashing {}, if this is a local network, this shouldn't happen", validator);
  }

  // The Tendermint machine will call add_block for any block which is committed to, regardless of
  // validity. To determine validity, it expects a validate function, which Substrate doesn't
  // directly offer, and an add function. In order to comply with Serai's modified view of inherent
  // transactions, validate MUST check inherents, yet add_block must not.
  //
  // In order to acquire a validate function, any block proposed by a legitimate proposer is
  // imported. This performs full validation and makes the block available as a tip. While this
  // would be incredibly unsafe thanks to the unchecked inherents, it's defined as a tip with less
  // work, despite being a child of some parent. This means it won't be moved to nor operated on by
  // the node.
  //
  // When Tendermint completes, the block is finalized, setting it as the tip regardless of work.
  async fn validate(&mut self, block: &T::Block) -> Result<(), BlockError> {
    let hash = block.hash();
    let (header, body) = block.clone().deconstruct();
    let parent = *header.parent_hash();
    let number = *header.number();

    // Can happen when we sync a block while also acting as a validator
    if number <= self.import.client.info().best_number {
      debug!(target: "tendermint", "Machine proposed a block for a slot we've already synced");
      Err(BlockError::Temporal)?;
    }

    let mut queue_write = self.import.queue.write().await;
    *self.import.importing_block.write().unwrap() = Some(hash);

    queue_write.as_mut().unwrap().service_ref().import_blocks(
      BlockOrigin::ConsensusBroadcast, // TODO: Use BlockOrigin::Own when it's our block
      vec![IncomingBlock {
        hash,
        header: Some(header),
        body: Some(body),
        indexed_body: None,
        justifications: None,
        origin: None, // TODO
        allow_missing_state: false,
        skip_execution: false,
        import_existing: self.import.recheck.read().unwrap().contains(&hash),
        state: None,
      }],
    );

    ImportFuture::new(hash, queue_write.as_mut().unwrap()).await?;

    // Sanity checks that a child block can have less work than its parent
    {
      let info = self.import.client.info();
      assert_eq!(info.best_hash, parent);
      assert_eq!(info.finalized_hash, parent);
      assert_eq!(info.best_number, number - 1u8.into());
      assert_eq!(info.finalized_number, number - 1u8.into());
    }

    Ok(())
  }

  async fn add_block(
    &mut self,
    block: T::Block,
    commit: Commit<TendermintValidators<T>>,
  ) -> Option<T::Block> {
    // Prevent import_block from being called while we run
    let _lock = self.import.sync_lock.lock().await;

    // If we didn't import this block already, return
    // If it's a legitimate block, we'll pick it up in the standard sync loop
    if self.import.client.block_status(block.hash()).unwrap() != BlockStatus::InChainWithState {
      return None;
    }

    // Check if we already imported this externally
    if self.import.client.justifications(block.hash()).unwrap().is_some() {
      debug!(target: "tendermint", "Machine produced a commit after we already synced it");
    } else {
      let hash = block.hash();
      let justification = (CONSENSUS_ID, commit.encode());
      debug_assert!(self.import.verify_justification(hash, &justification).is_ok());

      let raw_number = *block.header().number();
      let number: u64 = match raw_number.try_into() {
        Ok(number) => number,
        Err(_) => panic!("BlockNumber exceeded u64"),
      };

      let active = self.active.as_mut().unwrap();
      let mut block_in_progress = active.block_in_progress.write().unwrap();
      // This will hold true unless we received, and handled, a notification for the block before
      // its justification was made available
      debug_assert_eq!(number, *block_in_progress);

      // Finalize the block
      self
        .import
        .client
        .finalize_block(hash, Some(justification), true)
        .map_err(|_| Error::InvalidJustification)
        .unwrap();

      // Tell the loop we received a block and to move to the next
      *block_in_progress = number + 1;
      if active.new_block_event.unbounded_send(()).is_err() {
        warn!(
          target: "tendermint",
          "Attempted to send a new number to the gossip handler except it's closed. {}",
          "Is the node shutting down?"
        );
      }

      // Announce the block to the network so new clients can sync properly
      active.announce.announce_block(hash, None);
      active.announce.new_best_block_imported(hash, raw_number);
    }

    // Clear any blocks for the previous slot which we were willing to recheck
    *self.import.recheck.write().unwrap() = HashSet::new();

    Some(self.get_proposal(block.header()).await)
  }
}
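The validate/add_block split described in the comments above amounts to a two-phase flow: full validation when a block is proposed, finalization only once the machine commits to it. A toy model of that flow (assumed types, not the removed implementation):

use std::collections::HashMap;

#[derive(Default)]
struct ToyChain {
  // hash -> (number, finalized)
  blocks: HashMap<[u8; 32], (u64, bool)>,
  finalized_number: u64,
}

impl ToyChain {
  // Phase one: the proposal becomes available (a tip "with less work"), yet nothing builds on it
  fn validate(&mut self, hash: [u8; 32], number: u64) -> Result<(), &'static str> {
    if number != self.finalized_number + 1 {
      return Err("non-sequential proposal");
    }
    self.blocks.insert(hash, (number, false));
    Ok(())
  }

  // Phase two: the committed block is finalized, regardless of any work-based fork choice
  fn add_block(&mut self, hash: [u8; 32]) -> Result<(), &'static str> {
    let entry = self.blocks.get_mut(&hash).ok_or("never validated")?;
    entry.1 = true;
    self.finalized_number = entry.0;
    Ok(())
  }
}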
@@ -1,180 +0,0 @@
use std::{marker::PhantomData, sync::Arc, collections::HashMap};

use async_trait::async_trait;

use sp_runtime::traits::{Header, Block};
use sp_blockchain::{BlockStatus, HeaderBackend, Backend as BlockchainBackend};
use sp_consensus::{Error, CacheKeyId, BlockOrigin, SelectChain};

use sc_consensus::{BlockCheckParams, BlockImportParams, ImportResult, BlockImport, Verifier};

use sc_client_api::{Backend, BlockBackend};

use crate::{TendermintValidator, tendermint::TendermintImport};

impl<T: TendermintValidator> TendermintImport<T> {
  fn check_already_in_chain(&self, hash: <T::Block as Block>::Hash) -> bool {
    // If it's in chain, with justifications, return it's already on chain
    // If it's in chain, without justifications, continue the block import process to import its
    // justifications
    // This can be triggered if the validators add a block, without justifications, yet the p2p
    // process then broadcasts it with its justifications
    (self.client.status(hash).unwrap() == BlockStatus::InChain) &&
      self.client.justifications(hash).unwrap().is_some()
  }
}

#[async_trait]
impl<T: TendermintValidator> BlockImport<T::Block> for TendermintImport<T>
where
  Arc<T::Client>: BlockImport<T::Block, Transaction = T::BackendTransaction>,
  <Arc<T::Client> as BlockImport<T::Block>>::Error: Into<Error>,
{
  type Error = Error;
  type Transaction = T::BackendTransaction;

  // TODO: Is there a DoS where you send a block without justifications, causing it to error,
  // yet adding it to the blacklist in the process preventing further syncing?
  async fn check_block(
    &mut self,
    mut block: BlockCheckParams<T::Block>,
  ) -> Result<ImportResult, Self::Error> {
    if self.check_already_in_chain(block.hash) {
      return Ok(ImportResult::AlreadyInChain);
    }
    self.verify_order(block.parent_hash, block.number)?;

    // Does not verify origin here as origin only applies to unfinalized blocks
    // We don't have context on if this block has justifications or not

    block.allow_missing_state = false;
    block.allow_missing_parent = false;

    self.client.check_block(block).await.map_err(Into::into)
  }

  async fn import_block(
    &mut self,
    mut block: BlockImportParams<T::Block, Self::Transaction>,
    new_cache: HashMap<CacheKeyId, Vec<u8>>,
  ) -> Result<ImportResult, Self::Error> {
    // Don't allow multiple blocks to be imported at once
    let _lock = self.sync_lock.lock().await;

    if self.check_already_in_chain(block.header.hash()) {
      return Ok(ImportResult::AlreadyInChain);
    }

    self.check(&mut block).await?;
    self.client.import_block(block, new_cache).await.map_err(Into::into)
  }
}

#[async_trait]
impl<T: TendermintValidator> Verifier<T::Block> for TendermintImport<T>
where
  Arc<T::Client>: BlockImport<T::Block, Transaction = T::BackendTransaction>,
  <Arc<T::Client> as BlockImport<T::Block>>::Error: Into<Error>,
{
  async fn verify(
    &mut self,
    mut block: BlockImportParams<T::Block, ()>,
  ) -> Result<(BlockImportParams<T::Block, ()>, Option<Vec<(CacheKeyId, Vec<u8>)>>), String> {
    block.origin = match block.origin {
      BlockOrigin::Genesis => BlockOrigin::Genesis,
      BlockOrigin::NetworkBroadcast => BlockOrigin::NetworkBroadcast,

      // Re-map NetworkInitialSync to NetworkBroadcast so it still triggers notifications
      // Tendermint will listen to the finality stream. If we sync a block we're running a machine
      // for, it'll force the machine to move ahead. We can only do that if there actually are
      // notifications
      //
      // Then Serai also runs data indexing code based on block addition, so ensuring it always
      // emits events ensures we always perform our necessary indexing (albeit with a race
      // condition since Substrate will eventually prune the block's state, potentially before
      // indexing finishes when syncing)
      //
      // The alternative to this would be editing Substrate directly, which would be a lot less
      // fragile, manually triggering the notifications (which may be possible with code intended
      // for testing), writing our own notification system, or implementing lock_import_and_run
      // on our end, letting us directly set the notifications, so we're not beholden to when
      // Substrate decides to call notify_finalized
      //
      // lock_import_and_run unfortunately doesn't allow async code and generally isn't feasible to
      // work with though. We also couldn't use it to prevent Substrate from creating
      // notifications, so it only solves half the problem. We'd *still* have to keep this patch,
      // with all its fragility, unless we edit Substrate or move the entire block import flow here
      BlockOrigin::NetworkInitialSync => BlockOrigin::NetworkBroadcast,
      // Also re-map File so bootstraps also trigger notifications, enabling using bootstraps
      BlockOrigin::File => BlockOrigin::NetworkBroadcast,

      // We do not want this block, which hasn't been confirmed, to be broadcast over the net
      // Substrate will generate notifications unless it's Genesis, which this isn't, InitialSync,
      // which changes telemetry behavior, or File, which is... close enough
      BlockOrigin::ConsensusBroadcast => BlockOrigin::File,
      BlockOrigin::Own => BlockOrigin::File,
    };

    if self.check_already_in_chain(block.header.hash()) {
      return Ok((block, None));
    }

    self.check(&mut block).await.map_err(|e| format!("{e}"))?;
    Ok((block, None))
  }
}

/// Tendermint's Select Chain, where the best chain is defined as the most recently finalized
/// block.
///
/// leaves panics on call due to not being applicable under Tendermint. Any provided answer would
/// have conflicts best left unraised.
//
// SelectChain, while provided by Substrate and part of PartialComponents, isn't used by Substrate
// It's common between various block-production/finality crates, yet Substrate as a system doesn't
// rely on it, which is good, because its definition is explicitly incompatible with Tendermint
//
// leaves is supposed to return all leaves of the blockchain. While Tendermint maintains that view,
// an honest node will only build on the most recently finalized block, so it is a 'leaf' despite
// having descendants
//
// best_chain will always be this finalized block, yet Substrate explicitly defines it as one of
// the above leaves, which this finalized block is explicitly not included in. Accordingly, we
// can never provide a compatible decision
//
// Since PartialComponents expects it, an implementation which does its best is provided. It panics
// if leaves is called, yet returns the finalized chain tip for best_chain, as that's intended to
// be the header to build upon
pub struct TendermintSelectChain<B: Block, Be: Backend<B>>(Arc<Be>, PhantomData<B>);

impl<B: Block, Be: Backend<B>> Clone for TendermintSelectChain<B, Be> {
  fn clone(&self) -> Self {
    TendermintSelectChain(self.0.clone(), PhantomData)
  }
}

impl<B: Block, Be: Backend<B>> TendermintSelectChain<B, Be> {
  pub fn new(backend: Arc<Be>) -> TendermintSelectChain<B, Be> {
    TendermintSelectChain(backend, PhantomData)
  }
}

#[async_trait]
impl<B: Block, Be: Backend<B>> SelectChain<B> for TendermintSelectChain<B, Be> {
  async fn leaves(&self) -> Result<Vec<B::Hash>, Error> {
    panic!("Substrate definition of leaves is incompatible with Tendermint")
  }

  async fn best_chain(&self) -> Result<B::Header, Error> {
    Ok(
      self
        .0
        .blockchain()
        // There should always be a finalized block
        .header(self.0.blockchain().last_finalized().unwrap())
        // There should not be an error in retrieving it and since it's finalized, it should exist
        .unwrap()
        .unwrap(),
    )
  }
}
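TendermintSelectChain's behaviour can be summarised as: refuse to enumerate leaves, always author on the finalized tip. A toy illustration (assumed types; the removed code panics on `leaves` rather than returning an error):

struct FinalizedOnlySelectChain {
  last_finalized: u64,
}

impl FinalizedOnlySelectChain {
  fn leaves(&self) -> Result<Vec<u64>, &'static str> {
    // Enumerating leaves has no meaningful answer under finality-first consensus
    Err("leaves is incompatible with finality-first consensus")
  }

  fn best_chain(&self) -> u64 {
    // Always the finalized tip, never a heavier unfinalized leaf
    self.last_finalized
  }
}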
@@ -1,163 +0,0 @@
use std::sync::Arc;

use sp_core::crypto::KeyTypeId;
use sp_inherents::CreateInherentDataProviders;
use sp_runtime::traits::{Header, Block};
use sp_blockchain::HeaderBackend;
use sp_api::{StateBackend, StateBackendFor, TransactionFor, ApiExt, ProvideRuntimeApi};
use sp_consensus::{Error, Environment};

use sc_client_api::{BlockBackend, Backend, Finalizer, BlockchainEvents};
use sc_block_builder::BlockBuilderApi;
use sc_consensus::{BlockImport, BasicQueue};

use sc_network_common::config::NonDefaultSetConfig;
use sc_network::{ProtocolName, NetworkBlock};
use sc_network_gossip::Network;

use sp_tendermint::TendermintApi;

use substrate_prometheus_endpoint::Registry;

mod validators;

pub(crate) mod tendermint;
pub use tendermint::TendermintImport;

mod block_import;
pub use block_import::TendermintSelectChain;

pub(crate) mod authority;
pub use authority::TendermintAuthority;

pub const CONSENSUS_ID: [u8; 4] = *b"tend";
pub(crate) const KEY_TYPE_ID: KeyTypeId = KeyTypeId(CONSENSUS_ID);

const PROTOCOL_NAME: &str = "/tendermint/1";

pub fn protocol_name<Hash: AsRef<[u8]>>(genesis: Hash, fork: Option<&str>) -> ProtocolName {
  let mut name = format!("/{}", hex::encode(genesis.as_ref()));
  if let Some(fork) = fork {
    name += &format!("/{fork}");
  }
  name += PROTOCOL_NAME;
  name.into()
}

pub fn set_config(protocol: ProtocolName, block_size: u64) -> NonDefaultSetConfig {
  // The extra 512 bytes is for the additional data part of Tendermint
  // Even with BLS, that should just be 161 bytes in the worst case, for a perfect messaging scheme
  // While 256 bytes would suffice there, it's unknown if any LibP2P overhead exists nor if
  // anything here will be perfect. Considering this is miniscule compared to the block size, it's
  // better safe than sorry.
  let mut cfg = NonDefaultSetConfig::new(protocol, block_size + 512);
  cfg.allow_non_reserved(25, 25);
  cfg
}

/// Trait consolidating all generics required by sc_tendermint for processing.
pub trait TendermintClient: Send + Sync + 'static {
  const PROPOSED_BLOCK_SIZE_LIMIT: usize;
  const BLOCK_PROCESSING_TIME_IN_SECONDS: u32;
  const LATENCY_TIME_IN_SECONDS: u32;

  type Block: Block;
  type Backend: Backend<Self::Block> + 'static;

  /// TransactionFor<Client, Block>
  type BackendTransaction: Send + Sync + 'static;
  /// StateBackendFor<Client, Block>
  type StateBackend: StateBackend<
    <<Self::Block as Block>::Header as Header>::Hashing,
    Transaction = Self::BackendTransaction,
  >;
  // Client::Api
  type Api: ApiExt<Self::Block, StateBackend = Self::StateBackend>
    + BlockBuilderApi<Self::Block>
    + TendermintApi<Self::Block>;
  type Client: Send
    + Sync
    + HeaderBackend<Self::Block>
    + BlockBackend<Self::Block>
    + BlockImport<Self::Block, Transaction = Self::BackendTransaction>
    + Finalizer<Self::Block, Self::Backend>
    + BlockchainEvents<Self::Block>
    + ProvideRuntimeApi<Self::Block, Api = Self::Api>
    + 'static;
}

/// Trait implementable on firm types to automatically provide a full TendermintClient impl.
pub trait TendermintClientMinimal: Send + Sync + 'static {
  const PROPOSED_BLOCK_SIZE_LIMIT: usize;
  const BLOCK_PROCESSING_TIME_IN_SECONDS: u32;
  const LATENCY_TIME_IN_SECONDS: u32;

  type Block: Block;
  type Backend: Backend<Self::Block> + 'static;
  type Api: ApiExt<Self::Block> + BlockBuilderApi<Self::Block> + TendermintApi<Self::Block>;
  type Client: Send
    + Sync
    + HeaderBackend<Self::Block>
    + BlockBackend<Self::Block>
    + BlockImport<Self::Block, Transaction = TransactionFor<Self::Client, Self::Block>>
    + Finalizer<Self::Block, Self::Backend>
    + BlockchainEvents<Self::Block>
    + ProvideRuntimeApi<Self::Block, Api = Self::Api>
    + 'static;
}

impl<T: TendermintClientMinimal> TendermintClient for T
where
  <T::Client as ProvideRuntimeApi<T::Block>>::Api:
    BlockBuilderApi<T::Block> + TendermintApi<T::Block>,
  TransactionFor<T::Client, T::Block>: Send + Sync + 'static,
{
  const PROPOSED_BLOCK_SIZE_LIMIT: usize = T::PROPOSED_BLOCK_SIZE_LIMIT;
  const BLOCK_PROCESSING_TIME_IN_SECONDS: u32 = T::BLOCK_PROCESSING_TIME_IN_SECONDS;
  const LATENCY_TIME_IN_SECONDS: u32 = T::LATENCY_TIME_IN_SECONDS;

  type Block = T::Block;
  type Backend = T::Backend;

  type BackendTransaction = TransactionFor<T::Client, T::Block>;
  type StateBackend = StateBackendFor<T::Client, T::Block>;
  type Api = <T::Client as ProvideRuntimeApi<T::Block>>::Api;
  type Client = T::Client;
}

/// Trait consolidating additional generics required by sc_tendermint for authoring.
pub trait TendermintValidator: TendermintClient {
  type CIDP: CreateInherentDataProviders<Self::Block, ()> + 'static;
  type Environment: Send + Sync + Environment<Self::Block> + 'static;

  type Network: Clone
    + Send
    + Sync
    + Network<Self::Block>
    + NetworkBlock<<Self::Block as Block>::Hash, <<Self::Block as Block>::Header as Header>::Number>
    + 'static;
}

pub type TendermintImportQueue<Block, Transaction> = BasicQueue<Block, Transaction>;

/// Create an import queue, additionally returning the Tendermint Import object iself, enabling
/// creating an author later as well.
pub fn import_queue<T: TendermintValidator>(
  spawner: &impl sp_core::traits::SpawnEssentialNamed,
  client: Arc<T::Client>,
  registry: Option<&Registry>,
) -> (TendermintImport<T>, TendermintImportQueue<T::Block, T::BackendTransaction>)
where
  Arc<T::Client>: BlockImport<T::Block, Transaction = T::BackendTransaction>,
  <Arc<T::Client> as BlockImport<T::Block>>::Error: Into<Error>,
{
  let import = TendermintImport::<T>::new(client);

  let boxed = Box::new(import.clone());
  // Use None for the justification importer since justifications always come with blocks
  // Therefore, they're never imported after the fact, which is what mandates an importer
  let queue = || BasicQueue::new(import.clone(), boxed.clone(), None, spawner, registry);

  *futures::executor::block_on(import.queue.write()) = Some(queue());
  (import.clone(), queue())
}
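For reference, the shape of the name built by `protocol_name` above, sketched with plain strings (the real function returns sc_network's `ProtocolName`):

fn protocol_name_sketch(genesis_hex: &str, fork: Option<&str>) -> String {
  // "/<hex genesis hash>[/<fork>]/tendermint/1"
  let mut name = format!("/{genesis_hex}");
  if let Some(fork) = fork {
    name += &format!("/{fork}");
  }
  name += "/tendermint/1";
  name
}

// e.g. protocol_name_sketch("deadbeef", Some("testnet")) == "/deadbeef/testnet/tendermint/1"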
@@ -1,247 +0,0 @@
use std::{
  sync::{Arc, RwLock},
  collections::HashSet,
};

use log::{debug, warn};

use tokio::sync::{Mutex, RwLock as AsyncRwLock};

use sp_core::Decode;
use sp_runtime::{
  traits::{Header, Block},
  Justification,
};
use sp_inherents::{InherentData, InherentDataProvider, CreateInherentDataProviders};
use sp_blockchain::HeaderBackend;
use sp_api::ProvideRuntimeApi;

use sp_consensus::Error;
use sc_consensus::{ForkChoiceStrategy, BlockImportParams};

use sc_block_builder::BlockBuilderApi;

use tendermint_machine::ext::{BlockError, Commit, Network};

use crate::{
  CONSENSUS_ID, TendermintClient, TendermintValidator, validators::TendermintValidators,
  TendermintImportQueue, authority::TendermintAuthority,
};

type InstantiatedTendermintImportQueue<T> = TendermintImportQueue<
  <T as TendermintClient>::Block,
  <T as TendermintClient>::BackendTransaction,
>;

/// Tendermint import handler.
pub struct TendermintImport<T: TendermintValidator> {
  // Lock ensuring only one block is imported at a time
  pub(crate) sync_lock: Arc<Mutex<()>>,

  pub(crate) validators: TendermintValidators<T>,

  pub(crate) providers: Arc<AsyncRwLock<Option<T::CIDP>>>,
  pub(crate) importing_block: Arc<RwLock<Option<<T::Block as Block>::Hash>>>,

  // A set of blocks which we're willing to recheck
  // We reject blocks with invalid inherents, yet inherents can be fatally flawed or solely
  // perceived as flawed
  // If we solely perceive them as flawed, we mark them as eligible for being checked again. Then,
  // if they're proposed again, we see if our perception has changed
  pub(crate) recheck: Arc<RwLock<HashSet<<T::Block as Block>::Hash>>>,

  pub(crate) client: Arc<T::Client>,
  pub(crate) queue: Arc<AsyncRwLock<Option<InstantiatedTendermintImportQueue<T>>>>,
}

impl<T: TendermintValidator> Clone for TendermintImport<T> {
  fn clone(&self) -> Self {
    TendermintImport {
      sync_lock: self.sync_lock.clone(),

      validators: self.validators.clone(),

      providers: self.providers.clone(),
      importing_block: self.importing_block.clone(),
      recheck: self.recheck.clone(),

      client: self.client.clone(),
      queue: self.queue.clone(),
    }
  }
}

impl<T: TendermintValidator> TendermintImport<T> {
  pub(crate) fn new(client: Arc<T::Client>) -> TendermintImport<T> {
    TendermintImport {
      sync_lock: Arc::new(Mutex::new(())),

      validators: TendermintValidators::new(client.clone()),

      providers: Arc::new(AsyncRwLock::new(None)),
      importing_block: Arc::new(RwLock::new(None)),
      recheck: Arc::new(RwLock::new(HashSet::new())),

      client,
      queue: Arc::new(AsyncRwLock::new(None)),
    }
  }

  pub(crate) async fn inherent_data(&self, parent: <T::Block as Block>::Hash) -> InherentData {
    match self
      .providers
      .read()
      .await
      .as_ref()
      .unwrap()
      .create_inherent_data_providers(parent, ())
      .await
    {
      Ok(providers) => match providers.create_inherent_data().await {
        Ok(data) => Some(data),
        Err(err) => {
          warn!(target: "tendermint", "Failed to create inherent data: {}", err);
          None
        }
      },
      Err(err) => {
        warn!(target: "tendermint", "Failed to create inherent data providers: {}", err);
        None
      }
    }
    .unwrap_or_else(InherentData::new)
  }

  async fn check_inherents(
    &self,
    hash: <T::Block as Block>::Hash,
    block: T::Block,
  ) -> Result<(), Error> {
    let inherent_data = self.inherent_data(*block.header().parent_hash()).await;
    let err = self
      .client
      .runtime_api()
      .check_inherents(self.client.info().finalized_hash, block, inherent_data)
      .map_err(|_| Error::Other(BlockError::Fatal.into()))?;

    if err.ok() {
      self.recheck.write().unwrap().remove(&hash);
      Ok(())
    } else if err.fatal_error() {
      Err(Error::Other(BlockError::Fatal.into()))
    } else {
      debug!(target: "tendermint", "Proposed block has temporally wrong inherents");
      self.recheck.write().unwrap().insert(hash);
      Err(Error::Other(BlockError::Temporal.into()))
    }
  }

  // Ensure this is part of a sequential import
  pub(crate) fn verify_order(
    &self,
    parent: <T::Block as Block>::Hash,
    number: <<T::Block as Block>::Header as Header>::Number,
  ) -> Result<(), Error> {
    let info = self.client.info();
    if (info.finalized_hash != parent) || ((info.finalized_number + 1u16.into()) != number) {
      Err(Error::Other("non-sequential import".into()))?;
    }
    Ok(())
  }

  // Do not allow blocks from the traditional network to be broadcast
  // Only allow blocks from Tendermint
  // Tendermint's propose message could be rewritten as a seal OR Tendermint could produce blocks
  // which this checks the proposer slot for, and then tells the Tendermint machine
  // While those would be more seamless with Substrate, there's no actual benefit to doing so
  fn verify_origin(&self, hash: <T::Block as Block>::Hash) -> Result<(), Error> {
    if let Some(tm_hash) = *self.importing_block.read().unwrap() {
      if hash == tm_hash {
        return Ok(());
      }
    }
    Err(Error::Other("block created outside of tendermint".into()))
  }

  // Errors if the justification isn't valid
  pub(crate) fn verify_justification(
    &self,
    hash: <T::Block as Block>::Hash,
    justification: &Justification,
  ) -> Result<(), Error> {
    if justification.0 != CONSENSUS_ID {
      Err(Error::InvalidJustification)?;
    }

    let commit: Commit<TendermintValidators<T>> =
      Commit::decode(&mut justification.1.as_ref()).map_err(|_| Error::InvalidJustification)?;
    // Create a stubbed TendermintAuthority so we can verify the commit
    if !TendermintAuthority::stub(self.clone()).verify_commit(hash, &commit) {
      Err(Error::InvalidJustification)?;
    }
    Ok(())
  }

  // Verifies the justifications aren't malformed, not that the block is justified
  // Errors if justifications is neither empty nor a single Tendermint justification
|
|
||||||
// If the block does have a justification, finalized will be set to true
|
|
||||||
fn verify_justifications<BT>(
|
|
||||||
&self,
|
|
||||||
block: &mut BlockImportParams<T::Block, BT>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
if !block.finalized {
|
|
||||||
if let Some(justifications) = &block.justifications {
|
|
||||||
let mut iter = justifications.iter();
|
|
||||||
let next = iter.next();
|
|
||||||
if next.is_none() || iter.next().is_some() {
|
|
||||||
Err(Error::InvalidJustification)?;
|
|
||||||
}
|
|
||||||
self.verify_justification(block.header.hash(), next.unwrap())?;
|
|
||||||
block.finalized = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
  pub(crate) async fn check<BT>(
    &self,
    block: &mut BlockImportParams<T::Block, BT>,
  ) -> Result<(), Error> {
    if block.finalized {
      if block.fork_choice != Some(ForkChoiceStrategy::Custom(false)) {
        // Since we always set the fork choice, this means something else marked the block as
        // finalized, which shouldn't be possible. Ensuring nothing else is setting blocks as
        // finalized helps ensure our security
        panic!("block was finalized despite not setting the fork choice");
      }
      return Ok(());
    }

    // Set the block as a worse choice
    block.fork_choice = Some(ForkChoiceStrategy::Custom(false));

    self.verify_order(*block.header.parent_hash(), *block.header.number())?;
    self.verify_justifications(block)?;

    // If the block wasn't finalized, verify the origin and validity of its inherents
    if !block.finalized {
      let hash = block.header.hash();
      self.verify_origin(hash)?;
      self
        .check_inherents(hash, T::Block::new(block.header.clone(), block.body.clone().unwrap()))
        .await?;
    }

    // Additionally check these fields are empty
    // They *should* be unused, so requiring their emptiness prevents malleability and ensures
    // nothing slips through
    if !block.post_digests.is_empty() {
      Err(Error::Other("post-digests included".into()))?;
    }
    if !block.auxiliary.is_empty() {
      Err(Error::Other("auxiliary included".into()))?;
    }

    Ok(())
  }
}
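The verify_justification method above fixes the wire format of a finality proof: a consensus engine ID paired with a SCALE-encoded commit. A minimal, self-contained sketch of that shape follows; SimplifiedCommit and the b"tend" engine ID are illustrative assumptions standing in for the real tendermint-machine Commit type and the crate's CONSENSUS_ID, not the repository's definitions.

// Illustrative sketch only: the struct and engine ID below are assumptions, not the
// actual tendermint-machine / sc-tendermint definitions.
// Requires parity-scale-codec with the "derive" feature.
use parity_scale_codec::{Decode, Encode};

const ENGINE_ID: [u8; 4] = *b"tend";

#[derive(Debug, PartialEq, Encode, Decode)]
struct SimplifiedCommit {
  end_time: u64,
  validators: Vec<u16>,
  signature: Vec<u8>,
}

fn main() {
  let commit = SimplifiedCommit { end_time: 1, validators: vec![0, 1, 2], signature: vec![] };
  // A justification is (consensus engine ID, opaque bytes); here the bytes are the encoded commit
  let justification: ([u8; 4], Vec<u8>) = (ENGINE_ID, commit.encode());
  // Verification first checks the engine ID, then decodes and checks the commit itself
  assert_eq!(justification.0, ENGINE_ID);
  assert_eq!(SimplifiedCommit::decode(&mut justification.1.as_ref()).unwrap(), commit);
}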
@@ -1,191 +0,0 @@
use core::ops::Deref;
use std::sync::{Arc, RwLock};

use async_trait::async_trait;

use sp_core::Decode;
use sp_application_crypto::{
  RuntimePublic as PublicTrait,
  sr25519::{Public, Signature},
};
use sp_keystore::CryptoStore;

use sp_staking::SessionIndex;
use sp_api::ProvideRuntimeApi;

use sc_client_api::HeaderBackend;

use tendermint_machine::ext::{BlockNumber, RoundNumber, Weights, Signer, SignatureScheme};

use sp_tendermint::TendermintApi;

use crate::{KEY_TYPE_ID, TendermintClient};

struct TendermintValidatorsStruct {
  session: SessionIndex,

  total_weight: u64,
  weights: Vec<u64>,

  lookup: Vec<Public>,
}

impl TendermintValidatorsStruct {
  fn from_module<T: TendermintClient>(client: &Arc<T::Client>) -> Self {
    let last = client.info().finalized_hash;
    let api = client.runtime_api();
    let session = api.current_session(last).unwrap();
    let validators = api.validators(last).unwrap();

    Self {
      session,

      // TODO
      total_weight: validators.len().try_into().unwrap(),
      weights: vec![1; validators.len()],

      lookup: validators,
    }
  }
}

// Wrap every access of the validators struct in something which forces calling refresh
struct Refresh<T: TendermintClient> {
  client: Arc<T::Client>,
  _refresh: Arc<RwLock<TendermintValidatorsStruct>>,
}

impl<T: TendermintClient> Refresh<T> {
  // If the session has changed, re-create the struct with the data on it
  fn refresh(&self) {
    let session = self._refresh.read().unwrap().session;
    if session !=
      self.client.runtime_api().current_session(self.client.info().finalized_hash).unwrap()
    {
      *self._refresh.write().unwrap() = TendermintValidatorsStruct::from_module::<T>(&self.client);
    }
  }
}

impl<T: TendermintClient> Deref for Refresh<T> {
  type Target = RwLock<TendermintValidatorsStruct>;
  fn deref(&self) -> &RwLock<TendermintValidatorsStruct> {
    self.refresh();
    &self._refresh
  }
}

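As an aside, the refresh-on-access idea behind Refresh<T> above (Deref first re-synchronises the cached data, then hands out the lock) can be shown in a minimal, self-contained form. Names such as Cached and source are hypothetical; this is a sketch of the pattern, not the removed implementation.

// Sketch of the refresh-on-access pattern; `Cached` and `source` are hypothetical names.
use std::{ops::Deref, sync::RwLock};

struct Cached {
  // Stand-in for the runtime API call the real code makes on every access
  source: fn() -> u32,
  value: RwLock<u32>,
}

impl Deref for Cached {
  type Target = RwLock<u32>;
  fn deref(&self) -> &RwLock<u32> {
    // Re-read the source before handing out the lock so callers never see stale data
    let current = (self.source)();
    if *self.value.read().unwrap() != current {
      *self.value.write().unwrap() = current;
    }
    &self.value
  }
}

fn main() {
  let cached = Cached { source: || 5, value: RwLock::new(0) };
  // Any access through Deref refreshes the value first
  assert_eq!(*cached.read().unwrap(), 5);
}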
/// Tendermint validators observer, providing data on the active validators.
pub struct TendermintValidators<T: TendermintClient>(Refresh<T>);
impl<T: TendermintClient> Clone for TendermintValidators<T> {
  fn clone(&self) -> Self {
    Self(Refresh { _refresh: self.0._refresh.clone(), client: self.0.client.clone() })
  }
}

impl<T: TendermintClient> TendermintValidators<T> {
  pub(crate) fn new(client: Arc<T::Client>) -> TendermintValidators<T> {
    TendermintValidators(Refresh {
      _refresh: Arc::new(RwLock::new(TendermintValidatorsStruct::from_module::<T>(&client))),
      client,
    })
  }
}

pub struct TendermintSigner<T: TendermintClient>(
  pub(crate) Arc<dyn CryptoStore>,
  pub(crate) TendermintValidators<T>,
);

impl<T: TendermintClient> Clone for TendermintSigner<T> {
  fn clone(&self) -> Self {
    Self(self.0.clone(), self.1.clone())
  }
}

impl<T: TendermintClient> TendermintSigner<T> {
  async fn get_public_key(&self) -> Public {
    let pubs = self.0.sr25519_public_keys(KEY_TYPE_ID).await;
    if pubs.is_empty() {
      self.0.sr25519_generate_new(KEY_TYPE_ID, None).await.unwrap()
    } else {
      pubs[0]
    }
  }
}

#[async_trait]
impl<T: TendermintClient> Signer for TendermintSigner<T> {
  type ValidatorId = u16;
  type Signature = Signature;

  async fn validator_id(&self) -> Option<u16> {
    let key = self.get_public_key().await;
    for (i, k) in (*self.1 .0).read().unwrap().lookup.iter().enumerate() {
      if k == &key {
        return Some(u16::try_from(i).unwrap());
      }
    }
    None
  }

  async fn sign(&self, msg: &[u8]) -> Signature {
    Signature::decode(
      &mut self
        .0
        .sign_with(KEY_TYPE_ID, &self.get_public_key().await.into(), msg)
        .await
        .unwrap()
        .unwrap()
        .as_ref(),
    )
    .unwrap()
  }
}

impl<T: TendermintClient> SignatureScheme for TendermintValidators<T> {
  type ValidatorId = u16;
  type Signature = Signature;
  type AggregateSignature = Vec<Signature>;
  type Signer = TendermintSigner<T>;

  fn verify(&self, validator: u16, msg: &[u8], sig: &Signature) -> bool {
    self.0.read().unwrap().lookup[usize::try_from(validator).unwrap()].verify(&msg, sig)
  }

  fn aggregate(sigs: &[Signature]) -> Vec<Signature> {
    sigs.to_vec()
  }

  fn verify_aggregate(&self, validators: &[u16], msg: &[u8], sigs: &Vec<Signature>) -> bool {
    if validators.len() != sigs.len() {
      return false;
    }
    for (v, sig) in validators.iter().zip(sigs.iter()) {
      if !self.verify(*v, msg, sig) {
        return false;
      }
    }
    true
  }
}

impl<T: TendermintClient> Weights for TendermintValidators<T> {
  type ValidatorId = u16;

  fn total_weight(&self) -> u64 {
    self.0.read().unwrap().total_weight
  }

  fn weight(&self, id: u16) -> u64 {
    self.0.read().unwrap().weights[usize::try_from(id).unwrap()]
  }

  // TODO: https://github.com/serai-dex/serai/issues/159
  fn proposer(&self, number: BlockNumber, round: RoundNumber) -> u16 {
    u16::try_from(
      (number.0 + u64::from(round.0)) % u64::try_from(self.0.read().unwrap().lookup.len()).unwrap(),
    )
    .unwrap()
  }
}
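The proposer function above rotates purely by (block number + round) modulo the validator count, with every weight hard-coded to 1. A self-contained sketch of how the same rotation could respect unequal weights, the direction issue #159 points at, follows; weighted_proposer is a hypothetical name and this is not the approach the repository ultimately adopted.

// Hypothetical weighted rotation; illustrative only, not the repository's implementation.
fn weighted_proposer(weights: &[u64], number: u64, round: u32) -> u16 {
  let total: u64 = weights.iter().sum();
  // Pick a slot deterministically, then walk the cumulative weights to find its owner
  let mut slot = (number + u64::from(round)) % total;
  for (validator, weight) in weights.iter().enumerate() {
    if slot < *weight {
      return u16::try_from(validator).unwrap();
    }
    slot -= *weight;
  }
  unreachable!("slot was reduced modulo the total weight")
}

fn main() {
  // Validator 1 has twice the weight, so it owns two of the four slots per cycle
  let weights = [1, 2, 1];
  assert_eq!(weighted_proposer(&weights, 0, 0), 0);
  assert_eq!(weighted_proposer(&weights, 1, 0), 1);
  assert_eq!(weighted_proposer(&weights, 2, 0), 1);
  assert_eq!(weighted_proposer(&weights, 3, 0), 2);
}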
@@ -1,38 +0,0 @@
[package]
name = "pallet-tendermint"
version = "0.1.0"
description = "Tendermint pallet for Substrate"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/tendermint/pallet"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
parity-scale-codec = { version = "3", default-features = false, features = ["derive"] }
scale-info = { version = "2", default-features = false, features = ["derive"] }

sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false }
sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false }

frame-system = { git = "https://github.com/serai-dex/substrate", default-features = false }
frame-support = { git = "https://github.com/serai-dex/substrate", default-features = false }

[features]
std = [
  "sp-application-crypto/std",

  "frame-system/std",
  "frame-support/std",
]

runtime-benchmarks = [
  "frame-system/runtime-benchmarks",
  "frame-support/runtime-benchmarks",
]

default = ["std"]
@@ -1,15 +0,0 @@
AGPL-3.0-only license

Copyright (c) 2022-2023 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@@ -1,75 +0,0 @@
#![cfg_attr(not(feature = "std"), no_std)]

#[frame_support::pallet]
pub mod pallet {
  use sp_std::vec::Vec;
  use sp_core::sr25519::Public;

  use frame_support::pallet_prelude::*;
  use frame_support::traits::{ConstU32, OneSessionHandler};

  type MaxValidators = ConstU32<{ u16::MAX as u32 }>;

  #[pallet::config]
  pub trait Config: frame_system::Config {}

  #[pallet::pallet]
  #[pallet::generate_store(pub(super) trait Store)]
  pub struct Pallet<T>(PhantomData<T>);

  #[pallet::storage]
  #[pallet::getter(fn session)]
  pub type Session<T: Config> = StorageValue<_, u32, ValueQuery>;

  #[pallet::storage]
  #[pallet::getter(fn validators)]
  pub type Validators<T: Config> = StorageValue<_, BoundedVec<Public, MaxValidators>, ValueQuery>;

  pub mod crypto {
    use sp_application_crypto::{KeyTypeId, app_crypto, sr25519};
    app_crypto!(sr25519, KeyTypeId(*b"tend"));

    impl<T> sp_application_crypto::BoundToRuntimeAppPublic for crate::Pallet<T> {
      type Public = Public;
    }

    sp_application_crypto::with_pair! {
      pub type AuthorityPair = Pair;
    }
    pub type AuthoritySignature = Signature;
    pub type AuthorityId = Public;
  }

  impl<T: Config, V> OneSessionHandler<V> for Pallet<T> {
    type Key = crypto::Public;

    // TODO
    fn on_genesis_session<'a, I: 'a>(_validators: I)
    where
      I: Iterator<Item = (&'a V, Self::Key)>,
      V: 'a,
    {
    }

    fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued: I)
    where
      I: Iterator<Item = (&'a V, Self::Key)>,
      V: 'a,
    {
      if !changed {
        return;
      }

      Session::<T>::put(Self::session() + 1);
      Validators::<T>::put(
        BoundedVec::try_from(validators.map(|(_, key)| key.into()).collect::<Vec<Public>>())
          .unwrap(),
      );
    }

    // TODO
    fn on_disabled(_validator_index: u32) {}
  }
}

pub use pallet::*;
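The on_new_session handler above only bumps the session counter and replaces the stored validator set when the incoming set actually changed. A minimal, self-contained model of that behaviour, with no FRAME machinery and a hypothetical SessionState type, is:

// Toy model of the session-rotation rule above; SessionState is a hypothetical name.
struct SessionState {
  session: u32,
  validators: Vec<[u8; 32]>,
}

impl SessionState {
  fn on_new_session(&mut self, changed: bool, validators: Vec<[u8; 32]>) {
    // An unchanged validator set does not start a new session
    if !changed {
      return;
    }
    self.session += 1;
    self.validators = validators;
  }
}

fn main() {
  let mut state = SessionState { session: 0, validators: vec![[0; 32]] };
  // Unchanged set: the session number stays put
  state.on_new_session(false, vec![[0; 32]]);
  assert_eq!(state.session, 0);
  // Changed set: the session number advances and the validators are replaced
  state.on_new_session(true, vec![[1; 32], [2; 32]]);
  assert_eq!(state.session, 1);
  assert_eq!(state.validators.len(), 2);
}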
@@ -1,21 +0,0 @@
[package]
name = "sp-tendermint"
version = "0.1.0"
description = "Tendermint primitives for Substrate"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/tendermint/primitives"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false }
sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
sp-api = { git = "https://github.com/serai-dex/substrate", default-features = false }

[features]
std = ["sp-core/std", "sp-std/std", "sp-api/std"]
default = ["std"]
@@ -1,15 +0,0 @@
AGPL-3.0-only license

Copyright (c) 2022-2023 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@@ -1,16 +0,0 @@
#![cfg_attr(not(feature = "std"), no_std)]

use sp_core::sr25519::Public;
use sp_std::vec::Vec;

sp_api::decl_runtime_apis! {
  /// TendermintApi trait for runtimes to implement.
  pub trait TendermintApi {
    /// Current session number. A session is NOT a fixed length of blocks, yet rather a continuous
    /// set of validators.
    fn current_session() -> u32;

    /// Current validators.
    fn validators() -> Vec<Public>;
  }
}
@@ -34,7 +34,6 @@ pub mod pallet {
   }
 
   #[pallet::pallet]
-  #[pallet::generate_store(pub(crate) trait Store)]
   pub struct Pallet<T>(PhantomData<T>);
 
   impl<T: Config> Pallet<T> {
@@ -52,7 +52,6 @@ pub mod pallet {
   }
 
   #[pallet::pallet]
-  #[pallet::generate_store(pub(super) trait Store)]
   pub struct Pallet<T>(PhantomData<T>);
 
   /// The details of a validator set instance.
@@ -3,7 +3,7 @@ name = "tendermint-machine"
 version = "0.2.0"
 description = "An implementation of the Tendermint state machine in Rust"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/substrate/tendermint/machine"
+repository = "https://github.com/serai-dex/serai/tree/develop/tendermint"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 edition = "2021"