Validator DHT (#494)

* Route validators for any active set through sc-authority-discovery

Additionally adds an RPC route to retrieve their P2P addresses.
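
For illustration, the route can be queried through serai-client along these lines (placeholder URL, error handling elided):

```rust
use serai_client::{primitives::NetworkId, Serai};

#[tokio::main]
async fn main() {
  // Connect to a Serai node's RPC (placeholder URL)
  let serai = Serai::new("http://localhost:9944".to_string()).await.unwrap();
  // Fetch the P2P Multiaddrs sc-authority-discovery published for this network's validators
  let addresses = serai.p2p_validators(NetworkId::Bitcoin).await.unwrap();
  for address in addresses {
    println!("validator multiaddr: {address}");
  }
}
```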

* Have the coordinator get peers from substrate

* Have the RPC return one address, not up to 3

Prevents the coordinator from believing it has 3 peers when it has one.

* Add missing feature to serai-client

* Correct network argument in serai-client for p2p_validators call

* Add a test in serai-client to check DHT population with a much quicker failure than the coordinator tests

* Update to latest Substrate

Removes the distinction between the BABE and AuthorityDiscovery keys, which causes
sc-authority-discovery to populate the DHT as desired.

* Update to a properly tagged substrate commit

* Add all dialed-to peers to GossipSub

* cargo fmt

* Reduce common code in serai-coordinator-tests with a more involved new_test

* Use a recursive async function to spawn `n` DockerTests with the necessary networking configuration
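
A minimal sketch of the recursion pattern, using the newly added async-recursion dependency; `spawn_nested` and its body are placeholders, not the real helper in tests/coordinator:

```rust
use async_recursion::async_recursion;
use dockertest::DockerTest;

// Hypothetical sketch: each level of recursion builds one DockerTest and, inside its
// run_async body, recurses to start the next, so all `remaining` environments stay
// alive for the duration of the innermost test body.
#[async_recursion]
async fn spawn_nested(remaining: usize) {
  if remaining == 0 {
    // Every environment is now up; run the actual assertions here
    return;
  }
  let mut test = DockerTest::new();
  // ... provide_container(...) calls and networking configuration for this coordinator ...
  test
    .run_async(move |_ops| async move {
      // Recurse while this environment is still alive
      spawn_nested(remaining - 1).await;
    })
    .await;
}
```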

* Merge UNIQUE_ID and ONE_AT_A_TIME

* Tidy up the new recursive code in tests/coordinator

* Use a Mutex in CONTEXT to let it be set multiple times
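
Roughly the idea, with placeholder types (the real CONTEXT and the state it holds live in the test harness):

```rust
use std::sync::OnceLock;
use tokio::sync::Mutex;

// Placeholder stand-in for whatever state the test harness shares
struct Context {
  serai_rpc: String,
}

// A Mutex around an Option, rather than a write-once cell, lets each recursive
// test invocation overwrite the context with the environment it just spawned
static CONTEXT: OnceLock<Mutex<Option<Context>>> = OnceLock::new();

async fn set_context(ctx: Context) {
  *CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(ctx);
}
```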

* Make complementary edits to full-stack tests

* Augment coordinator P2p connection logs

* Drop lock acquisitions before recursing

* Better scope lock acquisitions in full-stack, preventing a deadlock

* Ensure OUTER_OPS is reset across the test boundary

* Add cargo deny allowance for dockertest fork
Luke Parker 2023-12-22 21:09:18 -05:00 committed by GitHub
parent 00774c29d7
commit b493e3e31f
28 changed files with 1551 additions and 1225 deletions

Cargo.lock (generated)

@ -290,6 +290,17 @@ dependencies = [
"pin-project-lite 0.2.13",
]
[[package]]
name = "async-recursion"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.41",
]
[[package]]
name = "async-trait"
version = "0.1.74"
@ -1597,8 +1608,7 @@ dependencies = [
[[package]]
name = "dockertest"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88933ed892cc8f5be247da11a1cd86a5c64802ac0172982e8aeb8315cb6dacfa"
source = "git+https://github.com/kayabaNerve/dockertest-rs?branch=arc#c0ea77997048f9edc9987984bbe20e43fac74e06"
dependencies = [
"anyhow",
"async-trait",
@ -2217,7 +2227,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "fork-tree"
version = "3.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
]
@ -2240,7 +2250,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "frame-benchmarking"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-support",
"frame-support-procedural",
@ -2265,7 +2275,7 @@ dependencies = [
[[package]]
name = "frame-executive"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-support",
"frame-system",
@ -2294,7 +2304,7 @@ dependencies = [
[[package]]
name = "frame-support"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"bitflags 1.3.2",
"environmental",
@ -2327,7 +2337,7 @@ dependencies = [
[[package]]
name = "frame-support-procedural"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"Inflector",
"cfg-expr",
@ -2345,7 +2355,7 @@ dependencies = [
[[package]]
name = "frame-support-procedural-tools"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-support-procedural-tools-derive",
"proc-macro-crate 1.3.1",
@ -2357,7 +2367,7 @@ dependencies = [
[[package]]
name = "frame-support-procedural-tools-derive"
version = "3.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"proc-macro2",
"quote",
@ -2367,7 +2377,7 @@ dependencies = [
[[package]]
name = "frame-system"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"cfg-if",
"frame-support",
@ -2386,7 +2396,7 @@ dependencies = [
[[package]]
name = "frame-system-rpc-runtime-api"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"sp-api",
@ -2395,7 +2405,7 @@ dependencies = [
[[package]]
name = "frame-try-runtime"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-support",
"parity-scale-codec",
@ -4715,7 +4725,7 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e"
dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro-crate 2.0.0",
"proc-macro2",
"quote",
"syn 2.0.41",
@ -4829,7 +4839,7 @@ dependencies = [
[[package]]
name = "pallet-authorship"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-support",
"frame-system",
@ -4843,7 +4853,7 @@ dependencies = [
[[package]]
name = "pallet-babe"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-benchmarking",
"frame-support",
@ -4867,7 +4877,7 @@ dependencies = [
[[package]]
name = "pallet-grandpa"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-benchmarking",
"frame-support",
@ -4890,7 +4900,7 @@ dependencies = [
[[package]]
name = "pallet-session"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-support",
"frame-system",
@ -4911,7 +4921,7 @@ dependencies = [
[[package]]
name = "pallet-timestamp"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-benchmarking",
"frame-support",
@ -4929,7 +4939,7 @@ dependencies = [
[[package]]
name = "pallet-transaction-payment"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-support",
"frame-system",
@ -4945,7 +4955,7 @@ dependencies = [
[[package]]
name = "pallet-transaction-payment-rpc"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"jsonrpsee",
"pallet-transaction-payment-rpc-runtime-api",
@ -4961,7 +4971,7 @@ dependencies = [
[[package]]
name = "pallet-transaction-payment-rpc-runtime-api"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"pallet-transaction-payment",
"parity-scale-codec",
@ -6114,7 +6124,7 @@ dependencies = [
[[package]]
name = "sc-allocator"
version = "4.1.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"log",
"sp-core",
@ -6125,7 +6135,7 @@ dependencies = [
[[package]]
name = "sc-authority-discovery"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"futures",
@ -6153,7 +6163,7 @@ dependencies = [
[[package]]
name = "sc-basic-authorship"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"futures",
"futures-timer",
@ -6176,7 +6186,7 @@ dependencies = [
[[package]]
name = "sc-block-builder"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"sc-client-api",
@ -6191,7 +6201,7 @@ dependencies = [
[[package]]
name = "sc-chain-spec"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"memmap2",
"sc-chain-spec-derive",
@ -6210,7 +6220,7 @@ dependencies = [
[[package]]
name = "sc-chain-spec-derive"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
@ -6221,7 +6231,7 @@ dependencies = [
[[package]]
name = "sc-cli"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"array-bytes",
"chrono",
@ -6260,7 +6270,7 @@ dependencies = [
[[package]]
name = "sc-client-api"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"fnv",
"futures",
@ -6285,7 +6295,7 @@ dependencies = [
[[package]]
name = "sc-client-db"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"hash-db",
"kvdb",
@ -6311,7 +6321,7 @@ dependencies = [
[[package]]
name = "sc-consensus"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"futures",
@ -6336,7 +6346,7 @@ dependencies = [
[[package]]
name = "sc-consensus-babe"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"fork-tree",
@ -6372,7 +6382,7 @@ dependencies = [
[[package]]
name = "sc-consensus-epochs"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"fork-tree",
"parity-scale-codec",
@ -6385,7 +6395,7 @@ dependencies = [
[[package]]
name = "sc-consensus-grandpa"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"ahash",
"array-bytes",
@ -6426,7 +6436,7 @@ dependencies = [
[[package]]
name = "sc-consensus-slots"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"futures",
@ -6449,7 +6459,7 @@ dependencies = [
[[package]]
name = "sc-executor"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"parking_lot 0.12.1",
@ -6471,7 +6481,7 @@ dependencies = [
[[package]]
name = "sc-executor-common"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"sc-allocator",
"sp-maybe-compressed-blob",
@ -6483,7 +6493,7 @@ dependencies = [
[[package]]
name = "sc-executor-wasmtime"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"anyhow",
"cfg-if",
@ -6500,7 +6510,7 @@ dependencies = [
[[package]]
name = "sc-informant"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"anstyle",
"futures",
@ -6516,7 +6526,7 @@ dependencies = [
[[package]]
name = "sc-keystore"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"array-bytes",
"parking_lot 0.12.1",
@ -6530,7 +6540,7 @@ dependencies = [
[[package]]
name = "sc-network"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"array-bytes",
"async-channel",
@ -6572,7 +6582,7 @@ dependencies = [
[[package]]
name = "sc-network-bitswap"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-channel",
"cid",
@ -6592,7 +6602,7 @@ dependencies = [
[[package]]
name = "sc-network-common"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"bitflags 1.3.2",
@ -6609,7 +6619,7 @@ dependencies = [
[[package]]
name = "sc-network-gossip"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"ahash",
"futures",
@ -6628,7 +6638,7 @@ dependencies = [
[[package]]
name = "sc-network-light"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"array-bytes",
"async-channel",
@ -6649,7 +6659,7 @@ dependencies = [
[[package]]
name = "sc-network-sync"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"array-bytes",
"async-channel",
@ -6683,7 +6693,7 @@ dependencies = [
[[package]]
name = "sc-network-transactions"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"array-bytes",
"futures",
@ -6701,7 +6711,7 @@ dependencies = [
[[package]]
name = "sc-offchain"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"bytes",
"fnv",
@ -6733,7 +6743,7 @@ dependencies = [
[[package]]
name = "sc-proposer-metrics"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"log",
"substrate-prometheus-endpoint",
@ -6742,7 +6752,7 @@ dependencies = [
[[package]]
name = "sc-rpc"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"futures",
"jsonrpsee",
@ -6772,7 +6782,7 @@ dependencies = [
[[package]]
name = "sc-rpc-api"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"jsonrpsee",
"parity-scale-codec",
@ -6791,7 +6801,7 @@ dependencies = [
[[package]]
name = "sc-rpc-server"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"http",
"jsonrpsee",
@ -6806,7 +6816,7 @@ dependencies = [
[[package]]
name = "sc-rpc-spec-v2"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"array-bytes",
"futures",
@ -6832,7 +6842,7 @@ dependencies = [
[[package]]
name = "sc-service"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"directories",
@ -6895,7 +6905,7 @@ dependencies = [
[[package]]
name = "sc-state-db"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"log",
"parity-scale-codec",
@ -6906,7 +6916,7 @@ dependencies = [
[[package]]
name = "sc-sysinfo"
version = "6.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"futures",
"libc",
@ -6925,7 +6935,7 @@ dependencies = [
[[package]]
name = "sc-telemetry"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"chrono",
"futures",
@ -6944,7 +6954,7 @@ dependencies = [
[[package]]
name = "sc-tracing"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"anstyle",
"chrono",
@ -6972,7 +6982,7 @@ dependencies = [
[[package]]
name = "sc-tracing-proc-macro"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
@ -6983,7 +6993,7 @@ dependencies = [
[[package]]
name = "sc-transaction-pool"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"futures",
@ -7009,7 +7019,7 @@ dependencies = [
[[package]]
name = "sc-transaction-pool-api"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"futures",
@ -7025,7 +7035,7 @@ dependencies = [
[[package]]
name = "sc-utils"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-channel",
"futures",
@ -7281,6 +7291,7 @@ dependencies = [
"hex",
"modular-frost",
"monero-serai",
"multiaddr",
"parity-scale-codec",
"rand_core",
"serai-abi",
@ -7359,6 +7370,8 @@ dependencies = [
name = "serai-coordinator-tests"
version = "0.1.0"
dependencies = [
"async-recursion",
"async-trait",
"blake2",
"borsh",
"ciphersuite",
@ -7418,6 +7431,8 @@ version = "0.1.0"
name = "serai-full-stack-tests"
version = "0.1.0"
dependencies = [
"async-recursion",
"async-trait",
"bitcoin-serai",
"curve25519-dalek",
"dockertest",
@ -7427,6 +7442,7 @@ dependencies = [
"rand_core",
"serai-client",
"serai-coordinator-tests",
"serai-docker-tests",
"serai-message-queue-tests",
"serai-processor",
"serai-processor-tests",
@ -7559,6 +7575,7 @@ dependencies = [
"sp-timestamp",
"substrate-build-script-utils",
"substrate-frame-rpc-system",
"tokio",
]
[[package]]
@ -7671,6 +7688,7 @@ dependencies = [
"frame-support",
"frame-system",
"frame-system-rpc-runtime-api",
"hashbrown 0.14.3",
"pallet-authorship",
"pallet-babe",
"pallet-grandpa",
@ -8061,7 +8079,7 @@ dependencies = [
[[package]]
name = "sp-api"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"hash-db",
"log",
@ -8082,7 +8100,7 @@ dependencies = [
[[package]]
name = "sp-api-proc-macro"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"Inflector",
"blake2",
@ -8096,7 +8114,7 @@ dependencies = [
[[package]]
name = "sp-application-crypto"
version = "23.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"scale-info",
@ -8109,7 +8127,7 @@ dependencies = [
[[package]]
name = "sp-arithmetic"
version = "16.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"integer-sqrt",
"num-traits",
@ -8123,7 +8141,7 @@ dependencies = [
[[package]]
name = "sp-authority-discovery"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"scale-info",
@ -8135,7 +8153,7 @@ dependencies = [
[[package]]
name = "sp-block-builder"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"sp-api",
"sp-inherents",
@ -8146,7 +8164,7 @@ dependencies = [
[[package]]
name = "sp-blockchain"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"futures",
"log",
@ -8164,7 +8182,7 @@ dependencies = [
[[package]]
name = "sp-consensus"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"futures",
@ -8178,7 +8196,7 @@ dependencies = [
[[package]]
name = "sp-consensus-babe"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"parity-scale-codec",
@ -8197,7 +8215,7 @@ dependencies = [
[[package]]
name = "sp-consensus-grandpa"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"finality-grandpa",
"log",
@ -8215,7 +8233,7 @@ dependencies = [
[[package]]
name = "sp-consensus-slots"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"scale-info",
@ -8227,7 +8245,7 @@ dependencies = [
[[package]]
name = "sp-core"
version = "21.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"array-bytes",
"bitflags 1.3.2",
@ -8270,7 +8288,7 @@ dependencies = [
[[package]]
name = "sp-core-hashing"
version = "9.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"blake2b_simd",
"byteorder",
@ -8282,7 +8300,7 @@ dependencies = [
[[package]]
name = "sp-core-hashing-proc-macro"
version = "9.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"quote",
"sp-core-hashing",
@ -8292,7 +8310,7 @@ dependencies = [
[[package]]
name = "sp-database"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"kvdb",
"parking_lot 0.12.1",
@ -8301,7 +8319,7 @@ dependencies = [
[[package]]
name = "sp-debug-derive"
version = "8.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"proc-macro2",
"quote",
@ -8311,7 +8329,7 @@ dependencies = [
[[package]]
name = "sp-externalities"
version = "0.19.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"environmental",
"parity-scale-codec",
@ -8322,7 +8340,7 @@ dependencies = [
[[package]]
name = "sp-inherents"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"impl-trait-for-tuples",
@ -8336,7 +8354,7 @@ dependencies = [
[[package]]
name = "sp-io"
version = "23.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"bytes",
"ed25519",
@ -8358,7 +8376,7 @@ dependencies = [
[[package]]
name = "sp-keyring"
version = "24.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"lazy_static",
"sp-core",
@ -8369,7 +8387,7 @@ dependencies = [
[[package]]
name = "sp-keystore"
version = "0.27.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"parking_lot 0.12.1",
@ -8381,7 +8399,7 @@ dependencies = [
[[package]]
name = "sp-maybe-compressed-blob"
version = "4.1.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"thiserror",
"zstd 0.12.4",
@ -8390,7 +8408,7 @@ dependencies = [
[[package]]
name = "sp-metadata-ir"
version = "0.1.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-metadata",
"parity-scale-codec",
@ -8401,7 +8419,7 @@ dependencies = [
[[package]]
name = "sp-offchain"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"sp-api",
"sp-core",
@ -8411,7 +8429,7 @@ dependencies = [
[[package]]
name = "sp-panic-handler"
version = "8.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"backtrace",
"lazy_static",
@ -8421,7 +8439,7 @@ dependencies = [
[[package]]
name = "sp-rpc"
version = "6.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"rustc-hash",
"serde",
@ -8431,7 +8449,7 @@ dependencies = [
[[package]]
name = "sp-runtime"
version = "24.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"either",
"hash256-std-hasher",
@ -8453,7 +8471,7 @@ dependencies = [
[[package]]
name = "sp-runtime-interface"
version = "17.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"bytes",
"impl-trait-for-tuples",
@ -8471,7 +8489,7 @@ dependencies = [
[[package]]
name = "sp-runtime-interface-proc-macro"
version = "11.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"Inflector",
"proc-macro-crate 1.3.1",
@ -8483,7 +8501,7 @@ dependencies = [
[[package]]
name = "sp-session"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"scale-info",
@ -8498,7 +8516,7 @@ dependencies = [
[[package]]
name = "sp-staking"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"impl-trait-for-tuples",
"parity-scale-codec",
@ -8512,7 +8530,7 @@ dependencies = [
[[package]]
name = "sp-state-machine"
version = "0.28.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"hash-db",
"log",
@ -8533,12 +8551,12 @@ dependencies = [
[[package]]
name = "sp-std"
version = "8.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
[[package]]
name = "sp-storage"
version = "13.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"impl-serde",
"parity-scale-codec",
@ -8551,7 +8569,7 @@ dependencies = [
[[package]]
name = "sp-timestamp"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"async-trait",
"parity-scale-codec",
@ -8564,7 +8582,7 @@ dependencies = [
[[package]]
name = "sp-tracing"
version = "10.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"sp-std",
@ -8576,7 +8594,7 @@ dependencies = [
[[package]]
name = "sp-transaction-pool"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"sp-api",
"sp-runtime",
@ -8585,7 +8603,7 @@ dependencies = [
[[package]]
name = "sp-trie"
version = "22.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"ahash",
"hash-db",
@ -8608,7 +8626,7 @@ dependencies = [
[[package]]
name = "sp-version"
version = "22.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"impl-serde",
"parity-scale-codec",
@ -8625,7 +8643,7 @@ dependencies = [
[[package]]
name = "sp-version-proc-macro"
version = "8.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"proc-macro2",
@ -8636,7 +8654,7 @@ dependencies = [
[[package]]
name = "sp-wasm-interface"
version = "14.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"anyhow",
"impl-trait-for-tuples",
@ -8649,7 +8667,7 @@ dependencies = [
[[package]]
name = "sp-weights"
version = "20.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"parity-scale-codec",
"scale-info",
@ -8827,12 +8845,12 @@ dependencies = [
[[package]]
name = "substrate-build-script-utils"
version = "3.0.0"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
[[package]]
name = "substrate-frame-rpc-system"
version = "4.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"frame-system-rpc-runtime-api",
"futures",
@ -8851,7 +8869,7 @@ dependencies = [
[[package]]
name = "substrate-prometheus-endpoint"
version = "0.10.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"hyper",
"log",
@ -8863,7 +8881,7 @@ dependencies = [
[[package]]
name = "substrate-wasm-builder"
version = "5.0.0-dev"
source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2"
source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5"
dependencies = [
"anstyle",
"build-helper",


@ -97,6 +97,8 @@ lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev
sp-core-hashing = { git = "https://github.com/serai-dex/substrate" }
sp-std = { git = "https://github.com/serai-dex/substrate" }
dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" }
[workspace.lints.clippy]
unwrap_or_default = "allow"
borrow_as_ptr = "deny"


@ -50,7 +50,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim
futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "mdns", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] }
[dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }


@ -951,10 +951,8 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
p2p: P,
processors: Pro,
serai: Serai,
serai: Arc<Serai>,
) {
let serai = Arc::new(serai);
let (new_tributary_spec_send, mut new_tributary_spec_recv) = mpsc::unbounded_channel();
// Reload active tributaries from the database
for spec in ActiveTributaryDb::active_tributaries(&raw_db).1 {
@ -1212,11 +1210,10 @@ async fn main() {
key_bytes.zeroize();
key
};
let p2p = LibP2p::new();
let processors = Arc::new(MessageQueue::from_env(Service::Coordinator));
let serai = || async {
let serai = (async {
loop {
let Ok(serai) = Serai::new(format!(
"http://{}:9944",
@ -1229,8 +1226,10 @@ async fn main() {
continue;
};
log::info!("made initial connection to Serai node");
return serai;
return Arc::new(serai);
}
};
run(db, key, p2p, processors, serai().await).await
})
.await;
let p2p = LibP2p::new(serai.clone());
run(db, key, p2p, processors, serai).await
}


@ -7,9 +7,11 @@ use std::{
};
use async_trait::async_trait;
use rand_core::{RngCore, OsRng};
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::primitives::NetworkId;
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};
use serai_db::Db;
@ -20,6 +22,7 @@ use tokio::{
};
use libp2p::{
core::multiaddr::{Protocol, Multiaddr},
identity::Keypair,
PeerId,
tcp::Config as TcpConfig,
@ -127,8 +130,8 @@ pub struct Message<P: P2p> {
pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
type Id: Send + Sync + Clone + Copy + fmt::Debug;
async fn subscribe(&self, genesis: [u8; 32]);
async fn unsubscribe(&self, genesis: [u8; 32]);
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
async fn send_raw(&self, to: Self::Id, genesis: Option<[u8; 32]>, msg: Vec<u8>);
async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec<u8>);
@ -190,14 +193,12 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
#[derive(NetworkBehaviour)]
struct Behavior {
gossipsub: GsBehavior,
#[cfg(debug_assertions)]
mdns: libp2p::mdns::tokio::Behaviour,
}
#[allow(clippy::type_complexity)]
#[derive(Clone)]
pub struct LibP2p {
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, [u8; 32])>>>,
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
broadcast: Arc<Mutex<mpsc::UnboundedSender<(Option<[u8; 32]>, Vec<u8>)>>>,
receive: Arc<Mutex<mpsc::UnboundedReceiver<(PeerId, Vec<u8>)>>>,
}
@ -209,14 +210,13 @@ impl fmt::Debug for LibP2p {
impl LibP2p {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
pub fn new(serai: Arc<Serai>) -> Self {
// Block size limit + 1 KB of space for signatures/metadata
const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;
log::info!("creating a libp2p instance");
let throwaway_key_pair = Keypair::generate_ed25519();
let throwaway_peer_id = PeerId::from(throwaway_key_pair.public());
let behavior = Behavior {
gossipsub: {
@ -258,14 +258,6 @@ impl LibP2p {
gossipsub
},
// Only use MDNS in debug environments, as it should have no value in a release build
#[cfg(debug_assertions)]
mdns: {
log::info!("creating mdns service");
libp2p::mdns::tokio::Behaviour::new(libp2p::mdns::Config::default(), throwaway_peer_id)
.unwrap()
},
};
// Uses noise for authentication, yamux for multiplexing
@ -294,8 +286,8 @@ impl LibP2p {
let (receive_send, receive_recv) = mpsc::unbounded_channel();
let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();
fn topic_for_genesis(genesis: [u8; 32]) -> IdentTopic {
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(genesis)))
fn topic_for_set(set: ValidatorSet) -> IdentTopic {
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
}
tokio::spawn({
@ -305,17 +297,14 @@ impl LibP2p {
fn broadcast_raw(
p2p: &mut Swarm<Behavior>,
time_of_last_p2p_message: &mut Instant,
genesis: Option<[u8; 32]>,
set: Option<ValidatorSet>,
msg: Vec<u8>,
) {
// Update the time of last message
*time_of_last_p2p_message = Instant::now();
let topic = if let Some(genesis) = genesis {
topic_for_genesis(genesis)
} else {
IdentTopic::new(LIBP2P_TOPIC)
};
let topic =
if let Some(set) = set { topic_for_set(set) } else { IdentTopic::new(LIBP2P_TOPIC) };
match p2p.behaviour_mut().gossipsub.publish(topic, msg.clone()) {
Err(PublishError::SigningError(e)) => panic!("signing error when broadcasting: {e}"),
@ -331,37 +320,97 @@ impl LibP2p {
}
async move {
let mut set_for_genesis = HashMap::new();
let mut pending_p2p_connections = vec![];
// Run this task ad-infinitum
loop {
// Handle pending P2P connections
// TODO: Break this out onto its own task with better peer management logic?
{
let mut connect = |addr: Multiaddr| {
log::info!("found peer from substrate: {addr}");
let protocols = addr.iter().filter_map(|piece| match piece {
// Drop PeerIds from the Substrate P2p network
Protocol::P2p(_) => None,
// Use our own TCP port
Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
other => Some(other),
});
let mut new_addr = Multiaddr::empty();
for protocol in protocols {
new_addr.push(protocol);
}
let addr = new_addr;
log::debug!("transformed found peer: {addr}");
if let Err(e) = swarm.dial(addr) {
log::warn!("dialing peer failed: {e:?}");
}
};
while let Some(network) = pending_p2p_connections.pop() {
if let Ok(mut nodes) = serai.p2p_validators(network).await {
// If there's an insufficient amount of nodes known, connect to all yet add it back
// and break
if nodes.len() < 3 {
log::warn!(
"insufficient amount of P2P nodes known for {:?}: {}",
network,
nodes.len()
);
pending_p2p_connections.push(network);
for node in nodes {
connect(node);
}
break;
}
// Randomly select up to 5
for _ in 0 .. 5 {
if !nodes.is_empty() {
let to_connect = nodes.swap_remove(
usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
.unwrap(),
);
connect(to_connect);
}
}
}
}
}
let time_since_last = Instant::now().duration_since(time_of_last_p2p_message);
tokio::select! {
biased;
// Subscribe to any new topics
topic = subscribe_recv.recv() => {
let (subscribe, topic) = topic.expect("subscribe_recv closed. are we shutting down?");
set = subscribe_recv.recv() => {
let (subscribe, set, genesis): (_, ValidatorSet, [u8; 32]) =
set.expect("subscribe_recv closed. are we shutting down?");
let topic = topic_for_set(set);
if subscribe {
swarm
.behaviour_mut()
.gossipsub
.subscribe(&topic_for_genesis(topic))
.unwrap();
log::info!("subscribing to p2p messages for {set:?}");
pending_p2p_connections.push(set.network);
set_for_genesis.insert(genesis, set);
swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap();
} else {
swarm
.behaviour_mut()
.gossipsub
.unsubscribe(&topic_for_genesis(topic))
.unwrap();
log::info!("unsubscribing to p2p messages for {set:?}");
set_for_genesis.remove(&genesis);
swarm.behaviour_mut().gossipsub.unsubscribe(&topic).unwrap();
}
}
// Handle any queued outbound messages
msg = broadcast_recv.recv() => {
let (genesis, msg) = msg.expect("broadcast_recv closed. are we shutting down?");
let (genesis, msg): (Option<[u8; 32]>, Vec<u8>) =
msg.expect("broadcast_recv closed. are we shutting down?");
let set = genesis.and_then(|genesis| set_for_genesis.get(&genesis).copied());
broadcast_raw(
&mut swarm,
&mut time_of_last_p2p_message,
genesis,
set,
msg,
);
}
@ -369,28 +418,17 @@ impl LibP2p {
// Handle new incoming messages
event = swarm.next() => {
match event {
#[cfg(debug_assertions)]
Some(SwarmEvent::Behaviour(BehaviorEvent::Mdns(
libp2p::mdns::Event::Discovered(list),
))) => {
for (peer, mut addr) in list {
// Check the port is as expected to prevent trying to peer with Substrate nodes
if addr.pop() == Some(libp2p::multiaddr::Protocol::Tcp(PORT)) {
log::info!("found peer via mdns");
swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer);
}
}
Some(SwarmEvent::Dialing { connection_id, .. }) => {
log::debug!("dialing to peer in connection ID {}", &connection_id);
}
#[cfg(debug_assertions)]
Some(SwarmEvent::Behaviour(BehaviorEvent::Mdns(
libp2p::mdns::Event::Expired(list),
))) => {
for (peer, _) in list {
log::info!("disconnecting peer due to mdns");
swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer);
Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => {
log::debug!(
"connection established to peer {} in connection ID {}",
&peer_id,
&connection_id,
);
swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id)
}
}
Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub(
GsEvent::Message { propagation_source, message, .. },
))) => {
@ -434,21 +472,21 @@ impl LibP2p {
impl P2p for LibP2p {
type Id = PeerId;
async fn subscribe(&self, genesis: [u8; 32]) {
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
self
.subscribe
.lock()
.await
.send((true, genesis))
.send((true, set, genesis))
.expect("subscribe_send closed. are we shutting down?");
}
async fn unsubscribe(&self, genesis: [u8; 32]) {
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
self
.subscribe
.lock()
.await
.send((false, genesis))
.send((false, set, genesis))
.expect("subscribe_send closed. are we shutting down?");
}
@ -552,7 +590,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
channels.write().await.insert(genesis, send);
// Subscribe to the topic for this tributary
p2p.subscribe(genesis).await;
p2p.subscribe(tributary.spec.set(), genesis).await;
// Per-Tributary P2P message handler
tokio::spawn({
@ -675,8 +713,8 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
}
TributaryEvent::TributaryRetired(set) => {
if let Some(genesis) = set_to_genesis.remove(&set) {
p2p.unsubscribe(set, genesis).await;
channels.write().await.remove(&genesis);
p2p.unsubscribe(genesis).await;
}
}
}


@ -4,7 +4,7 @@ use std::{
collections::{VecDeque, HashSet, HashMap},
};
use serai_client::primitives::NetworkId;
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
use processor_messages::CoordinatorMessage;
@ -62,8 +62,8 @@ impl LocalP2p {
impl P2p for LocalP2p {
type Id = usize;
async fn subscribe(&self, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _genesis: [u8; 32]) {}
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
self.1.write().await.1[to].push_back((self.0, msg));


@ -100,4 +100,5 @@ allow-git = [
"https://github.com/serai-dex/substrate-bip39",
"https://github.com/serai-dex/substrate",
"https://github.com/monero-rs/base58-monero",
"https://github.com/kayabaNerve/dockertest-rs",
]


@ -27,6 +27,7 @@ serde_json = { version = "1", optional = true }
serai-abi = { path = "../abi", version = "0.1" }
multiaddr = { version = "0.18", optional = true }
sp-core = { git = "https://github.com/serai-dex/substrate", optional = true }
sp-runtime = { git = "https://github.com/serai-dex/substrate", optional = true }
frame-system = { git = "https://github.com/serai-dex/substrate", optional = true }
@ -56,7 +57,7 @@ dockertest = "0.4"
serai-docker-tests = { path = "../../tests/docker" }
[features]
serai = ["thiserror", "serde", "serde_json", "sp-core", "sp-runtime", "frame-system", "simple-request"]
serai = ["thiserror", "serde", "serde_json", "serai-abi/serde", "multiaddr", "sp-core", "sp-runtime", "frame-system", "simple-request"]
borsh = ["serai-abi/borsh"]
networks = []


@ -16,7 +16,7 @@ pub use abi::{primitives, Transaction};
use abi::*;
pub use primitives::{SeraiAddress, Signature, Amount};
use primitives::Header;
use primitives::{Header, NetworkId};
pub mod coins;
pub use coins::SeraiCoins;
@ -306,6 +306,14 @@ impl Serai {
pub fn as_of(&self, block: [u8; 32]) -> TemporalSerai {
TemporalSerai { serai: self, block, events: RwLock::new(None) }
}
/// Return the P2P Multiaddrs for the validators of the specified network.
pub async fn p2p_validators(
&self,
network: NetworkId,
) -> Result<Vec<multiaddr::Multiaddr>, SeraiError> {
self.call("p2p_validators", network).await
}
}
impl<'a> TemporalSerai<'a> {


@ -0,0 +1,59 @@
use serai_client::{primitives::NetworkId, Serai};
#[tokio::test]
async fn dht() {
use dockertest::{
PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,
TestBodySpecification, DockerTest,
};
serai_docker_tests::build("serai".to_string());
let handle = |name| format!("serai_client-serai_node-{name}");
let composition = |name| {
TestBodySpecification::with_image(
Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never),
)
.replace_cmd(vec![
"serai-node".to_string(),
"--unsafe-rpc-external".to_string(),
"--rpc-cors".to_string(),
"all".to_string(),
"--chain".to_string(),
"local".to_string(),
format!("--{name}"),
])
.set_publish_all_ports(true)
.set_handle(handle(name))
.set_start_policy(StartPolicy::Strict)
.set_log_options(Some(LogOptions {
action: LogAction::Forward,
policy: LogPolicy::Always,
source: LogSource::Both,
}))
};
let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
test.provide_container(composition("alice"));
test.provide_container(composition("bob"));
test.provide_container(composition("charlie"));
test.provide_container(composition("dave"));
test
.run_async(|ops| async move {
// Sleep until the Substrate RPC starts
let alice = handle("alice");
let serai_rpc = ops.handle(&alice).host_port(9944).unwrap();
let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1);
// Sleep for a minute
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
// Check the DHT has been populated
assert!(!Serai::new(serai_rpc.clone())
.await
.unwrap()
.p2p_validators(NetworkId::Bitcoin)
.await
.unwrap()
.is_empty());
})
.await;
}


@ -145,7 +145,7 @@ pub mod pallet {
fn increase_balance_internal(to: Public, balance: Balance) -> Result<(), Error<T, I>> {
let coin = &balance.coin;
// sub amount from account
// add amount to account
let new_amount = Self::balances(to, coin)
.checked_add(balance.amount.0)
.ok_or(Error::<T, I>::AmountOverflowed)?;


@ -20,11 +20,6 @@ workspace = true
name = "serai-node"
[dependencies]
clap = { version = "4", features = ["derive"] }
futures-util = "0.3"
jsonrpsee = { version = "0.16", features = ["server"] }
sp-core = { git = "https://github.com/serai-dex/substrate" }
sp-timestamp = { git = "https://github.com/serai-dex/substrate" }
sp-io = { git = "https://github.com/serai-dex/substrate" }
@ -37,6 +32,12 @@ frame-benchmarking = { git = "https://github.com/serai-dex/substrate" }
serai-runtime = { path = "../runtime", features = ["std"] }
clap = { version = "4", features = ["derive"] }
futures-util = "0.3"
tokio = { version = "1", features = ["sync", "rt-multi-thread"] }
jsonrpsee = { version = "0.16", features = ["server"] }
sc-offchain = { git = "https://github.com/serai-dex/substrate" }
sc-transaction-pool = { git = "https://github.com/serai-dex/substrate" }
sc-transaction-pool-api = { git = "https://github.com/serai-dex/substrate" }


@ -1,16 +1,18 @@
use std::sync::Arc;
use jsonrpsee::RpcModule;
use std::{sync::Arc, collections::HashSet};
use sp_blockchain::{Error as BlockchainError, HeaderBackend, HeaderMetadata};
use sp_block_builder::BlockBuilder;
use sp_api::ProvideRuntimeApi;
use serai_runtime::{
primitives::{SubstrateAmount, PublicKey},
Nonce, Block,
primitives::{NetworkId, SubstrateAmount, PublicKey},
Nonce, Block, SeraiRuntimeApi,
};
use tokio::sync::RwLock;
use jsonrpsee::RpcModule;
pub use sc_rpc_api::DenyUnsafe;
use sc_transaction_pool_api::TransactionPool;
@ -18,6 +20,7 @@ pub struct FullDeps<C, P> {
pub client: Arc<C>,
pub pool: Arc<P>,
pub deny_unsafe: DenyUnsafe,
pub authority_discovery: Option<sc_authority_discovery::Service>,
}
pub fn create_full<
@ -34,16 +37,56 @@ pub fn create_full<
where
C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, PublicKey, Nonce>
+ pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, SubstrateAmount>
+ SeraiRuntimeApi<Block>
+ BlockBuilder<Block>,
{
use substrate_frame_rpc_system::{System, SystemApiServer};
use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
let mut module = RpcModule::new(());
let FullDeps { client, pool, deny_unsafe } = deps;
let FullDeps { client, pool, deny_unsafe, authority_discovery } = deps;
module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
module.merge(TransactionPayment::new(client).into_rpc())?;
module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
if let Some(authority_discovery) = authority_discovery {
let mut authority_discovery_module = RpcModule::new((client, RwLock::new(authority_discovery)));
authority_discovery_module.register_async_method(
"p2p_validators",
|params, context| async move {
let network: NetworkId = params.parse()?;
let (client, authority_discovery) = &*context;
let latest_block = client.info().best_hash;
let validators = client.runtime_api().validators(latest_block, network).map_err(|_| {
jsonrpsee::core::Error::to_call_error(std::io::Error::other(format!(
"couldn't get validators from the latest block, which is likely a fatal bug. {}",
"please report this at https://github.com/serai-dex/serai",
)))
})?;
let mut all_p2p_addresses = vec![];
for validator in validators {
let mut returned_addresses = authority_discovery
.write()
.await
.get_addresses_by_authority_id(validator.into())
.await
.unwrap_or_else(HashSet::new)
.into_iter();
// Only take a single address
// There should be one; there may be two if their IP address changed, and more should only
// occur if they have multiple proxies/an IP address changing frequently/some issue
// preventing consistent self-identification
// It isn't beneficial to use multiple addresses for a single peer here
if let Some(address) = returned_addresses.next() {
all_p2p_addresses.push(address);
}
}
Ok(all_p2p_addresses)
},
)?;
module.merge(authority_discovery_module)?;
}
Ok(module)
}
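The address-selection rule in this handler, shown in isolation: authority discovery may know several addresses for one authority, yet only the first is surfaced per validator, as multiple addresses for a single peer buy nothing here. A self-contained sketch with plain strings standing in for the discovered multiaddrs:

use std::collections::HashSet;

// One entry of `discovered` per validator; at most one address is kept from each.
fn one_address_per_validator(discovered: Vec<HashSet<String>>) -> Vec<String> {
  let mut all_p2p_addresses = vec![];
  for addresses in discovered {
    if let Some(address) = addresses.into_iter().next() {
      all_p2p_addresses.push(address);
    }
  }
  all_p2p_addresses
}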


@ -206,6 +206,42 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
);
}
let role = config.role.clone();
let keystore = keystore_container.keystore();
let prometheus_registry = config.prometheus_registry().cloned();
// TODO: Ensure we're considered as an authority if we're a validator of an external network
let authority_discovery = if role.is_authority() {
let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(
#[allow(clippy::field_reassign_with_default)]
{
let mut worker = sc_authority_discovery::WorkerConfig::default();
worker.publish_non_global_ips = publish_non_global_ips;
worker.strict_record_validation = true;
worker
},
client.clone(),
network.clone(),
Box::pin(network.event_stream("authority-discovery").filter_map(|e| async move {
match e {
Event::Dht(e) => Some(e),
_ => None,
}
})),
sc_authority_discovery::Role::PublishAndDiscover(keystore.clone()),
prometheus_registry.clone(),
);
task_manager.spawn_handle().spawn(
"authority-discovery-worker",
Some("networking"),
worker.run(),
);
Some(service)
} else {
None
};
let rpc_builder = {
let client = client.clone();
let pool = transaction_pool.clone();
@ -215,18 +251,15 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
client: client.clone(),
pool: pool.clone(),
deny_unsafe,
authority_discovery: authority_discovery.clone(),
})
.map_err(Into::into)
})
};
let enable_grandpa = !config.disable_grandpa;
let role = config.role.clone();
let force_authoring = config.force_authoring;
let name = config.network.node_name.clone();
let prometheus_registry = config.prometheus_registry().cloned();
let keystore = keystore_container.keystore();
sc_service::spawn_tasks(sc_service::SpawnTasksParams {
config,
@ -251,7 +284,7 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
select_chain,
env: sc_basic_authorship::ProposerFactory::new(
task_manager.spawn_handle(),
client.clone(),
client,
transaction_pool.clone(),
prometheus_registry.as_ref(),
telemetry.as_ref().map(Telemetry::handle),
@ -277,33 +310,6 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
);
}
if role.is_authority() {
task_manager.spawn_handle().spawn(
"authority-discovery-worker",
Some("networking"),
sc_authority_discovery::new_worker_and_service_with_config(
#[allow(clippy::field_reassign_with_default)]
{
let mut worker = sc_authority_discovery::WorkerConfig::default();
worker.publish_non_global_ips = publish_non_global_ips;
worker
},
client,
network.clone(),
Box::pin(network.event_stream("authority-discovery").filter_map(|e| async move {
match e {
Event::Dht(e) => Some(e),
_ => None,
}
})),
sc_authority_discovery::Role::PublishAndDiscover(keystore.clone()),
prometheus_registry.clone(),
)
.0
.run(),
);
}
if enable_grandpa {
task_manager.spawn_essential_handle().spawn_blocking(
"grandpa-voter",


@ -19,6 +19,8 @@ ignored = ["scale", "scale-info"]
workspace = true
[dependencies]
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] }
scale-info = { version = "2", default-features = false, features = ["derive"] }


@ -11,6 +11,7 @@ use core::marker::PhantomData;
// Re-export all components
pub use serai_primitives as primitives;
pub use primitives::{BlockNumber, Header};
use primitives::{NetworkId, NETWORKS};
pub use frame_system as system;
pub use frame_support as support;
@ -43,7 +44,7 @@ use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys, KeyTypeId,
traits::{Convert, BlakeTwo256, Block as BlockT},
transaction_validity::{TransactionSource, TransactionValidity},
Perbill, ApplyExtrinsicResult,
BoundedVec, Perbill, ApplyExtrinsicResult,
};
use primitives::{PublicKey, AccountLookup, SubstrateAmount};
@ -377,6 +378,13 @@ mod benches {
);
}
sp_api::decl_runtime_apis! {
#[api_version(1)]
pub trait SeraiRuntimeApi {
fn validators(network_id: NetworkId) -> Vec<PublicKey>;
}
}
sp_api::impl_runtime_apis! {
impl sp_api::Core<Block> for Runtime {
fn version() -> RuntimeVersion {
@ -561,10 +569,41 @@ sp_api::impl_runtime_apis! {
impl sp_authority_discovery::AuthorityDiscoveryApi<Block> for Runtime {
fn authorities() -> Vec<AuthorityDiscoveryId> {
Babe::authorities()
// Converts to `[u8; 32]` so it can be hashed
let serai_validators = Babe::authorities()
.into_iter()
.map(|(id, _)| AuthorityDiscoveryId::from(id.into_inner()))
.collect()
.map(|(id, _)| id.into_inner().0)
.collect::<hashbrown::HashSet<_>>();
let mut all = serai_validators;
for network in NETWORKS {
if network == NetworkId::Serai {
continue;
}
let participants =
ValidatorSets::participants_for_latest_decided_set(network)
.map_or(vec![], BoundedVec::into_inner);
for (participant, _) in participants {
all.insert(participant.0);
}
}
all.into_iter().map(|id| AuthorityDiscoveryId::from(PublicKey::from_raw(id))).collect()
}
}
impl crate::SeraiRuntimeApi<Block> for Runtime {
fn validators(network_id: NetworkId) -> Vec<PublicKey> {
if network_id == NetworkId::Serai {
Babe::authorities()
.into_iter()
.map(|(id, _)| id.into_inner())
.collect()
} else {
ValidatorSets::participants_for_latest_decided_set(network_id)
.map_or(
vec![],
|vec| vec.into_inner().into_iter().map(|(validator, _)| validator).collect()
)
}
}
}
}
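A usage sketch of the new runtime API from node-side code, mirroring what the RPC above does (the bounds follow its where-clause; validators_at_best is a hypothetical helper name):

use sp_api::ProvideRuntimeApi;
use sp_blockchain::HeaderBackend;
use serai_runtime::{
  primitives::{NetworkId, PublicKey},
  Block, SeraiRuntimeApi,
};

// Evaluate SeraiRuntimeApi::validators at the current best block.
fn validators_at_best<C>(
  client: &C,
  network: NetworkId,
) -> Result<Vec<PublicKey>, sp_api::ApiError>
where
  C: ProvideRuntimeApi<Block> + HeaderBackend<Block>,
  C::Api: SeraiRuntimeApi<Block>,
{
  let best = client.info().best_hash;
  client.runtime_api().validators(best, network)
}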


@ -124,8 +124,9 @@ pub mod pallet {
#[pallet::getter(fn allocation_per_key_share)]
pub type AllocationPerKeyShare<T: Config> =
StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;
/// The validators selected to be in-set who haven't been removed.
/// The validators selected to be in-set.
#[pallet::storage]
#[pallet::getter(fn participants_for_latest_decided_set)]
pub(crate) type Participants<T: Config> = StorageMap<
_,
Identity,


@ -19,6 +19,8 @@ workspace = true
[dependencies]
hex = "0.4"
async-trait = "0.1"
async-recursion = "1"
zeroize = { version = "1", default-features = false }
rand_core = { version = "0.6", default-features = false }


@ -1,9 +1,8 @@
#![allow(clippy::needless_pass_by_ref_mut)] // False positives
use std::{
sync::{OnceLock, Arc, Mutex},
sync::{OnceLock, Arc},
time::Duration,
fs,
};
use tokio::{task::AbortHandle, sync::Mutex as AsyncMutex};
@ -27,16 +26,11 @@ use serai_message_queue::{Service, Metadata, client::MessageQueue};
use serai_client::{primitives::Signature, Serai};
use dockertest::{
PullPolicy, Image, LogAction, LogPolicy, LogSource, LogOptions, StartPolicy,
TestBodySpecification, DockerOperations,
};
use dockertest::{PullPolicy, Image, TestBodySpecification, DockerOperations};
#[cfg(test)]
mod tests;
static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();
pub fn coordinator_instance(
name: &str,
message_queue_key: <Ristretto as Ciphersuite>::F,
@ -81,78 +75,6 @@ pub fn serai_composition(name: &str) -> TestBodySpecification {
.set_publish_all_ports(true)
}
pub type Handles = (String, String, String);
pub fn coordinator_stack(
name: &str,
) -> (Handles, <Ristretto as Ciphersuite>::F, Vec<TestBodySpecification>) {
let serai_composition = serai_composition(name);
let (coord_key, message_queue_keys, message_queue_composition) =
serai_message_queue_tests::instance();
let coordinator_composition = coordinator_instance(name, coord_key);
// Give every item in this stack a unique ID
// Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits
let (first, unique_id) = {
let unique_id_mutex = UNIQUE_ID.get_or_init(|| Mutex::new(0));
let mut unique_id_lock = unique_id_mutex.lock().unwrap();
let first = *unique_id_lock == 0;
let unique_id = *unique_id_lock;
*unique_id_lock += 1;
(first, unique_id)
};
let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), ".test-logs", "coordinator"]
.iter()
.collect::<std::path::PathBuf>();
if first {
let _ = fs::remove_dir_all(&logs_path);
fs::create_dir_all(&logs_path).expect("couldn't create logs directory");
assert!(
fs::read_dir(&logs_path).expect("couldn't read the logs folder").next().is_none(),
"logs folder wasn't empty, despite removing it at the start of the run",
);
}
let logs_path = logs_path.to_str().unwrap().to_string();
let mut compositions = vec![];
let mut handles = vec![];
for (name, composition) in [
("serai_node", serai_composition),
("message_queue", message_queue_composition),
("coordinator", coordinator_composition),
] {
let handle = format!("coordinator-{name}-{unique_id}");
compositions.push(
composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options(
Some(LogOptions {
action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
LogAction::Forward
} else {
LogAction::ForwardToFile { path: logs_path.clone() }
},
policy: LogPolicy::Always,
source: LogSource::Both,
}),
),
);
handles.push(handle);
}
let coordinator_composition = compositions.last_mut().unwrap();
coordinator_composition.inject_container_name(handles[0].clone(), "SERAI_HOSTNAME");
coordinator_composition.inject_container_name(handles[1].clone(), "MESSAGE_QUEUE_RPC");
(
(handles[0].clone(), handles[1].clone(), handles[2].clone()),
message_queue_keys[&NetworkId::Bitcoin],
compositions,
)
}
fn is_cosign_message(msg: &CoordinatorMessage) -> bool {
matches!(
msg,
@ -176,15 +98,19 @@ fn is_cosign_message(msg: &CoordinatorMessage) -> bool {
)
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Handles {
pub(crate) serai: String,
pub(crate) message_queue: String,
}
#[derive(Clone)]
pub struct Processor {
network: NetworkId,
serai_rpc: String,
#[allow(unused)]
message_queue_handle: String,
#[allow(unused)]
coordinator_handle: String,
handles: Handles,
queue: Arc<AsyncMutex<(u64, u64, MessageQueue)>>,
abort_handle: Option<Arc<AbortHandle>>,
@ -205,14 +131,14 @@ impl Processor {
raw_i: u8,
network: NetworkId,
ops: &DockerOperations,
handles: (String, String, String),
handles: Handles,
processor_key: <Ristretto as Ciphersuite>::F,
) -> Processor {
let message_queue_rpc = ops.handle(&handles.1).host_port(2287).unwrap();
let message_queue_rpc = ops.handle(&handles.message_queue).host_port(2287).unwrap();
let message_queue_rpc = format!("{}:{}", message_queue_rpc.0, message_queue_rpc.1);
// Sleep until the Substrate RPC starts
let serai_rpc = ops.handle(&handles.0).host_port(9944).unwrap();
let serai_rpc = ops.handle(&handles.serai).host_port(9944).unwrap();
let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1);
// Bound execution to 60 seconds
for _ in 0 .. 60 {
@ -231,8 +157,7 @@ impl Processor {
network,
serai_rpc,
message_queue_handle: handles.1,
coordinator_handle: handles.2,
handles,
queue: Arc::new(AsyncMutex::new((
0,


@ -1,5 +1,4 @@
use std::{
sync::Mutex,
time::Duration,
collections::{HashSet, HashMap},
};
@ -261,43 +260,21 @@ pub async fn batch(
#[tokio::test]
async fn batch_test() {
let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock();
let (processors, test) = new_test();
test
.run_async(|ops| async move {
// Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
// TODO: Replace this with a Coordinator RPC
tokio::time::sleep(Duration::from_secs(150)).await;
// Sleep even longer if in the CI due to it being slower than commodity hardware
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(120)).await;
}
// Connect to the Message Queues as the processor
let mut new_processors: Vec<Processor> = vec![];
for (i, (handles, key)) in processors.into_iter().enumerate() {
new_processors.push(
Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await,
);
}
let mut processors = new_processors;
let (processor_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
batch(
&mut processors,
&processor_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
id: 0,
block: BlockHash([0x22; 32]),
instructions: vec![],
},
)
.await;
})
new_test(|mut processors: Vec<Processor>| async move {
let (processor_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
batch(
&mut processors,
&processor_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
id: 0,
block: BlockHash([0x22; 32]),
instructions: vec![],
},
)
.await;
})
.await;
}


@ -1,5 +1,4 @@
use std::{
sync::Mutex,
time::{Duration, SystemTime},
collections::HashMap,
};
@ -221,30 +220,8 @@ pub async fn key_gen<C: Ciphersuite>(
#[tokio::test]
async fn key_gen_test() {
let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock();
let (processors, test) = new_test();
test
.run_async(|ops| async move {
// Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
// TODO: Replace this with a Coordinator RPC
tokio::time::sleep(Duration::from_secs(150)).await;
// Sleep even longer if in the CI due to it being slower than commodity hardware
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(120)).await;
}
// Connect to the Message Queues as the processor
let mut new_processors: Vec<Processor> = vec![];
for (i, (handles, key)) in processors.into_iter().enumerate() {
new_processors.push(
Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await,
);
}
let mut processors = new_processors;
key_gen::<Secp256k1>(&mut processors).await;
})
.await;
new_test(|mut processors: Vec<Processor>| async move {
key_gen::<Secp256k1>(&mut processors).await;
})
.await;
}


@ -1,8 +1,14 @@
use std::sync::OnceLock;
use core::future::Future;
use std::{sync::OnceLock, collections::HashMap};
use ciphersuite::Ristretto;
use tokio::sync::Mutex;
use dockertest::DockerTest;
use dockertest::{
LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification,
DockerOperations, DockerTest,
};
use serai_docker_tests::fresh_logs_folder;
use crate::*;
@ -19,13 +25,28 @@ pub use sign::sign;
pub(crate) const COORDINATORS: usize = 4;
pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1;
pub(crate) static ONE_AT_A_TIME: OnceLock<Mutex<()>> = OnceLock::new();
// Provides a unique ID and ensures only one invocation occurs at a time.
static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();
#[async_trait::async_trait]
pub(crate) trait TestBody: 'static + Send + Sync {
async fn body(&self, processors: Vec<Processor>);
}
#[async_trait::async_trait]
impl<F: Send + Future, TB: 'static + Send + Sync + Fn(Vec<Processor>) -> F> TestBody for TB {
async fn body(&self, processors: Vec<Processor>) {
(self)(processors).await;
}
}
pub(crate) async fn new_test(test_body: impl TestBody) {
let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await;
pub(crate) fn new_test() -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, DockerTest) {
let mut coordinators = vec![];
let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
let mut coordinator_compositions = vec![];
for i in 0 .. COORDINATORS {
let (handles, coord_key, compositions) = coordinator_stack(match i {
let name = match i {
0 => "Alice",
1 => "Bob",
2 => "Charlie",
@ -33,13 +54,158 @@ pub(crate) fn new_test() -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, Dock
4 => "Eve",
5 => "Ferdie",
_ => panic!("needed a 7th name for a serai node"),
});
coordinators.push((handles, coord_key));
};
let serai_composition = serai_composition(name);
let (processor_key, message_queue_keys, message_queue_composition) =
serai_message_queue_tests::instance();
let coordinator_composition = coordinator_instance(name, processor_key);
// Give every item in this stack a unique ID
// Uses a Mutex as we can't generate an 8-byte random ID without hitting hostname length limits
let (first, unique_id) = {
let first = *unique_id_lock == 0;
let unique_id = *unique_id_lock;
*unique_id_lock += 1;
(first, unique_id)
};
let logs_path = fresh_logs_folder(first, "coordinator");
let mut compositions = vec![];
let mut handles = HashMap::new();
for (name, composition) in [
("serai_node", serai_composition),
("message_queue", message_queue_composition),
("coordinator", coordinator_composition),
] {
let handle = format!("coordinator-{name}-{unique_id}");
compositions.push(
composition
.set_start_policy(StartPolicy::Strict)
.set_handle(handle.clone())
.set_log_options(Some(LogOptions {
action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
LogAction::Forward
} else {
LogAction::ForwardToFile { path: logs_path.clone() }
},
policy: LogPolicy::Always,
source: LogSource::Both,
})),
);
handles.insert(name, handle);
}
let processor_key = message_queue_keys[&NetworkId::Bitcoin];
coordinators.push((
Handles {
serai: handles.remove("serai_node").unwrap(),
message_queue: handles.remove("message_queue").unwrap(),
},
processor_key,
));
coordinator_compositions.push(compositions.pop().unwrap());
for composition in compositions {
test.provide_container(composition);
}
}
(coordinators, test)
struct Context {
pending_coordinator_compositions: Mutex<Vec<TestBodySpecification>>,
handles_and_keys: Vec<(Handles, <Ristretto as Ciphersuite>::F)>,
test_body: Box<dyn TestBody>,
}
static CONTEXT: OnceLock<Mutex<Option<Context>>> = OnceLock::new();
*CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context {
pending_coordinator_compositions: Mutex::new(coordinator_compositions),
handles_and_keys: coordinators,
test_body: Box::new(test_body),
});
// The DockerOperations from the first invocation, containing the Message Queue servers and the
// Serai nodes.
static OUTER_OPS: OnceLock<Mutex<Option<DockerOperations>>> = OnceLock::new();
// Reset OUTER_OPS
*OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None;
// Spawns a coordinator, if one has yet to be spawned, or else runs the test.
#[async_recursion::async_recursion]
async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) {
// If the outer operations have yet to be set, these *are* the outer operations
let outer_ops = OUTER_OPS.get().unwrap();
if outer_ops.lock().await.is_none() {
*outer_ops.lock().await = Some(inner_ops);
}
let context_lock = CONTEXT.get().unwrap().lock().await;
let Context { pending_coordinator_compositions, handles_and_keys: coordinators, test_body } =
context_lock.as_ref().unwrap();
// Check if there is a coordinator left
let maybe_coordinator = {
let mut remaining = pending_coordinator_compositions.lock().await;
let maybe_coordinator = if !remaining.is_empty() {
let handles = coordinators[coordinators.len() - remaining.len()].0.clone();
let composition = remaining.remove(0);
Some((composition, handles))
} else {
None
};
drop(remaining);
maybe_coordinator
};
if let Some((mut composition, handles)) = maybe_coordinator {
let network = {
let outer_ops = outer_ops.lock().await;
let outer_ops = outer_ops.as_ref().unwrap();
// Spawn it by building another DockerTest which recursively calls this function
// TODO: Spawn this outside of DockerTest so we can remove the recursion
let serai_container = outer_ops.handle(&handles.serai);
composition.modify_env("SERAI_HOSTNAME", serai_container.ip());
let message_queue_container = outer_ops.handle(&handles.message_queue);
composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip());
format!("container:{}", serai_container.name())
};
let mut test = DockerTest::new().with_network(dockertest::Network::External(network));
test.provide_container(composition);
drop(context_lock);
test.run_async(spawn_coordinator_or_run_test).await;
} else {
let outer_ops = outer_ops.lock().await.take().unwrap();
// Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
// TODO: Replace this with a Coordinator RPC we can query
tokio::time::sleep(Duration::from_secs(60)).await;
// Connect to the Message Queues as the processor
let mut processors: Vec<Processor> = vec![];
for (i, (handles, key)) in coordinators.iter().enumerate() {
processors.push(
Processor::new(
i.try_into().unwrap(),
NetworkId::Bitcoin,
&outer_ops,
handles.clone(),
*key,
)
.await,
);
}
test_body.body(processors).await;
}
}
test.run_async(spawn_coordinator_or_run_test).await;
}
// TODO: Don't use a pessimistic sleep
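new_test now drives DockerTest recursively, building one nested DockerTest per coordinator, which is why async-recursion appears in the dev-dependencies: a plain async fn can't call itself, since its future would have to contain itself, so the macro boxes the recursive call. A minimal sketch of the mechanism:

// The attribute rewrites the return type to a boxed future, making self-calls legal.
#[async_recursion::async_recursion]
async fn spawn_n(remaining: usize) {
  if remaining == 0 {
    return;
  }
  // ... spawn one item here ...
  spawn_n(remaining - 1).await;
}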


@ -1,5 +1,4 @@
use std::{
sync::Mutex,
time::Duration,
collections::{HashSet, HashMap},
};
@ -169,186 +168,161 @@ pub async fn sign(
#[tokio::test]
async fn sign_test() {
let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock();
let (processors, test) = new_test();
new_test(|mut processors: Vec<Processor>| async move {
let (participant_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
test
.run_async(|ops| async move {
// Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
// TODO: Replace this with a Coordinator RPC
tokio::time::sleep(Duration::from_secs(150)).await;
// 'Send' external coins into Serai
let serai = processors[0].serai().await;
let (serai_pair, serai_addr) = {
let mut name = [0; 4];
OsRng.fill_bytes(&mut name);
let pair = insecure_pair_from_name(&hex::encode(name));
let address = SeraiAddress::from(pair.public());
// Sleep even longer if in the CI due to it being slower than commodity hardware
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(120)).await;
}
// Connect to the Message Queues as the processor
let mut new_processors: Vec<Processor> = vec![];
for (i, (handles, key)) in processors.into_iter().enumerate() {
new_processors.push(
Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await,
);
}
let mut processors = new_processors;
let (participant_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
// 'Send' external coins into Serai
let serai = processors[0].serai().await;
let (serai_pair, serai_addr) = {
let mut name = [0; 4];
OsRng.fill_bytes(&mut name);
let pair = insecure_pair_from_name(&hex::encode(name));
let address = SeraiAddress::from(pair.public());
// Fund the new account to pay for fees
let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };
serai
.publish(&serai.sign(
&insecure_pair_from_name("Ferdie"),
SeraiCoins::transfer(address, balance),
0,
Default::default(),
))
.await
.unwrap();
(pair, address)
};
#[allow(clippy::inconsistent_digit_grouping)]
let amount = Amount(1_000_000_00);
let balance = Balance { coin: Coin::Bitcoin, amount };
let coin_block = BlockHash([0x33; 32]);
let block_included_in = batch(
&mut processors,
&participant_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
id: 0,
block: coin_block,
instructions: vec![InInstructionWithBalance {
instruction: InInstruction::Transfer(serai_addr),
balance,
}],
},
)
.await;
{
let block_included_in_hash =
serai.finalized_block_by_number(block_included_in).await.unwrap().unwrap().hash();
let serai = serai.as_of(block_included_in_hash);
let serai = serai.coins();
assert_eq!(
serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(),
Amount(1_000_000_000)
);
// Verify the mint occurred as expected
assert_eq!(
serai.mint_events().await.unwrap(),
vec![CoinsEvent::Mint { to: serai_addr, balance }]
);
assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), amount);
assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), amount);
}
// Trigger a burn
let out_instruction = OutInstructionWithBalance {
balance,
instruction: OutInstruction {
address: ExternalAddress::new(b"external".to_vec()).unwrap(),
data: None,
},
};
// Fund the new account to pay for fees
let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };
serai
.publish(&serai.sign(
&serai_pair,
SeraiCoins::burn_with_instruction(out_instruction.clone()),
&insecure_pair_from_name("Ferdie"),
SeraiCoins::transfer(address, balance),
0,
Default::default(),
))
.await
.unwrap();
// TODO: We *really* need a helper for this pattern
let mut last_serai_block = block_included_in;
'outer: for _ in 0 .. 20 {
tokio::time::sleep(Duration::from_secs(6)).await;
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(6)).await;
}
(pair, address)
};
while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() {
let burn_events = serai
.as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash())
.coins()
.burn_with_instruction_events()
.await
.unwrap();
#[allow(clippy::inconsistent_digit_grouping)]
let amount = Amount(1_000_000_00);
let balance = Balance { coin: Coin::Bitcoin, amount };
if !burn_events.is_empty() {
assert_eq!(burn_events.len(), 1);
assert_eq!(
burn_events[0],
CoinsEvent::BurnWithInstruction {
from: serai_addr,
instruction: out_instruction.clone()
}
);
break 'outer;
}
last_serai_block += 1;
}
}
let last_serai_block =
serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap();
let last_serai_block_hash = last_serai_block.hash();
let serai = serai.as_of(last_serai_block_hash);
let serai = serai.coins();
assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), Amount(0));
assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), Amount(0));
let mut plan_id = [0; 32];
OsRng.fill_bytes(&mut plan_id);
let plan_id = plan_id;
// We should now get a SubstrateBlock
for processor in &mut processors {
assert_eq!(
processor.recv_message().await,
messages::CoordinatorMessage::Substrate(
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time: last_serai_block.time().unwrap() / 1000,
network_latest_finalized_block: coin_block,
},
block: last_serai_block.number(),
burns: vec![out_instruction.clone()],
batches: vec![],
}
)
);
// Send the ACK, claiming there's a plan to sign
processor
.send_message(messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
block: last_serai_block.number(),
plans: vec![PlanMeta { session: Session(0), id: plan_id }],
},
))
.await;
}
sign(&mut processors, &participant_is, Session(0), plan_id).await;
})
let coin_block = BlockHash([0x33; 32]);
let block_included_in = batch(
&mut processors,
&participant_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
id: 0,
block: coin_block,
instructions: vec![InInstructionWithBalance {
instruction: InInstruction::Transfer(serai_addr),
balance,
}],
},
)
.await;
{
let block_included_in_hash =
serai.finalized_block_by_number(block_included_in).await.unwrap().unwrap().hash();
let serai = serai.as_of(block_included_in_hash);
let serai = serai.coins();
assert_eq!(serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(), Amount(1_000_000_000));
// Verify the mint occurred as expected
assert_eq!(
serai.mint_events().await.unwrap(),
vec![CoinsEvent::Mint { to: serai_addr, balance }]
);
assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), amount);
assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), amount);
}
// Trigger a burn
let out_instruction = OutInstructionWithBalance {
balance,
instruction: OutInstruction {
address: ExternalAddress::new(b"external".to_vec()).unwrap(),
data: None,
},
};
serai
.publish(&serai.sign(
&serai_pair,
SeraiCoins::burn_with_instruction(out_instruction.clone()),
0,
Default::default(),
))
.await
.unwrap();
// TODO: We *really* need a helper for this pattern
let mut last_serai_block = block_included_in;
'outer: for _ in 0 .. 20 {
tokio::time::sleep(Duration::from_secs(6)).await;
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(6)).await;
}
while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() {
let burn_events = serai
.as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash())
.coins()
.burn_with_instruction_events()
.await
.unwrap();
if !burn_events.is_empty() {
assert_eq!(burn_events.len(), 1);
assert_eq!(
burn_events[0],
CoinsEvent::BurnWithInstruction {
from: serai_addr,
instruction: out_instruction.clone()
}
);
break 'outer;
}
last_serai_block += 1;
}
}
let last_serai_block =
serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap();
let last_serai_block_hash = last_serai_block.hash();
let serai = serai.as_of(last_serai_block_hash);
let serai = serai.coins();
assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), Amount(0));
assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), Amount(0));
let mut plan_id = [0; 32];
OsRng.fill_bytes(&mut plan_id);
let plan_id = plan_id;
// We should now get a SubstrateBlock
for processor in &mut processors {
assert_eq!(
processor.recv_message().await,
messages::CoordinatorMessage::Substrate(
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time: last_serai_block.time().unwrap() / 1000,
network_latest_finalized_block: coin_block,
},
block: last_serai_block.number(),
burns: vec![out_instruction.clone()],
batches: vec![],
}
)
);
// Send the ACK, claiming there's a plan to sign
processor
.send_message(messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
block: last_serai_block.number(),
plans: vec![PlanMeta { session: Session(0), id: plan_id }],
},
))
.await;
}
sign(&mut processors, &participant_is, Session(0), plan_id).await;
})
.await;
}
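The block-polling loop above is the pattern the TODO complains about. A sketch of the helper it asks for (not part of this commit; block numbers are assumed to be u64 and events_at stands in for whatever per-block query is polled, such as burn_with_instruction_events here):

use serai_client::Serai;

// Poll finalized blocks until a per-block query returns events, bounding the wait by `attempts`
// sleep cycles. Returns the block number and its events, or None if nothing matched in time.
async fn poll_blocks_until<F, Fut, E>(
  serai: &Serai,
  mut next_block: u64,
  attempts: usize,
  mut events_at: F,
) -> Option<(u64, Vec<E>)>
where
  F: FnMut(u64) -> Fut,
  Fut: core::future::Future<Output = Vec<E>>,
{
  for _ in 0 .. attempts {
    tokio::time::sleep(core::time::Duration::from_secs(6)).await;
    while next_block <= serai.latest_finalized_block().await.unwrap().number() {
      let events = events_at(next_block).await;
      if !events.is_empty() {
        return Some((next_block, events));
      }
      next_block += 1;
    }
  }
  None
}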


@ -7,6 +7,21 @@ use std::{
process::Command,
};
pub fn fresh_logs_folder(first: bool, label: &str) -> String {
let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), ".test-logs", label]
.iter()
.collect::<std::path::PathBuf>();
if first {
let _ = fs::remove_dir_all(&logs_path);
fs::create_dir_all(&logs_path).expect("couldn't create logs directory");
assert!(
fs::read_dir(&logs_path).expect("couldn't read the logs folder").next().is_none(),
"logs folder wasn't empty, despite removing it at the start of the run",
);
}
logs_path.to_str().unwrap().to_string()
}
static BUILT: OnceLock<Mutex<HashMap<String, bool>>> = OnceLock::new();
pub fn build(name: String) {
let built = BUILT.get_or_init(|| Mutex::new(HashMap::new()));


@ -19,6 +19,9 @@ workspace = true
[dependencies]
hex = "0.4"
async-trait = "0.1"
async-recursion = "1"
zeroize = { version = "1", default-features = false }
rand_core = { version = "0.6", default-features = false }
@ -38,6 +41,7 @@ serai-client = { path = "../../substrate/client", features = ["serai"] }
tokio = { version = "1", features = ["time"] }
dockertest = "0.4"
serai-docker-tests = { path = "../docker" }
serai-message-queue-tests = { path = "../message-queue" }
serai-processor-tests = { path = "../processor" }
serai-coordinator-tests = { path = "../coordinator" }


@ -1,24 +1,14 @@
use std::{
sync::{OnceLock, Mutex},
time::Duration,
fs,
};
use std::time::Duration;
use serai_client::{primitives::NetworkId, Serai};
use serai_client::Serai;
use dockertest::{
LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification, DockerOperations,
};
use dockertest::DockerOperations;
use serai_processor_tests::{RPC_USER, RPC_PASS};
#[cfg(test)]
mod tests;
static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();
use serai_processor_tests::{RPC_USER, RPC_PASS, network_instance, processor_instance};
use serai_message_queue_tests::instance as message_queue_instance;
use serai_coordinator_tests::{coordinator_instance, serai_composition};
#[allow(unused)]
#[derive(Clone, Debug)]
pub struct Handles {
@ -27,108 +17,9 @@ pub struct Handles {
monero: (String, u32),
monero_processor: String,
message_queue: String,
coordinator: String,
serai: String,
}
pub fn full_stack(name: &str) -> (Handles, Vec<TestBodySpecification>) {
let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance();
let (bitcoin_composition, bitcoin_port) = network_instance(NetworkId::Bitcoin);
let bitcoin_processor_composition =
processor_instance(NetworkId::Bitcoin, bitcoin_port, message_queue_keys[&NetworkId::Bitcoin]);
let (monero_composition, monero_port) = network_instance(NetworkId::Monero);
let monero_processor_composition =
processor_instance(NetworkId::Monero, monero_port, message_queue_keys[&NetworkId::Monero]);
let coordinator_composition = coordinator_instance(name, coord_key);
let serai_composition = serai_composition(name);
// Give every item in this stack a unique ID
// Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits
let (first, unique_id) = {
let unique_id_mutex = UNIQUE_ID.get_or_init(|| Mutex::new(0));
let mut unique_id_lock = unique_id_mutex.lock().unwrap();
let first = *unique_id_lock == 0;
let unique_id = *unique_id_lock;
*unique_id_lock += 1;
(first, unique_id)
};
let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), ".test-logs", "full-stack"]
.iter()
.collect::<std::path::PathBuf>();
if first {
let _ = fs::remove_dir_all(&logs_path);
fs::create_dir_all(&logs_path).expect("couldn't create logs directory");
assert!(
fs::read_dir(&logs_path).expect("couldn't read the logs folder").next().is_none(),
"logs folder wasn't empty, despite removing it at the start of the run",
);
}
let logs_path = logs_path.to_str().unwrap().to_string();
let mut compositions = vec![];
let mut handles = vec![];
for (name, composition) in [
("message_queue", message_queue_composition),
("bitcoin", bitcoin_composition),
("bitcoin_processor", bitcoin_processor_composition),
("monero", monero_composition),
("monero_processor", monero_processor_composition),
("coordinator", coordinator_composition),
("serai", serai_composition),
] {
let handle = format!("full_stack-{name}-{unique_id}");
compositions.push(
composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options(
Some(LogOptions {
action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
LogAction::Forward
} else {
LogAction::ForwardToFile { path: logs_path.clone() }
},
policy: LogPolicy::Always,
source: LogSource::Both,
}),
),
);
handles.push(handle);
}
let handles = Handles {
message_queue: handles[0].clone(),
bitcoin: (handles[1].clone(), bitcoin_port),
bitcoin_processor: handles[2].clone(),
monero: (handles[3].clone(), monero_port),
monero_processor: handles[4].clone(),
coordinator: handles[5].clone(),
serai: handles[6].clone(),
};
{
let bitcoin_processor_composition = compositions.get_mut(2).unwrap();
bitcoin_processor_composition
.inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC");
bitcoin_processor_composition
.inject_container_name(handles.bitcoin.0.clone(), "NETWORK_RPC_HOSTNAME");
}
{
let monero_processor_composition = compositions.get_mut(4).unwrap();
monero_processor_composition
.inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC");
monero_processor_composition
.inject_container_name(handles.monero.0.clone(), "NETWORK_RPC_HOSTNAME");
}
let coordinator_composition = compositions.get_mut(5).unwrap();
coordinator_composition.inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC");
coordinator_composition.inject_container_name(handles.serai.clone(), "SERAI_HOSTNAME");
(handles, compositions)
}
impl Handles {
pub async fn serai(&self, ops: &DockerOperations) -> Serai {
let serai_rpc = ops.handle(&self.serai).host_port(9944).unwrap();



@ -1,6 +1,19 @@
use std::sync::OnceLock;
use core::future::Future;
use std::{sync::OnceLock, collections::HashMap};
use dockertest::DockerTest;
use tokio::sync::Mutex;
use serai_client::primitives::NetworkId;
use dockertest::{
LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification,
DockerOperations, DockerTest,
};
use serai_docker_tests::fresh_logs_folder;
use serai_processor_tests::{network_instance, processor_instance};
use serai_message_queue_tests::instance as message_queue_instance;
use serai_coordinator_tests::{coordinator_instance, serai_composition};
use crate::*;
@ -9,13 +22,29 @@ mod mint_and_burn;
pub(crate) const VALIDATORS: usize = 4;
// pub(crate) const THRESHOLD: usize = ((VALIDATORS * 2) / 3) + 1;
pub(crate) static ONE_AT_A_TIME: OnceLock<Mutex<()>> = OnceLock::new();
static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();
pub(crate) fn new_test() -> (Vec<Handles>, DockerTest) {
let mut validators = vec![];
#[async_trait::async_trait]
pub(crate) trait TestBody: 'static + Send + Sync {
async fn body(&self, ops: DockerOperations, handles: Vec<Handles>);
}
#[async_trait::async_trait]
impl<F: Send + Future, TB: 'static + Send + Sync + Fn(DockerOperations, Vec<Handles>) -> F> TestBody
for TB
{
async fn body(&self, ops: DockerOperations, handles: Vec<Handles>) {
(self)(ops, handles).await;
}
}
pub(crate) async fn new_test(test_body: impl TestBody) {
let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await;
let mut all_handles = vec![];
let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
let mut coordinator_compositions = vec![];
for i in 0 .. VALIDATORS {
let (handles, compositions) = full_stack(match i {
let name = match i {
0 => "Alice",
1 => "Bob",
2 => "Charlie",
@ -23,11 +52,164 @@ pub(crate) fn new_test() -> (Vec<Handles>, DockerTest) {
4 => "Eve",
5 => "Ferdie",
_ => panic!("needed a 7th name for a serai node"),
});
validators.push(handles);
for composition in compositions {
};
let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance();
let (bitcoin_composition, bitcoin_port) = network_instance(NetworkId::Bitcoin);
let bitcoin_processor_composition =
processor_instance(NetworkId::Bitcoin, bitcoin_port, message_queue_keys[&NetworkId::Bitcoin]);
let (monero_composition, monero_port) = network_instance(NetworkId::Monero);
let monero_processor_composition =
processor_instance(NetworkId::Monero, monero_port, message_queue_keys[&NetworkId::Monero]);
let coordinator_composition = coordinator_instance(name, coord_key);
let serai_composition = serai_composition(name);
// Give every item in this stack a unique ID
// Uses a Mutex as we can't generate an 8-byte random ID without hitting hostname length limits
let (first, unique_id) = {
let first = *unique_id_lock == 0;
let unique_id = *unique_id_lock;
*unique_id_lock += 1;
(first, unique_id)
};
let logs_path = fresh_logs_folder(first, "full-stack");
let mut compositions = HashMap::new();
let mut handles = HashMap::new();
for (name, composition) in [
("message_queue", message_queue_composition),
("bitcoin", bitcoin_composition),
("bitcoin_processor", bitcoin_processor_composition),
("monero", monero_composition),
("monero_processor", monero_processor_composition),
("coordinator", coordinator_composition),
("serai", serai_composition),
] {
let handle = format!("full_stack-{name}-{unique_id}");
compositions.insert(
name,
composition
.set_start_policy(StartPolicy::Strict)
.set_handle(handle.clone())
.set_log_options(Some(LogOptions {
action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
LogAction::Forward
} else {
LogAction::ForwardToFile { path: logs_path.clone() }
},
policy: LogPolicy::Always,
source: LogSource::Both,
})),
);
handles.insert(name, handle);
}
let handles = Handles {
message_queue: handles.remove("message_queue").unwrap(),
bitcoin: (handles.remove("bitcoin").unwrap(), bitcoin_port),
bitcoin_processor: handles.remove("bitcoin_processor").unwrap(),
monero: (handles.remove("monero").unwrap(), monero_port),
monero_processor: handles.remove("monero_processor").unwrap(),
serai: handles.remove("serai").unwrap(),
};
{
let bitcoin_processor_composition = compositions.get_mut("bitcoin_processor").unwrap();
bitcoin_processor_composition
.inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC");
bitcoin_processor_composition
.inject_container_name(handles.bitcoin.0.clone(), "NETWORK_RPC_HOSTNAME");
}
{
let monero_processor_composition = compositions.get_mut("monero_processor").unwrap();
monero_processor_composition
.inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC");
monero_processor_composition
.inject_container_name(handles.monero.0.clone(), "NETWORK_RPC_HOSTNAME");
}
coordinator_compositions.push(compositions.remove("coordinator").unwrap());
all_handles.push(handles);
for (_, composition) in compositions {
test.provide_container(composition);
}
}
(validators, test)
struct Context {
pending_coordinator_compositions: Mutex<Vec<TestBodySpecification>>,
handles: Vec<Handles>,
test_body: Box<dyn TestBody>,
}
static CONTEXT: OnceLock<Mutex<Option<Context>>> = OnceLock::new();
*CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context {
pending_coordinator_compositions: Mutex::new(coordinator_compositions),
handles: all_handles,
test_body: Box::new(test_body),
});
// The DockerOperations from the first invocation, containing the Message Queue servers and the
// Serai nodes.
static OUTER_OPS: OnceLock<Mutex<Option<DockerOperations>>> = OnceLock::new();
// Reset OUTER_OPS
*OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None;
// Spawns a coordinator, if one has yet to be spawned, or else runs the test.
#[async_recursion::async_recursion]
async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) {
// If the outer operations have yet to be set, these *are* the outer operations
let outer_ops = OUTER_OPS.get().unwrap();
if outer_ops.lock().await.is_none() {
*outer_ops.lock().await = Some(inner_ops);
}
let context_lock = CONTEXT.get().unwrap().lock().await;
let Context { pending_coordinator_compositions, handles, test_body } =
context_lock.as_ref().unwrap();
// Check if there is a coordinator left
let maybe_coordinator = {
let mut remaining = pending_coordinator_compositions.lock().await;
let maybe_coordinator = if !remaining.is_empty() {
let handles = handles[handles.len() - remaining.len()].clone();
let composition = remaining.remove(0);
Some((composition, handles))
} else {
None
};
drop(remaining);
maybe_coordinator
};
if let Some((mut composition, handles)) = maybe_coordinator {
let network = {
let outer_ops = outer_ops.lock().await;
let outer_ops = outer_ops.as_ref().unwrap();
// Spawn it by building another DockerTest which recursively calls this function
// TODO: Spawn this outside of DockerTest so we can remove the recursion
let serai_container = outer_ops.handle(&handles.serai);
composition.modify_env("SERAI_HOSTNAME", serai_container.ip());
let message_queue_container = outer_ops.handle(&handles.message_queue);
composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip());
format!("container:{}", serai_container.name())
};
let mut test = DockerTest::new().with_network(dockertest::Network::External(network));
test.provide_container(composition);
drop(context_lock);
test.run_async(spawn_coordinator_or_run_test).await;
} else {
let outer_ops = outer_ops.lock().await.take().unwrap();
test_body.body(outer_ops, handles.clone()).await;
}
}
test.run_async(spawn_coordinator_or_run_test).await;
}
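As with the coordinator tests, any async closure over the DockerOperations and the per-validator Handles satisfies TestBody through the blanket impl. A hypothetical minimal body (the real consumer is the mint_and_burn test); example_usage is not part of this commit:

// Check every validator's Serai RPC is reachable once all coordinators have been spawned.
async fn example_usage() {
  new_test(|ops: DockerOperations, handles: Vec<Handles>| async move {
    for validator in &handles {
      let serai = validator.serai(&ops).await;
      serai.latest_finalized_block().await.unwrap();
    }
  })
  .await;
}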