Merge branch 'serai-dex:develop' into develop

Benny Fischer 2024-03-27 14:38:51 -07:00 committed by GitHub
commit ab028f6544
60 changed files with 1604 additions and 986 deletions

View file

@@ -42,8 +42,8 @@ runs:
     shell: bash
     run: |
       cargo install svm-rs
-      svm install 0.8.16
-      svm use 0.8.16
+      svm install 0.8.25
+      svm use 0.8.25
 # - name: Cache Rust
 #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43

Cargo.lock (generated): 392 changes. File diff suppressed because it is too large.

View file

@@ -1,3 +1,7 @@
-# solidity build outputs
+# Solidity build outputs
 cache
 artifacts
+
+# Auto-generated ABI files
+src/abi/schnorr.rs
+src/abi/router.rs

View file

@@ -30,6 +30,9 @@ ethers-core = { version = "2", default-features = false }
 ethers-providers = { version = "2", default-features = false }
 ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
 
+[build-dependencies]
+ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
+
 [dev-dependencies]
 rand_core = { version = "0.6", default-features = false, features = ["std"] }

View file

@@ -1,6 +1,20 @@
+use std::process::Command;
+
+use ethers_contract::Abigen;
+
 fn main() {
-  println!("cargo:rerun-if-changed=contracts");
-  println!("cargo:rerun-if-changed=artifacts");
+  println!("cargo:rerun-if-changed=contracts/*");
+  println!("cargo:rerun-if-changed=artifacts/*");
+
+  for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
+    .unwrap()
+    .lines()
+  {
+    if let Some(version) = line.strip_prefix("Version: ") {
+      let version = version.split('+').next().unwrap();
+      assert_eq!(version, "0.8.25");
+    }
+  }
 
   #[rustfmt::skip]
   let args = [
@@ -8,8 +22,21 @@ fn main() {
     "-o", "./artifacts", "--overwrite",
     "--bin", "--abi",
     "--optimize",
-    "./contracts/Schnorr.sol"
+    "./contracts/Schnorr.sol", "./contracts/Router.sol",
   ];
 
-  assert!(std::process::Command::new("solc").args(args).status().unwrap().success());
+  assert!(Command::new("solc").args(args).status().unwrap().success());
+
+  Abigen::new("Schnorr", "./artifacts/Schnorr.abi")
+    .unwrap()
+    .generate()
+    .unwrap()
+    .write_to_file("./src/abi/schnorr.rs")
+    .unwrap();
+  Abigen::new("Router", "./artifacts/Router.abi")
+    .unwrap()
+    .generate()
+    .unwrap()
+    .write_to_file("./src/abi/router.rs")
+    .unwrap();
 }
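The build script now pins solc to 0.8.25, compiles both contracts, and runs Abigen over the emitted ABIs, writing Rust bindings into src/abi/ (which the .gitignore change above excludes from version control). As a rough sketch of how such generated bindings get consumed (a hypothetical helper, not part of this commit; only Router::new and nonce() are taken from this diff, via the router module's re-export):

use std::sync::Arc;

use ethers_core::types::{H160, U256};
use ethers_providers::{Provider, Http};

// Read the Router's current nonce via the Abigen-generated bindings,
// re-exported through ethereum_serai::router
async fn current_nonce(addr: H160, client: Arc<Provider<Http>>) -> U256 {
  let router = ethereum_serai::router::Router::new(addr, client);
  router.nonce().call().await.unwrap()
}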

View file

@@ -0,0 +1,90 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
import "./Schnorr.sol";
contract Router is Schnorr {
// Contract initializer
// TODO: Replace with a MuSig of the genesis validators
address public initializer;
// Nonce is incremented for each batch of transactions executed
uint256 public nonce;
// fixed parity for the public keys used in this contract
uint8 constant public KEY_PARITY = 27;
// current public key's x-coordinate
// note: this key must always use the fixed parity defined above
bytes32 public seraiKey;
struct OutInstruction {
address to;
uint256 value;
bytes data;
}
struct Signature {
bytes32 c;
bytes32 s;
}
// success is a uint256 representing a bitfield of transaction successes
event Executed(uint256 nonce, bytes32 batch, uint256 success);
// error types
error NotInitializer();
error AlreadyInitialized();
error InvalidKey();
error TooManyTransactions();
constructor() {
initializer = msg.sender;
}
// initSeraiKey can be called by the contract initializer to set the first
// public key, only if the public key has yet to be set.
function initSeraiKey(bytes32 _seraiKey) external {
if (msg.sender != initializer) revert NotInitializer();
if (seraiKey != 0) revert AlreadyInitialized();
if (_seraiKey == bytes32(0)) revert InvalidKey();
seraiKey = _seraiKey;
}
// updateSeraiKey validates the given Schnorr signature against the current public key,
// and if successful, updates the contract's public key to the given one.
function updateSeraiKey(
bytes32 _seraiKey,
Signature memory sig
) public {
if (_seraiKey == bytes32(0)) revert InvalidKey();
bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
seraiKey = _seraiKey;
}
// execute accepts a list of transactions to execute as well as a Schnorr signature.
// if signature verification passes, the given transactions are executed.
// if signature verification fails, this function will revert.
function execute(
OutInstruction[] calldata transactions,
Signature memory sig
) public {
if (transactions.length > 256) revert TooManyTransactions();
bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
// This prevents re-entrancy from causing double spends yet does allow
// out-of-order execution via re-entrancy
nonce++;
if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
uint256 successes;
for(uint256 i = 0; i < transactions.length; i++) {
(bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
assembly {
successes := or(successes, shl(i, success))
}
}
emit Executed(nonce, message, successes);
}
}
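The Executed event's success field packs per-instruction outcomes into a uint256: the assembly above sets bit i when instruction i's call succeeded. A hypothetical client-side helper (not part of this commit) for reading bit i out of the emitted value, treated as 32 big-endian bytes:

// Returns whether instruction `i` of the batch succeeded, given the
// `success` word of an Executed event as 32 big-endian bytes
fn instruction_succeeded(success: [u8; 32], i: usize) -> bool {
  assert!(i < 256, "the Router caps batches at 256 transactions");
  // Bit 0 is the least-significant bit, which lives in the last byte
  ((success[31 - (i / 8)] >> (i % 8)) & 1) == 1
}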

View file

@@ -1,4 +1,4 @@
-//SPDX-License-Identifier: AGPLv3
+// SPDX-License-Identifier: AGPLv3
 pragma solidity ^0.8.0;
 
 // see https://github.com/noot/schnorr-verify for implementation details
@@ -7,29 +7,32 @@ contract Schnorr {
   uint256 constant public Q =
     0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
 
+  error InvalidSOrA();
+  error InvalidSignature();
+
   // parity := public key y-coord parity (27 or 28)
   // px := public key x-coord
-  // message := 32-byte message
+  // message := 32-byte hash of the message
+  // c := schnorr signature challenge
   // s := schnorr signature
-  // e := schnorr signature challenge
   function verify(
     uint8 parity,
     bytes32 px,
     bytes32 message,
-    bytes32 s,
-    bytes32 e
+    bytes32 c,
+    bytes32 s
   ) public view returns (bool) {
     // ecrecover = (m, v, r, s);
-    bytes32 sp = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
-    bytes32 ep = bytes32(Q - mulmod(uint256(e), uint256(px), Q));
+    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
+    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
 
-    require(sp != 0);
+    if (sa == 0) revert InvalidSOrA();
     // the ecrecover precompile implementation checks that the `r` and `s`
-    // inputs are non-zero (in this case, `px` and `ep`), thus we don't need to
-    // check if they're zero.will make me
-    address R = ecrecover(sp, parity, px, ep);
-    require(R != address(0), "ecrecover failed");
-    return e == keccak256(
+    // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to
+    // check if they're zero.
+    address R = ecrecover(sa, parity, px, ca);
+    if (R == address(0)) revert InvalidSignature();
+    return c == keccak256(
       abi.encodePacked(R, uint8(parity), px, block.chainid, message)
     );
   }
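For reference, the algebra behind this single-ecrecover verification (a sketch of the well-known trick, using this diff's names; not text from the commit): ecrecover(h, v, r, s) returns the address of the point $r^{-1}(sR' - hG)$, where $R'$ is the secp256k1 point with x-coordinate $r$ and y-parity $v$. verify calls it with $h = sa = -s \cdot px$, $r = px$ (so $R'$ is the public key $A$), and $s$-argument $ca = -c \cdot px$:

\begin{align*}
\mathrm{ecrecover}(sa,\ v,\ px,\ ca)
  &= \mathrm{addr}\!\left( px^{-1}\left( (-c \cdot px)A - (-s \cdot px)G \right) \right) \\
  &= \mathrm{addr}(sG - cA) = \mathrm{addr}(R)
\end{align*}

since a valid Schnorr signature here satisfies $sG = R + cA$. The final keccak256 comparison recomputes the challenge from $\mathrm{addr}(R)$, the key, the chain ID, and the message hash, and checks it equals the supplied $c$.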

View file

@@ -0,0 +1,6 @@
#[rustfmt::skip]
#[allow(clippy::all)]
pub(crate) mod schnorr;
#[rustfmt::skip]
#[allow(clippy::all)]
pub(crate) mod router;

View file

@@ -1,36 +0,0 @@
use thiserror::Error;
use eyre::{eyre, Result};
use ethers_providers::{Provider, Http};
use ethers_contract::abigen;
use crate::crypto::ProcessedSignature;
#[derive(Error, Debug)]
pub enum EthereumError {
#[error("failed to verify Schnorr signature")]
VerificationError,
}
abigen!(Schnorr, "./artifacts/Schnorr.abi");
pub async fn call_verify(
contract: &Schnorr<Provider<Http>>,
params: &ProcessedSignature,
) -> Result<()> {
if contract
.verify(
params.parity + 27,
params.px.to_bytes().into(),
params.message,
params.s.to_bytes().into(),
params.e.to_bytes().into(),
)
.call()
.await?
{
Ok(())
} else {
Err(eyre!(EthereumError::VerificationError))
}
}

View file

@@ -1,50 +1,54 @@
 use sha3::{Digest, Keccak256};
 
-use group::Group;
+use group::ff::PrimeField;
 use k256::{
   elliptic_curve::{
-    bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint, sec1::ToEncodedPoint,
+    bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
   },
-  AffinePoint, ProjectivePoint, Scalar, U256,
+  ProjectivePoint, Scalar, U256,
 };
 
-use frost::{algorithm::Hram, curve::Secp256k1};
+use frost::{
+  algorithm::{Hram, SchnorrSignature},
+  curve::Secp256k1,
+};
 
-pub fn keccak256(data: &[u8]) -> [u8; 32] {
+pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
   Keccak256::digest(data).into()
 }
 
-pub fn hash_to_scalar(data: &[u8]) -> Scalar {
-  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
-}
-
-pub fn address(point: &ProjectivePoint) -> [u8; 20] {
+pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
   let encoded_point = point.to_encoded_point(false);
-  keccak256(&encoded_point.as_ref()[1 .. 65])[12 .. 32].try_into().unwrap()
+  // Last 20 bytes of the hash of the concatenated x and y coordinates
+  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
+  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
 }
 
-pub fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
-  if r.is_zero().into() || s.is_zero().into() {
-    return None;
-  }
-
-  #[allow(non_snake_case)]
-  let R = AffinePoint::decompress(&r.to_bytes(), v.into());
-  #[allow(non_snake_case)]
-  if let Some(R) = Option::<AffinePoint>::from(R) {
-    #[allow(non_snake_case)]
-    let R = ProjectivePoint::from(R);
-    let r = r.invert().unwrap();
-    let u1 = ProjectivePoint::GENERATOR * (-message * r);
-    let u2 = R * (s * r);
-    let key: ProjectivePoint = u1 + u2;
-    if !bool::from(key.is_identity()) {
-      return Some(address(&key));
-    }
-  }
-  None
+#[allow(non_snake_case)]
+pub struct PublicKey {
+  pub A: ProjectivePoint,
+  pub px: Scalar,
+  pub parity: u8,
+}
+
+impl PublicKey {
+  #[allow(non_snake_case)]
+  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
+    let affine = A.to_affine();
+    let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
+    if parity != 27 {
+      None?;
+    }
+    let x_coord = affine.x();
+    let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
+    // Return None if a reduction would occur
+    if x_coord_scalar.to_repr() != x_coord {
+      None?;
+    }
+    Some(PublicKey { A, px: x_coord_scalar, parity })
+  }
 }
 
 #[derive(Clone, Default)]
@@ -55,53 +59,33 @@ impl Hram<Secp256k1> for EthereumHram {
     let a_encoded_point = A.to_encoded_point(true);
     let mut a_encoded = a_encoded_point.as_ref().to_owned();
     a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
+    assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));
     let mut data = address(R).to_vec();
     data.append(&mut a_encoded);
-    data.append(&mut m.to_vec());
+    data.extend(m);
     Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
   }
 }
 
-pub struct ProcessedSignature {
-  pub s: Scalar,
-  pub px: Scalar,
-  pub parity: u8,
-  pub message: [u8; 32],
-  pub e: Scalar,
+pub struct Signature {
+  pub(crate) c: Scalar,
+  pub(crate) s: Scalar,
 }
-
-#[allow(non_snake_case)]
-pub fn preprocess_signature_for_ecrecover(
-  m: [u8; 32],
-  R: &ProjectivePoint,
-  s: Scalar,
-  A: &ProjectivePoint,
-  chain_id: U256,
-) -> (Scalar, Scalar) {
-  let processed_sig = process_signature_for_contract(m, R, s, A, chain_id);
-  let sr = processed_sig.s.mul(&processed_sig.px).negate();
-  let er = processed_sig.e.mul(&processed_sig.px).negate();
-  (sr, er)
-}
-
-#[allow(non_snake_case)]
-pub fn process_signature_for_contract(
-  m: [u8; 32],
-  R: &ProjectivePoint,
-  s: Scalar,
-  A: &ProjectivePoint,
-  chain_id: U256,
-) -> ProcessedSignature {
-  let encoded_pk = A.to_encoded_point(true);
-  let px = &encoded_pk.as_ref()[1 .. 33];
-  let px_scalar = Scalar::reduce(U256::from_be_slice(px));
-  let e = EthereumHram::hram(R, A, &[chain_id.to_be_byte_array().as_slice(), &m].concat());
-  ProcessedSignature {
-    s,
-    px: px_scalar,
-    parity: &encoded_pk.as_ref()[0] - 2,
-    message: m,
-    e,
+impl Signature {
+  pub fn new(
+    public_key: &PublicKey,
+    chain_id: U256,
+    m: &[u8],
+    signature: SchnorrSignature<Secp256k1>,
+  ) -> Option<Signature> {
+    let c = EthereumHram::hram(
+      &signature.R,
+      &public_key.A,
+      &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
+    );
+    if !signature.verify(public_key.A, c) {
+      None?;
+    }
+    Some(Signature { c, s: signature.s })
   }
 }

View file

@@ -1,2 +1,16 @@
-pub mod contract;
+use thiserror::Error;
+
 pub mod crypto;
+pub(crate) mod abi;
+
+pub mod schnorr;
+pub mod router;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Error, Debug)]
+pub enum Error {
+  #[error("failed to verify Schnorr signature")]
+  InvalidSignature,
+}

View file

@@ -0,0 +1,30 @@
pub use crate::abi::router::*;
/*
use crate::crypto::{ProcessedSignature, PublicKey};
use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode};
use eyre::Result;
use std::{convert::From, fs::File, sync::Arc};
pub async fn router_update_public_key<M: Middleware + 'static>(
contract: &Router<M>,
public_key: &PublicKey,
signature: &ProcessedSignature,
) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into());
let pending_tx = tx.send().await?;
let receipt = pending_tx.await?;
Ok(receipt)
}
pub async fn router_execute<M: Middleware + 'static>(
contract: &Router<M>,
txs: Vec<Rtransaction>,
signature: &ProcessedSignature,
) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
let tx = contract.execute(txs, signature.into()).send();
let pending_tx = tx.send().await?;
let receipt = pending_tx.await?;
Ok(receipt)
}
*/

View file

@@ -0,0 +1,34 @@
use eyre::{eyre, Result};
use group::ff::PrimeField;
use ethers_providers::{Provider, Http};
use crate::{
Error,
crypto::{keccak256, PublicKey, Signature},
};
pub use crate::abi::schnorr::*;
pub async fn call_verify(
contract: &Schnorr<Provider<Http>>,
public_key: &PublicKey,
message: &[u8],
signature: &Signature,
) -> Result<()> {
if contract
.verify(
public_key.parity,
public_key.px.to_repr().into(),
keccak256(message),
signature.c.to_repr().into(),
signature.s.to_repr().into(),
)
.call()
.await?
{
Ok(())
} else {
Err(eyre!(Error::InvalidSignature))
}
}

View file

@@ -0,0 +1,132 @@
use rand_core::OsRng;
use sha2::Sha256;
use sha3::{Digest, Keccak256};
use group::Group;
use k256::{
ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey},
elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint},
U256, Scalar, AffinePoint, ProjectivePoint,
};
use frost::{
curve::Secp256k1,
algorithm::{Hram, IetfSchnorr},
tests::{algorithm_machines, sign},
};
use crate::{crypto::*, tests::key_gen};
pub fn hash_to_scalar(data: &[u8]) -> Scalar {
Scalar::reduce(U256::from_be_slice(&keccak256(data)))
}
pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) {
return None;
}
#[allow(non_snake_case)]
let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into());
#[allow(non_snake_case)]
if let Some(R) = Option::<AffinePoint>::from(R) {
#[allow(non_snake_case)]
let R = ProjectivePoint::from(R);
let r = r.invert().unwrap();
let u1 = ProjectivePoint::GENERATOR * (-message * r);
let u2 = R * (s * r);
let key: ProjectivePoint = u1 + u2;
if !bool::from(key.is_identity()) {
return Some(address(&key));
}
}
None
}
#[test]
fn test_ecrecover() {
let private = SigningKey::random(&mut OsRng);
let public = VerifyingKey::from(&private);
// Sign the signature
const MESSAGE: &[u8] = b"Hello, World!";
let (sig, recovery_id) = private
.as_nonzero_scalar()
.try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
.unwrap();
// Sanity check the signature verifies
#[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
{
assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
}
// Perform the ecrecover
assert_eq!(
ecrecover(
hash_to_scalar(MESSAGE),
u8::from(recovery_id.unwrap().is_y_odd()) + 27,
*sig.r(),
*sig.s()
)
.unwrap(),
address(&ProjectivePoint::from(public.as_affine()))
);
}
// Run the sign test with the EthereumHram
#[test]
fn test_signing() {
let (keys, _) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let _sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}
#[allow(non_snake_case)]
pub fn preprocess_signature_for_ecrecover(
R: ProjectivePoint,
public_key: &PublicKey,
chain_id: U256,
m: &[u8],
s: Scalar,
) -> (u8, Scalar, Scalar) {
let c = EthereumHram::hram(
&R,
&public_key.A,
&[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
);
let sa = -(s * public_key.px);
let ca = -(c * public_key.px);
(public_key.parity, sa, ca)
}
#[test]
fn test_ecrecover_hack() {
let (keys, public_key) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE);
let chain_id = U256::ONE;
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, &keys),
full_message,
);
let (parity, sa, ca) =
preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s);
let q = ecrecover(sa, parity, public_key.px, ca).unwrap();
assert_eq!(q, address(&sig.R));
}

View file

@@ -0,0 +1,92 @@
use std::{sync::Arc, time::Duration, fs::File, collections::HashMap};
use rand_core::OsRng;
use group::ff::PrimeField;
use k256::{Scalar, ProjectivePoint};
use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};
use ethers_core::{
types::{H160, Signature as EthersSignature},
abi::Abi,
};
use ethers_contract::ContractFactory;
use ethers_providers::{Middleware, Provider, Http};
use crate::crypto::PublicKey;
mod crypto;
mod schnorr;
mod router;
pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {
let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);
let mut group_key = keys[&Participant::new(1).unwrap()].group_key();
let mut offset = Scalar::ZERO;
while PublicKey::new(group_key).is_none() {
offset += Scalar::ONE;
group_key += ProjectivePoint::GENERATOR;
}
for keys in keys.values_mut() {
*keys = keys.offset(offset);
}
let public_key = PublicKey::new(group_key).unwrap();
(keys, public_key)
}
// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
// to fund the deployer, not create/pass a wallet
// TODO: Deterministic deployments across chains
pub async fn deploy_contract(
chain_id: u32,
client: Arc<Provider<Http>>,
wallet: &k256::ecdsa::SigningKey,
name: &str,
) -> eyre::Result<H160> {
let abi: Abi =
serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap();
let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
let hex_bin =
if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
let bin = hex::decode(hex_bin).unwrap();
let factory = ContractFactory::new(abi, bin.into(), client.clone());
let mut deployment_tx = factory.deploy(())?.tx;
deployment_tx.set_chain_id(chain_id);
deployment_tx.set_gas(1_000_000);
let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
let sig_hash = deployment_tx.sighash();
let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
// EIP-155 v
let mut v = u64::from(rid.to_byte());
assert!((v == 0) || (v == 1));
v += u64::from((chain_id * 2) + 35);
let r = sig.r().to_repr();
let r_ref: &[u8] = r.as_ref();
let s = sig.s().to_repr();
let s_ref: &[u8] = s.as_ref();
let deployment_tx =
deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v });
let pending_tx = client.send_raw_transaction(deployment_tx).await?;
let mut receipt;
while {
receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
receipt.is_none()
} {
tokio::time::sleep(Duration::from_secs(6)).await;
}
let receipt = receipt.unwrap();
assert!(receipt.status == Some(1.into()));
Ok(receipt.contract_address.unwrap())
}
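A worked example of the EIP-155 `v` computed above (the chain IDs are illustrative, not values from this diff; 31337 is Anvil's default chain ID):

// EIP-155: v = recovery_id + (chain_id * 2) + 35
fn eip155_v(recovery_id: u64, chain_id: u64) -> u64 {
  recovery_id + (chain_id * 2) + 35
}

fn main() {
  assert_eq!(eip155_v(0, 31337), 74709); // Anvil, even-parity recovery ID
  assert_eq!(eip155_v(1, 1), 38); // mainnet, odd-parity recovery ID
}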

View file

@@ -0,0 +1,109 @@
use std::{convert::TryFrom, sync::Arc, collections::HashMap};
use rand_core::OsRng;
use group::ff::PrimeField;
use frost::{
curve::Secp256k1,
Participant, ThresholdKeys,
algorithm::IetfSchnorr,
tests::{algorithm_machines, sign},
};
use ethers_core::{
types::{H160, U256, Bytes},
abi::AbiEncode,
utils::{Anvil, AnvilInstance},
};
use ethers_providers::{Middleware, Provider, Http};
use crate::{
crypto::{keccak256, PublicKey, EthereumHram, Signature},
router::{self, *},
tests::{key_gen, deploy_contract},
};
async fn setup_test() -> (
u32,
AnvilInstance,
Router<Provider<Http>>,
HashMap<Participant, ThresholdKeys<Secp256k1>>,
PublicKey,
) {
let anvil = Anvil::new().spawn();
let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
let chain_id = provider.get_chainid().await.unwrap().as_u32();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
let contract_address =
deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap();
let contract = Router::new(contract_address, client.clone());
let (keys, public_key) = key_gen();
// Set the key to the threshold keys
let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000);
let pending_tx = tx.send().await.unwrap();
let receipt = pending_tx.await.unwrap().unwrap();
assert!(receipt.status == Some(1.into()));
(chain_id, anvil, contract, keys, public_key)
}
#[tokio::test]
async fn test_deploy_contract() {
setup_test().await;
}
pub fn hash_and_sign(
keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
public_key: &PublicKey,
chain_id: U256,
message: &[u8],
) -> Signature {
let hashed_message = keccak256(message);
let mut chain_id_bytes = [0; 32];
chain_id.to_big_endian(&mut chain_id_bytes);
let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, keys),
full_message,
);
Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap()
}
#[tokio::test]
async fn test_router_execute() {
let (chain_id, _anvil, contract, keys, public_key) = setup_test().await;
let to = H160([0u8; 20]);
let value = U256([0u64; 4]);
let data = Bytes::from([0]);
let tx = OutInstruction { to, value, data: data.clone() };
let nonce_call = contract.nonce();
let nonce = nonce_call.call().await.unwrap();
let encoded =
("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode();
let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded);
let tx = contract
.execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() })
.gas(300_000);
let pending_tx = tx.send().await.unwrap();
let receipt = dbg!(pending_tx.await.unwrap().unwrap());
assert!(receipt.status == Some(1.into()));
println!("gas used: {:?}", receipt.cumulative_gas_used);
println!("logs: {:?}", receipt.logs);
}

View file

@@ -0,0 +1,67 @@
use std::{convert::TryFrom, sync::Arc};
use rand_core::OsRng;
use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar};
use ethers_core::utils::{keccak256, Anvil, AnvilInstance};
use ethers_providers::{Middleware, Provider, Http};
use frost::{
curve::Secp256k1,
algorithm::IetfSchnorr,
tests::{algorithm_machines, sign},
};
use crate::{
crypto::*,
schnorr::*,
tests::{key_gen, deploy_contract},
};
async fn setup_test() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
let anvil = Anvil::new().spawn();
let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
let chain_id = provider.get_chainid().await.unwrap().as_u32();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
let contract_address =
deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap();
let contract = Schnorr::new(contract_address, client.clone());
(chain_id, anvil, contract)
}
#[tokio::test]
async fn test_deploy_contract() {
setup_test().await;
}
#[tokio::test]
async fn test_ecrecover_hack() {
let (chain_id, _anvil, contract) = setup_test().await;
let chain_id = U256::from(chain_id);
let (keys, public_key) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE);
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, &keys),
full_message,
);
let sig = Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap();
call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap();
// Test an invalid signature fails
let mut sig = sig;
sig.s += Scalar::ONE;
assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err());
}

View file

@@ -1,128 +0,0 @@
use std::{convert::TryFrom, sync::Arc, time::Duration, fs::File};
use rand_core::OsRng;
use ::k256::{
elliptic_curve::{bigint::ArrayEncoding, PrimeField},
U256,
};
use ethers_core::{
types::Signature,
abi::Abi,
utils::{keccak256, Anvil, AnvilInstance},
};
use ethers_contract::ContractFactory;
use ethers_providers::{Middleware, Provider, Http};
use frost::{
curve::Secp256k1,
Participant,
algorithm::IetfSchnorr,
tests::{key_gen, algorithm_machines, sign},
};
use ethereum_serai::{
crypto,
contract::{Schnorr, call_verify},
};
// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
// to fund the deployer, not create/pass a wallet
pub async fn deploy_schnorr_verifier_contract(
chain_id: u32,
client: Arc<Provider<Http>>,
wallet: &k256::ecdsa::SigningKey,
) -> eyre::Result<Schnorr<Provider<Http>>> {
let abi: Abi = serde_json::from_reader(File::open("./artifacts/Schnorr.abi").unwrap()).unwrap();
let hex_bin_buf = std::fs::read_to_string("./artifacts/Schnorr.bin").unwrap();
let hex_bin =
if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
let bin = hex::decode(hex_bin).unwrap();
let factory = ContractFactory::new(abi, bin.into(), client.clone());
let mut deployment_tx = factory.deploy(())?.tx;
deployment_tx.set_chain_id(chain_id);
deployment_tx.set_gas(500_000);
let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
let sig_hash = deployment_tx.sighash();
let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
// EIP-155 v
let mut v = u64::from(rid.to_byte());
assert!((v == 0) || (v == 1));
v += u64::from((chain_id * 2) + 35);
let r = sig.r().to_repr();
let r_ref: &[u8] = r.as_ref();
let s = sig.s().to_repr();
let s_ref: &[u8] = s.as_ref();
let deployment_tx = deployment_tx.rlp_signed(&Signature { r: r_ref.into(), s: s_ref.into(), v });
let pending_tx = client.send_raw_transaction(deployment_tx).await?;
let mut receipt;
while {
receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
receipt.is_none()
} {
tokio::time::sleep(Duration::from_secs(6)).await;
}
let receipt = receipt.unwrap();
assert!(receipt.status == Some(1.into()));
let contract = Schnorr::new(receipt.contract_address.unwrap(), client.clone());
Ok(contract)
}
async fn deploy_test_contract() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
let anvil = Anvil::new().spawn();
let provider =
Provider::<Http>::try_from(anvil.endpoint()).unwrap().interval(Duration::from_millis(10u64));
let chain_id = provider.get_chainid().await.unwrap().as_u32();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
(chain_id, anvil, deploy_schnorr_verifier_contract(chain_id, client, &wallet).await.unwrap())
}
#[tokio::test]
async fn test_deploy_contract() {
deploy_test_contract().await;
}
#[tokio::test]
async fn test_ecrecover_hack() {
let (chain_id, _anvil, contract) = deploy_test_contract().await;
let chain_id = U256::from(chain_id);
let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let group_key = keys[&Participant::new(1).unwrap()].group_key();
const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE);
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, crypto::EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, &keys),
full_message,
);
let mut processed_sig =
crypto::process_signature_for_contract(hashed_message, &sig.R, sig.s, &group_key, chain_id);
call_verify(&contract, &processed_sig).await.unwrap();
// test invalid signature fails
processed_sig.message[0] = 0;
assert!(call_verify(&contract, &processed_sig).await.is_err());
}

View file

@@ -1,87 +0,0 @@
use k256::{
elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint},
ProjectivePoint, Scalar, U256,
};
use frost::{curve::Secp256k1, Participant};
use ethereum_serai::crypto::*;
#[test]
fn test_ecrecover() {
use rand_core::OsRng;
use sha2::Sha256;
use sha3::{Digest, Keccak256};
use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey};
let private = SigningKey::random(&mut OsRng);
let public = VerifyingKey::from(&private);
const MESSAGE: &[u8] = b"Hello, World!";
let (sig, recovery_id) = private
.as_nonzero_scalar()
.try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
.unwrap();
#[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
{
assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
}
assert_eq!(
ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s())
.unwrap(),
address(&ProjectivePoint::from(public.as_affine()))
);
}
#[test]
fn test_signing() {
use frost::{
algorithm::IetfSchnorr,
tests::{algorithm_machines, key_gen, sign},
};
use rand_core::OsRng;
let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let _group_key = keys[&Participant::new(1).unwrap()].group_key();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let _sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}
#[test]
fn test_ecrecover_hack() {
use frost::{
algorithm::IetfSchnorr,
tests::{algorithm_machines, key_gen, sign},
};
use rand_core::OsRng;
let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let group_key = keys[&Participant::new(1).unwrap()].group_key();
let group_key_encoded = group_key.to_encoded_point(true);
let group_key_compressed = group_key_encoded.as_ref();
let group_key_x = Scalar::reduce(U256::from_be_slice(&group_key_compressed[1 .. 33]));
const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE);
let chain_id = U256::ONE;
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, &keys),
full_message,
);
let (sr, er) =
preprocess_signature_for_ecrecover(hashed_message, &sig.R, sig.s, &group_key, chain_id);
let q = ecrecover(sr, group_key_compressed[0] - 2, group_key_x, er).unwrap();
assert_eq!(q, address(&sig.R));
}

View file

@@ -1,2 +0,0 @@
mod contract;
mod crypto;

View file

@@ -199,6 +199,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     l: Participant,
     addendum: ClsagAddendum,
   ) -> Result<(), FrostError> {
+    // TODO: This check is faulty if two shares are additive inverses of each other
     if self.image.is_identity().into() {
       self.transcript.domain_separate(b"CLSAG");
       self.input().transcript(&mut self.transcript);

View file

@@ -88,7 +88,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
     .unwrap();
   let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap();
 
-  // TODO: Needs https://github.com/monero-project/monero/pull/8882
+  // TODO: Needs https://github.com/monero-project/monero/pull/9260
   // let fee_rate = daemon_rpc
   //   .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant)
   //   .await
@@ -107,7 +107,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
   let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap();
   let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);
 
-  // TODO: Needs https://github.com/monero-project/monero/pull/8882
+  // TODO: Needs https://github.com/monero-project/monero/pull/9260
   // runner::check_weight_and_fee(&tx, fee_rate);
 
   match spec {

View file

@@ -1,44 +1,51 @@
 use std::sync::Arc;
 
 use rocksdb::{
-  DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions, Transaction, Options,
-  TransactionDB,
+  DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,
+  Transaction as RocksTransaction, Options, OptimisticTransactionDB,
 };
 
 use crate::*;
 
-impl<T: ThreadMode> Get for Transaction<'_, TransactionDB<T>> {
+pub struct Transaction<'a, T: ThreadMode>(
+  RocksTransaction<'a, OptimisticTransactionDB<T>>,
+  &'a OptimisticTransactionDB<T>,
+);
+
+impl<T: ThreadMode> Get for Transaction<'_, T> {
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
-    self.get(key).expect("couldn't read from RocksDB via transaction")
+    self.0.get(key).expect("couldn't read from RocksDB via transaction")
   }
 }
-impl<T: ThreadMode> DbTxn for Transaction<'_, TransactionDB<T>> {
+impl<T: ThreadMode> DbTxn for Transaction<'_, T> {
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
-    Transaction::put(self, key, value).expect("couldn't write to RocksDB via transaction")
+    self.0.put(key, value).expect("couldn't write to RocksDB via transaction")
   }
   fn del(&mut self, key: impl AsRef<[u8]>) {
-    self.delete(key).expect("couldn't delete from RocksDB via transaction")
+    self.0.delete(key).expect("couldn't delete from RocksDB via transaction")
   }
   fn commit(self) {
-    Transaction::commit(self).expect("couldn't commit to RocksDB via transaction")
+    self.0.commit().expect("couldn't commit to RocksDB via transaction");
+    self.1.flush_wal(true).expect("couldn't flush RocksDB WAL");
+    self.1.flush().expect("couldn't flush RocksDB");
   }
 }
 
-impl<T: ThreadMode> Get for Arc<TransactionDB<T>> {
+impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
-    TransactionDB::get(self, key).expect("couldn't read from RocksDB")
+    OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB")
   }
 }
-impl<T: ThreadMode + 'static> Db for Arc<TransactionDB<T>> {
-  type Transaction<'a> = Transaction<'a, TransactionDB<T>>;
+impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
+  type Transaction<'a> = Transaction<'a, T>;
   fn txn(&mut self) -> Self::Transaction<'_> {
     let mut opts = WriteOptions::default();
     opts.set_sync(true);
-    self.transaction_opt(&opts, &Default::default())
+    Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
   }
 }
 
-pub type RocksDB = Arc<TransactionDB<SingleThreaded>>;
+pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;
 pub fn new_rocksdb(path: &str) -> RocksDB {
   let mut options = Options::default();
   options.create_if_missing(true);
@@ -54,5 +61,5 @@ pub fn new_rocksdb(path: &str) -> RocksDB {
   options.set_max_log_file_size(1024 * 1024);
   options.set_recycle_log_file_num(1);
 
-  Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap())
+  Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())
 }
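Beyond the switch to OptimisticTransactionDB, commit() now explicitly flushes the WAL and memtables, trading write throughput for durability. A minimal usage sketch, assuming serai-db exposes these items under its rocksdb feature:

use serai_db::{Get, DbTxn, Db, new_rocksdb};

fn main() {
  let mut db = new_rocksdb("/tmp/serai-db-example");
  {
    let mut txn = db.txn();
    txn.put(b"key", b"value");
    assert_eq!(txn.get(b"key"), Some(b"value".to_vec()));
    txn.commit(); // commits, then flushes the WAL and memtables
  }
  assert_eq!(db.get(b"key"), Some(b"value".to_vec()));
}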

View file

@@ -23,7 +23,7 @@ hyper-util = { version = "0.1", default-features = false, features = ["http1", "
 http-body-util = { version = "0.1", default-features = false }
 tokio = { version = "1", default-features = false }
 
-hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
+hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
 
 zeroize = { version = "1", optional = true }
 base64ct = { version = "1", features = ["alloc"], optional = true }

View file

@@ -290,6 +290,81 @@ impl LibP2p {
       IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
     }
 
+    // Find and connect to peers
+    let (pending_p2p_connections_send, mut pending_p2p_connections_recv) =
+      tokio::sync::mpsc::unbounded_channel();
+    let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel();
+    tokio::spawn({
+      let pending_p2p_connections_send = pending_p2p_connections_send.clone();
+      async move {
+        loop {
+          // TODO: Add better peer management logic?
+          {
+            let connect = |addr: Multiaddr| {
+              log::info!("found peer from substrate: {addr}");
+
+              let protocols = addr.iter().filter_map(|piece| match piece {
+                // Drop PeerIds from the Substrate P2p network
+                Protocol::P2p(_) => None,
+                // Use our own TCP port
+                Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
+                other => Some(other),
+              });
+
+              let mut new_addr = Multiaddr::empty();
+              for protocol in protocols {
+                new_addr.push(protocol);
+              }
+              let addr = new_addr;
+              log::debug!("transformed found peer: {addr}");
+
+              // TODO: Check this isn't a duplicate
+              to_dial_send.send(addr).unwrap();
+            };
+
+            // TODO: We should also connect to random peers from random nets as needed for
+            // cosigning
+
+            let mut to_retry = vec![];
+            while let Some(network) = pending_p2p_connections_recv.recv().await {
+              if let Ok(mut nodes) = serai.p2p_validators(network).await {
+                // If there's an insufficient amount of nodes known, connect to all yet add it
+                // back and break
+                if nodes.len() < 3 {
+                  log::warn!(
+                    "insufficient amount of P2P nodes known for {:?}: {}",
+                    network,
+                    nodes.len()
+                  );
+                  to_retry.push(network);
+                  for node in nodes {
+                    connect(node);
+                  }
+                  continue;
+                }
+
+                // Randomly select up to 5
+                for _ in 0 .. 5 {
+                  if !nodes.is_empty() {
+                    let to_connect = nodes.swap_remove(
+                      usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
+                        .unwrap(),
+                    );
+                    connect(to_connect);
+                  }
+                }
+              }
+            }
+            for to_retry in to_retry {
+              pending_p2p_connections_send.send(to_retry).unwrap();
+            }
+          }
+
+          // Sleep 60 seconds before moving to the next iteration
+          tokio::time::sleep(core::time::Duration::from_secs(60)).await;
+        }
+      }
+    });
+
+    // Manage the actual swarm
     tokio::spawn({
       let mut time_of_last_p2p_message = Instant::now();
@@ -321,66 +396,8 @@ impl LibP2p {
       async move {
         let mut set_for_genesis = HashMap::new();
-        let mut pending_p2p_connections = vec![];
+        let mut connected_peers = 0;
+        // Run this task ad-infinitum
         loop {
-          // Handle pending P2P connections
-          // TODO: Break this out onto its own task with better peer management logic?
-          {
-            let mut connect = |addr: Multiaddr| {
-              log::info!("found peer from substrate: {addr}");
-
-              let protocols = addr.iter().filter_map(|piece| match piece {
-                // Drop PeerIds from the Substrate P2p network
-                Protocol::P2p(_) => None,
-                // Use our own TCP port
-                Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
-                other => Some(other),
-              });
-
-              let mut new_addr = Multiaddr::empty();
-              for protocol in protocols {
-                new_addr.push(protocol);
-              }
-              let addr = new_addr;
-              log::debug!("transformed found peer: {addr}");
-
-              if let Err(e) = swarm.dial(addr) {
-                log::warn!("dialing peer failed: {e:?}");
-              }
-            };
-
-            while let Some(network) = pending_p2p_connections.pop() {
-              if let Ok(mut nodes) = serai.p2p_validators(network).await {
-                // If there's an insufficient amount of nodes known, connect to all yet add it back
-                // and break
-                if nodes.len() < 3 {
-                  log::warn!(
-                    "insufficient amount of P2P nodes known for {:?}: {}",
-                    network,
-                    nodes.len()
-                  );
-                  pending_p2p_connections.push(network);
-                  for node in nodes {
-                    connect(node);
-                  }
-                  break;
-                }
-
-                // Randomly select up to 5
-                for _ in 0 .. 5 {
-                  if !nodes.is_empty() {
-                    let to_connect = nodes.swap_remove(
-                      usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
-                        .unwrap(),
-                    );
-                    connect(to_connect);
-                  }
-                }
-              }
-            }
-          }
-
           let time_since_last = Instant::now().duration_since(time_of_last_p2p_message);
           tokio::select! {
             biased;
@@ -392,7 +409,7 @@ impl LibP2p {
               let topic = topic_for_set(set);
               if subscribe {
                 log::info!("subscribing to p2p messages for {set:?}");
-                pending_p2p_connections.push(set.network);
+                pending_p2p_connections_send.send(set.network).unwrap();
                 set_for_genesis.insert(genesis, set);
                 swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap();
               } else {
@@ -422,12 +439,26 @@ impl LibP2p {
                   log::debug!("dialing to peer in connection ID {}", &connection_id);
                 }
                 Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => {
+                  if &peer_id == swarm.local_peer_id() {
+                    log::warn!("established a libp2p connection to ourselves");
+                    swarm.close_connection(connection_id);
+                    continue;
+                  }
+
+                  connected_peers += 1;
                   log::debug!(
-                    "connection established to peer {} in connection ID {}",
+                    "connection established to peer {} in connection ID {}, connected peers: {}",
                     &peer_id,
                     &connection_id,
+                    connected_peers,
                   );
-                  swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id)
+                }
+                Some(SwarmEvent::ConnectionClosed { peer_id, .. }) => {
+                  connected_peers -= 1;
+                  log::debug!(
+                    "connection with peer {peer_id} closed, connected peers: {}",
+                    connected_peers,
+                  );
                 }
                 Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub(
                   GsEvent::Message { propagation_source, message, .. },
@@ -440,6 +471,14 @@ impl LibP2p {
               }
             }
 
+            // Handle peers to dial
+            addr = to_dial_recv.recv() => {
+              let addr = addr.expect("received address was None (sender dropped?)");
+              if let Err(e) = swarm.dial(addr) {
+                log::warn!("dialing peer failed: {e:?}");
+              }
+            }
+
             // If it's been >80s since we've published a message, publish a KeepAlive since we're
             // still an active service
             // This is useful when we have no active tributaries and accordingly aren't sending
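The net effect of this refactor: peer discovery, which awaits Substrate RPC calls, now runs on its own task and feeds the swarm loop through channels, so a slow discovery pass can no longer stall event processing. A minimal, self-contained sketch of the pattern with hypothetical names (not this commit's code):

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
  let (to_dial_send, mut to_dial_recv) = mpsc::unbounded_channel::<String>();

  // Discovery task: may block on network calls for arbitrarily long
  tokio::spawn(async move {
    // ... look up peers, then hand them off ...
    to_dial_send.send("/ip4/127.0.0.1/tcp/30563".to_string()).unwrap();
  });

  // Event loop: polls dial requests alongside its other branches
  tokio::select! {
    addr = to_dial_recv.recv() => {
      if let Some(addr) = addr {
        println!("dialing {addr}");
      }
    }
  }
}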

View file

@@ -11,10 +11,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use serai_client::{
   SeraiError, Block, Serai, TemporalSerai,
   primitives::{BlockHash, NetworkId},
-  validator_sets::{
-    primitives::{ValidatorSet, amortize_excess_key_shares},
-    ValidatorSetsEvent,
-  },
+  validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
   in_instructions::InInstructionsEvent,
   coins::CoinsEvent,
 };
@@ -69,12 +66,7 @@ async fn handle_new_set<D: Db>(
     let set_participants =
       serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");
 
-    let mut set_data = set_participants
-      .into_iter()
-      .map(|(k, w)| (k, u16::try_from(w).unwrap()))
-      .collect::<Vec<_>>();
-    amortize_excess_key_shares(&mut set_data);
-    set_data
+    set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
   };
 
   let time = if let Ok(time) = block.time() {

View file

@@ -1,5 +1,5 @@
 use core::{marker::PhantomData, fmt::Debug};
-use std::{sync::Arc, io};
+use std::{sync::Arc, io, collections::VecDeque};
 
 use async_trait::async_trait;
@@ -194,7 +194,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
     );
     let blockchain = Arc::new(RwLock::new(blockchain));
 
-    let to_rebroadcast = Arc::new(RwLock::new(vec![]));
+    let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new()));
     // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the
     // P2P layer
     let p2p_meta_task_handle = Arc::new(
@@ -207,7 +207,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
           for msg in to_rebroadcast {
             p2p.broadcast(genesis, msg).await;
           }
-          tokio::time::sleep(core::time::Duration::from_secs(1)).await;
+          tokio::time::sleep(core::time::Duration::from_secs(60)).await;
         }
       })

View file

@@ -1,5 +1,8 @@
 use core::ops::Deref;
-use std::{sync::Arc, collections::HashMap};
+use std::{
+  sync::Arc,
+  collections::{VecDeque, HashMap},
+};
 
 use async_trait::async_trait;
@@ -268,7 +271,7 @@ pub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {
   pub(crate) validators: Arc<Validators>,
   pub(crate) blockchain: Arc<RwLock<Blockchain<D, T>>>,
 
-  pub(crate) to_rebroadcast: Arc<RwLock<Vec<Vec<u8>>>>,
+  pub(crate) to_rebroadcast: Arc<RwLock<VecDeque<Vec<u8>>>>,
 
   pub(crate) p2p: P,
 }
@@ -277,29 +280,6 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999;
 pub const LATENCY_TIME: u32 = 1667;
 pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);
 
-#[test]
-fn assert_target_block_time() {
-  use serai_db::MemDb;
-
-  #[derive(Clone, Debug)]
-  pub struct DummyP2p;
-
-  #[async_trait::async_trait]
-  impl P2p for DummyP2p {
-    async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
-      unimplemented!()
-    }
-  }
-
-  // Type paremeters don't matter here since we only need to call the block_time()
-  // and it only relies on the constants of the trait implementation. block_time() is in seconds,
-  // TARGET_BLOCK_TIME is in milliseconds.
-  assert_eq!(
-    <TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
-    TARGET_BLOCK_TIME / 1000
-  )
-}
-
 #[async_trait]
 impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
   type Db = D;
@@ -327,19 +307,28 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
   }
 
   async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
+    let mut to_broadcast = vec![TENDERMINT_MESSAGE];
+    to_broadcast.extend(msg.encode());
+
     // Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second
     // until the block it's trying to build is complete
     // If the P2P layer drops a message before all nodes obtained access, or a node had an
     // intermittent failure, this will ensure reconcilliation
+    // Resolves halts caused by timing discrepancies, which technically are violations of
+    // Tendermint as a BFT protocol, and shouldn't occur yet have in low-powered testing
+    // environments
     // This is atrocious if there's no content-based deduplication protocol for messages actively
    // being gossiped
     // LibP2p, as used by Serai, is configured to content-based deduplicate
-    let mut to_broadcast = vec![TENDERMINT_MESSAGE];
-    to_broadcast.extend(msg.encode());
-    self.to_rebroadcast.write().await.push(to_broadcast.clone());
+    {
+      let mut to_rebroadcast_lock = self.to_rebroadcast.write().await;
+      to_rebroadcast_lock.push_back(to_broadcast.clone());
+      // We should have, ideally, 3 * validators messages within a round
+      // Therefore, this should keep the most recent 2-rounds
+      // TODO: This isn't perfect. Each participant should just rebroadcast their latest round of
+      // messages
+      while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) {
+        to_rebroadcast_lock.pop_front();
+      }
+    }
+
     self.p2p.broadcast(self.genesis, to_broadcast).await
   }
 
@@ -445,7 +434,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
     }
 
     // Since we've added a valid block, clear to_rebroadcast
-    *self.to_rebroadcast.write().await = vec![];
+    *self.to_rebroadcast.write().await = VecDeque::new();
 
     Some(TendermintBlock(
       self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),
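The 6 * validators bound follows from Tendermint sending up to three messages (proposal, prevote, precommit) per validator per round, so the buffer holds roughly the two most recent rounds. A standalone restatement of the eviction policy (a sketch, not the commit's code):

use std::collections::VecDeque;

// Push a message, evicting the oldest once past two rounds' worth
fn push_capped(queue: &mut VecDeque<Vec<u8>>, msg: Vec<u8>, validators: usize) {
  queue.push_back(msg);
  while queue.len() > 6 * validators {
    queue.pop_front();
  }
}

fn main() {
  let mut queue = VecDeque::new();
  for i in 0u8 .. 100 {
    push_capped(&mut queue, vec![i], 5);
  }
  assert_eq!(queue.len(), 30); // 6 * 5 validators
  assert_eq!(queue.front(), Some(&vec![70])); // oldest retained message
}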

View file

@@ -1,3 +1,6 @@
+#[cfg(test)]
+mod tendermint;
+
 mod transaction;
 pub use transaction::*;

View file

@@ -0,0 +1,28 @@
use tendermint::ext::Network;
use crate::{
P2p, TendermintTx,
tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},
};
#[test]
fn assert_target_block_time() {
use serai_db::MemDb;
#[derive(Clone, Debug)]
pub struct DummyP2p;
#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
}
}
// Type paremeters don't matter here since we only need to call the block_time()
// and it only relies on the constants of the trait implementation. block_time() is in seconds,
// TARGET_BLOCK_TIME is in milliseconds.
assert_eq!(
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
TARGET_BLOCK_TIME / 1000
)
}

View file

@@ -1,17 +1,20 @@
-FROM --platform=linux/amd64 rust:1.76.0-slim-bookworm as builder
+# rust:1.77.0-slim-bookworm as of March 22nd, 2024 (GMT)
+FROM --platform=linux/amd64 rust@sha256:e785e4aa81f87bc1ee02fa2026ffbc491e0410bdaf6652cea74884373f452664 as deterministic
 
 # Move to a Debian package snapshot
 RUN rm -rf /etc/apt/sources.list.d/debian.sources && \
   rm -rf /var/lib/apt/lists/* && \
-  echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240201T000000Z bookworm main" > /etc/apt/sources.list && \
+  echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240301T000000Z bookworm main" > /etc/apt/sources.list && \
   apt update
 
 # Install dependencies
-RUN apt install clang -y
+RUN apt update && apt upgrade && apt install clang -y
 
 # Add the wasm toolchain
 RUN rustup target add wasm32-unknown-unknown
 
+FROM deterministic
+
 # Add files for build
 ADD patches /serai/patches
 ADD common /serai/common
@@ -30,3 +33,8 @@ ADD Cargo.lock /serai
 ADD AGPL-3.0 /serai
 
 WORKDIR /serai
+
+# Build the runtime, copying it to the volume if it exists
+CMD cargo build --release -p serai-runtime && \
+  mkdir -p /volume && \
+  cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm

View file

@@ -43,8 +43,7 @@ CMD ["/run.sh"]
     network.label()
   );
 
-  let run =
-    os(Os::Debian, "RUN mkdir /volume && chown bitcoin:bitcoin /volume", "bitcoin") + &run_bitcoin;
+  let run = os(Os::Debian, "", "bitcoin") + &run_bitcoin;
   let res = setup + &run;
 
   let mut bitcoin_path = orchestration_path.to_path_buf();

View file

@@ -55,12 +55,9 @@ CMD ["/run.sh"]
     network.label(),
   );
 
-  let run = crate::os(
-    os,
-    &("RUN mkdir /volume && chown monero /volume\r\n".to_string() +
-      if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }),
-    "monero",
-  ) + &run_monero;
+  let run =
+    crate::os(os, if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }, "monero") +
+    &run_monero;
   let res = setup + &run;
 
   let mut monero_path = orchestration_path.to_path_buf();

View file

@@ -11,7 +11,7 @@ pub fn coordinator(
   orchestration_path: &Path,
   network: Network,
   coordinator_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-  serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
+  serai_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
 ) {
   let db = network.db();
   let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" };
@@ -27,13 +27,16 @@ pub fn coordinator(
 RUN apt install -y ca-certificates
 "#;
 
+  #[rustfmt::skip]
+  const DEFAULT_RUST_LOG: &str = "info,serai_coordinator=debug,tributary_chain=debug,tendermint=debug,libp2p_gossipsub::behaviour=error";
+
   let env_vars = [
     ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())),
     ("MESSAGE_QUEUE_KEY", hex::encode(coordinator_key.to_repr())),
-    ("DB_PATH", "./coordinator-db".to_string()),
+    ("DB_PATH", "/volume/coordinator-db".to_string()),
     ("SERAI_KEY", hex::encode(serai_key.to_repr())),
     ("SERAI_HOSTNAME", format!("serai-{}-serai", network.label())),
-    ("RUST_LOG", "serai_coordinator=debug,tributary_chain=debug,tendermint=debug".to_string()),
+    ("RUST_LOG", DEFAULT_RUST_LOG.to_string()),
   ];
   let mut env_vars_str = String::new();
   for (env_var, value) in env_vars {
View file

@@ -2,7 +2,14 @@
// TODO: Generate keys for a validator and the infra

use core::ops::Deref;
-use std::{collections::HashSet, env, path::PathBuf, io::Write, fs, process::Command};
+use std::{
+  collections::{HashSet, HashMap},
+  env,
+  path::PathBuf,
+  io::Write,
+  fs,
+  process::{Stdio, Command},
+};

use zeroize::Zeroizing;
@@ -89,8 +96,12 @@ ENV LD_PRELOAD=libmimalloc.so
RUN apk update && apk upgrade

-# System user (not a human), shell of nologin, no password assigned
-RUN adduser -S -s /sbin/nologin -D {user}
+RUN adduser --system --shell /sbin/nologin --disabled-password {user}
+RUN addgroup {user}
+RUN addgroup {user} {user}
+
+# Make the /volume directory and transfer it to the user
+RUN mkdir /volume && chown {user}:{user} /volume

{additional_root}
@@ -110,7 +121,10 @@ RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload
RUN apt update && apt upgrade -y && apt autoremove -y && apt clean

-RUN useradd --system --create-home --shell /sbin/nologin {user}
+RUN useradd --system --user-group --create-home --shell /sbin/nologin {user}
+
+# Make the /volume directory and transfer it to the user
+RUN mkdir /volume && chown {user}:{user} /volume

{additional_root}
@@ -129,7 +143,7 @@ fn build_serai_service(release: bool, features: &str, package: &str) -> String {
  format!(
    r#"
-FROM rust:1.76-slim-bookworm as builder
+FROM rust:1.77-slim-bookworm as builder

COPY --from=mimalloc-debian libmimalloc.so /usr/lib
RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload
@@ -199,6 +213,55 @@ fn orchestration_path(network: Network) -> PathBuf {
  orchestration_path
}

+type InfrastructureKeys =
+  HashMap<&'static str, (Zeroizing<<Ristretto as Ciphersuite>::F>, <Ristretto as Ciphersuite>::G)>;
+fn infrastructure_keys(network: Network) -> InfrastructureKeys {
+  // Generate entropy for the infrastructure keys
+  let entropy = if network == Network::Dev {
+    // Don't use actual entropy if this is a dev environment
+    Zeroizing::new([0; 32])
+  } else {
+    let path = home::home_dir()
+      .unwrap()
+      .join(".serai")
+      .join(network.label())
+      .join("infrastructure_keys_entropy");
+    // Check if there's existing entropy
+    if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) {
+      assert_eq!(entropy.len(), 32, "entropy saved to disk wasn't 32 bytes");
+      let mut res = Zeroizing::new([0; 32]);
+      res.copy_from_slice(entropy.as_ref());
+      res
+    } else {
+      // If there isn't, generate fresh entropy
+      let mut res = Zeroizing::new([0; 32]);
+      OsRng.fill_bytes(res.as_mut());
+      fs::write(&path, &res).unwrap();
+      res
+    }
+  };
+
+  let mut transcript =
+    RecommendedTranscript::new(b"Serai Orchestrator Infrastructure Keys Transcript");
+  transcript.append_message(b"network", network.label().as_bytes());
+  transcript.append_message(b"entropy", entropy);
+  let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b"infrastructure_keys"));
+
+  let mut key_pair = || {
+    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut rng));
+    let public = Ristretto::generator() * key.deref();
+    (key, public)
+  };
+
+  HashMap::from([
+    ("coordinator", key_pair()),
+    ("bitcoin", key_pair()),
+    ("ethereum", key_pair()),
+    ("monero", key_pair()),
+  ])
+}

fn dockerfiles(network: Network) {
  let orchestration_path = orchestration_path(network);
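Since the derivation is transcript-based, the same entropy must always yield the same keys; that is the property the persisted `infrastructure_keys_entropy` file relies on. A minimal sketch of checking it, assuming this crate's `infrastructure_keys` and `Network` are in scope (dev mode pins entropy to zeroes, so no file I/O is involved):

```rust
#[test]
fn infrastructure_keys_are_deterministic() {
  // Dev entropy is fixed at [0; 32], so both calls replay the same transcript
  // and the same ChaCha20 stream.
  let a = infrastructure_keys(Network::Dev);
  let b = infrastructure_keys(Network::Dev);
  // Compare the public halves (the Zeroizing private halves stay private).
  assert_eq!(a["coordinator"].1, b["coordinator"].1);
  assert_eq!(a["monero"].1, b["monero"].1);
}
```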
@@ -209,28 +272,11 @@ fn dockerfiles(network: Network) {
    monero_wallet_rpc(&orchestration_path);
  }

-  // TODO: Generate infra keys in key_gen, yet service entropy here?
-
-  // Generate entropy for the infrastructure keys
-  let mut entropy = Zeroizing::new([0; 32]);
-  // Only use actual entropy if this isn't a development environment
-  if network != Network::Dev {
-    OsRng.fill_bytes(entropy.as_mut());
-  }
-
-  let mut transcript = RecommendedTranscript::new(b"Serai Orchestrator Transcript");
-  transcript.append_message(b"entropy", entropy);
-  let mut new_rng = |label| ChaCha20Rng::from_seed(transcript.rng_seed(label));
-
-  let mut message_queue_keys_rng = new_rng(b"message_queue_keys");
-  let mut key_pair = || {
-    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut message_queue_keys_rng));
-    let public = Ristretto::generator() * key.deref();
-    (key, public)
-  };
-  let coordinator_key = key_pair();
-  let bitcoin_key = key_pair();
-  let ethereum_key = key_pair();
-  let monero_key = key_pair();
+  let mut infrastructure_keys = infrastructure_keys(network);
+  let coordinator_key = infrastructure_keys.remove("coordinator").unwrap();
+  let bitcoin_key = infrastructure_keys.remove("bitcoin").unwrap();
+  let ethereum_key = infrastructure_keys.remove("ethereum").unwrap();
+  let monero_key = infrastructure_keys.remove("monero").unwrap();

  message_queue(
    &orchestration_path,
@@ -241,10 +287,9 @@ fn dockerfiles(network: Network) {
    monero_key.1,
  );

-  let mut processor_entropy_rng = new_rng(b"processor_entropy");
-  let mut new_entropy = || {
+  let new_entropy = || {
    let mut res = Zeroizing::new([0; 32]);
-    processor_entropy_rng.fill_bytes(res.as_mut());
+    OsRng.fill_bytes(res.as_mut());
    res
  };
  processor(
@@ -276,9 +321,9 @@ fn dockerfiles(network: Network) {
    Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(*serai_key_repr).unwrap())
  };

-  coordinator(&orchestration_path, network, coordinator_key.0, serai_key);
-  serai(&orchestration_path, network);
+  coordinator(&orchestration_path, network, coordinator_key.0, &serai_key);
+  serai(&orchestration_path, network, &serai_key);
}

fn key_gen(network: Network) {
@@ -325,6 +370,87 @@ fn start(network: Network, services: HashSet<String>) {
      _ => panic!("starting unrecognized service"),
    };

+    // If we're building the Serai service, first build the runtime
+    let serai_runtime_volume = format!("serai-{}-runtime-volume", network.label());
+    if name == "serai" {
+      // Check if it's built by checking if the volume has the expected runtime file
+      let built = || {
+        if let Ok(path) = Command::new("docker")
+          .arg("volume")
+          .arg("inspect")
+          .arg("-f")
+          .arg("{{ .Mountpoint }}")
+          .arg(&serai_runtime_volume)
+          .output()
+        {
+          if let Ok(path) = String::from_utf8(path.stdout) {
+            if let Ok(iter) = std::fs::read_dir(PathBuf::from(path.trim())) {
+              for item in iter.flatten() {
+                if item.file_name() == "serai.wasm" {
+                  return true;
+                }
+              }
+            }
+          }
+        }
+        false
+      };
+
+      if !built() {
+        let mut repo_path = env::current_exe().unwrap();
+        repo_path.pop();
+        if repo_path.as_path().ends_with("deps") {
+          repo_path.pop();
+        }
+        assert!(repo_path.as_path().ends_with("debug") || repo_path.as_path().ends_with("release"));
+        repo_path.pop();
+        assert!(repo_path.as_path().ends_with("target"));
+        repo_path.pop();
+
+        // Build the image to build the runtime
+        if !Command::new("docker")
+          .current_dir(&repo_path)
+          .arg("build")
+          .arg("-f")
+          .arg("orchestration/runtime/Dockerfile")
+          .arg(".")
+          .arg("-t")
+          .arg(format!("serai-{}-runtime-img", network.label()))
+          .spawn()
+          .unwrap()
+          .wait()
+          .unwrap()
+          .success()
+        {
+          panic!("failed to build runtime image");
+        }
+
+        // Run the image, building the runtime
+        println!("Building the Serai runtime");
+        let container_name = format!("serai-{}-runtime", network.label());
+        let _ =
+          Command::new("docker").arg("rm").arg("-f").arg(&container_name).spawn().unwrap().wait();
+        let _ = Command::new("docker")
+          .arg("run")
+          .arg("--name")
+          .arg(container_name)
+          .arg("--volume")
+          .arg(format!("{serai_runtime_volume}:/volume"))
+          .arg(format!("serai-{}-runtime-img", network.label()))
+          .spawn();
+
+        // Wait until it's built
+        let mut ticks = 0;
+        while !built() {
+          std::thread::sleep(core::time::Duration::from_secs(60));
+          ticks += 1;
+          if ticks > 6 * 60 {
+            panic!("couldn't build the runtime after 6 hours")
+          }
+        }
+      }
+    }
+
    // Build it
    println!("Building {service}");
    docker::build(&orchestration_path(network), network, name);
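The `built` closure keys off Docker's volume mountpoint. The same probe in isolation (a hypothetical helper, assuming the Docker CLI is on PATH and the daemon runs locally so the mountpoint is readable):

```rust
use std::{path::PathBuf, process::Command};

// Returns true if the named Docker volume contains `file` at its root.
fn volume_contains(volume: &str, file: &str) -> bool {
  let Ok(out) = Command::new("docker")
    .args(["volume", "inspect", "-f", "{{ .Mountpoint }}", volume])
    .output()
  else {
    return false;
  };
  let Ok(path) = String::from_utf8(out.stdout) else { return false };
  PathBuf::from(path.trim()).join(file).exists()
}
```

With a 60-second sleep per tick, the `6 * 60` tick budget above is 360 minutes, matching the "6 hours" in the panic message.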
@@ -335,6 +461,10 @@ fn start(network: Network, services: HashSet<String>) {
      .arg("container")
      .arg("inspect")
      .arg(&docker_name)
+      // Use null for all IO to silence 'container does not exist'
+      .stdin(Stdio::null())
+      .stdout(Stdio::null())
+      .stderr(Stdio::null())
      .status()
      .unwrap()
      .success()
@@ -348,25 +478,51 @@ fn start(network: Network, services: HashSet<String>) {
        let command = command.arg("--restart").arg("always");
        let command = command.arg("--log-opt").arg("max-size=100m");
        let command = command.arg("--log-opt").arg("max-file=3");
+        let command = if network == Network::Dev {
+          command
+        } else {
+          // Assign a persistent volume if this isn't for Dev
+          command.arg("--volume").arg(volume)
+        };
        let command = match name {
          "bitcoin" => {
+            // Expose the RPC for tests
            if network == Network::Dev {
              command.arg("-p").arg("8332:8332")
            } else {
-              command.arg("--volume").arg(volume)
+              command
            }
          }
          "monero" => {
+            // Expose the RPC for tests
            if network == Network::Dev {
              command.arg("-p").arg("18081:18081")
            } else {
-              command.arg("--volume").arg(volume)
+              command
            }
          }
          "monero-wallet-rpc" => {
            assert_eq!(network, Network::Dev, "monero-wallet-rpc is only for dev");
+            // Expose the RPC for tests
            command.arg("-p").arg("18082:18082")
          }
+          "coordinator" => {
+            if network == Network::Dev {
+              command
+            } else {
+              // Publish the port
+              command.arg("-p").arg("30563:30563")
+            }
+          }
+          "serai" => {
+            let command = command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime"));
+            if network == Network::Dev {
+              command
+            } else {
+              // Publish the port
+              command.arg("-p").arg("30333:30333")
+            }
+          }
          _ => command,
        };
        assert!(
@@ -390,10 +546,10 @@ Serai Orchestrator v0.0.1
Commands:
  key_gen *network*
-    Generates a key for the validator.
+    Generate a key for the validator.

  setup *network*
-    Generate infrastructure keys and the Dockerfiles for every Serai service.
+    Generate the Dockerfiles for every Serai service.

  start *network* [service1, service2...]
    Start the specified services for the specified network ("dev" or "testnet").
View file

@@ -20,8 +20,8 @@ pub fn message_queue(
    ("BITCOIN_KEY", hex::encode(bitcoin_key.to_bytes())),
    ("ETHEREUM_KEY", hex::encode(ethereum_key.to_bytes())),
    ("MONERO_KEY", hex::encode(monero_key.to_bytes())),
-    ("DB_PATH", "./message-queue-db".to_string()),
-    ("RUST_LOG", "serai_message_queue=trace".to_string()),
+    ("DB_PATH", "/volume/message-queue-db".to_string()),
+    ("RUST_LOG", "info,serai_message_queue=trace".to_string()),
  ];
  let mut env_vars_str = String::new();
  for (env_var, value) in env_vars {
View file

@@ -40,15 +40,15 @@ RUN apt install -y ca-certificates
  };

  let env_vars = [
-    ("MESSAGE_QUEUE_RPC", format!("serai-{}-message_queue", network.label())),
+    ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())),
    ("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())),
    ("ENTROPY", hex::encode(entropy.as_ref())),
    ("NETWORK", coin.to_string()),
    ("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")),
    ("NETWORK_RPC_HOSTNAME", hostname),
    ("NETWORK_RPC_PORT", format!("{port}")),
-    ("DB_PATH", "./processor-db".to_string()),
-    ("RUST_LOG", "serai_processor=debug".to_string()),
+    ("DB_PATH", "/volume/processor-db".to_string()),
+    ("RUST_LOG", "info,serai_processor=debug".to_string()),
  ];
  let mut env_vars_str = String::new();
  for (env_var, value) in env_vars {
View file

@@ -1,14 +1,26 @@
use std::{path::Path};

+use zeroize::Zeroizing;
+
+use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};
+
use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};

-pub fn serai(orchestration_path: &Path, network: Network) {
+pub fn serai(
+  orchestration_path: &Path,
+  network: Network,
+  serai_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+) {
  // Always builds in release for performance reasons
  let setup = mimalloc(Os::Debian).to_string() + &build_serai_service(true, "", "serai-node");
  let setup_fast_epoch =
    mimalloc(Os::Debian).to_string() + &build_serai_service(true, "fast-epoch", "serai-node");

-  // TODO: Review the ports exposed here
+  let env_vars = [("KEY", hex::encode(serai_key.to_repr()))];
+  let mut env_vars_str = String::new();
+  for (env_var, value) in env_vars {
+    env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#);
+  }
+
  let run_serai = format!(
    r#"
# Copy the Serai binary and relevant license
@@ -16,12 +28,12 @@ COPY --from=builder --chown=serai /serai/bin/serai-node /bin/
COPY --from=builder --chown=serai /serai/AGPL-3.0 .

# Run the Serai node
-EXPOSE 30333 9615 9933 9944
+EXPOSE 30333 9944

ADD /orchestration/{}/serai/run.sh /
-CMD ["/run.sh"]
+CMD {env_vars_str} "/run.sh"
"#,
-    network.label()
+    network.label(),
  );

  let run = os(Os::Debian, "", "serai") + &run_serai;
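The `env_vars_str` loop renders each pair as a POSIX default-assignment, so the key baked into the image is only a fallback. A sketch of the expansion, using a hypothetical key value:

```rust
// Each ("NAME", value) pair becomes NAME=${NAME:="value"}, which the shell
// expands to the existing environment value when one is already set.
let env_vars = [("KEY", "deadbeef".to_string())];
let mut env_vars_str = String::new();
for (env_var, value) in env_vars {
  env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#);
}
assert_eq!(env_vars_str, r#"KEY=${KEY:="deadbeef"} "#);
```

`docker run -e KEY=...` therefore overrides the default without rebuilding the image.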
View file

@@ -1,3 +1,3 @@
#!/bin/sh

-exit 1
+serai-node --base-path /volume --unsafe-rpc-external --rpc-cors all --chain testnet --validator
View file

@@ -14,4 +14,4 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
-zstd = "0.12"
+zstd = "0.13"
View file

@@ -1,5 +1,5 @@
[toolchain]
-channel = "1.76"
+channel = "1.77"
targets = ["wasm32-unknown-unknown"]
profile = "minimal"
components = ["rust-src", "rustfmt", "clippy"]
View file

@@ -36,16 +36,16 @@ rustup target add wasm32-unknown-unknown --toolchain nightly
```
cargo install svm-rs
-svm install 0.8.16
-svm use 0.8.16
+svm install 0.8.25
+svm use 0.8.25
```

### Install Solidity Compiler Version Manager

```
cargo install svm-rs
-svm install 0.8.16
-svm use 0.8.16
+svm install 0.8.25
+svm use 0.8.25
```

### Install foundry (for tests)
View file

@@ -29,7 +29,12 @@ macro_rules! serai_test {
        "--rpc-cors".to_string(),
        "all".to_string(),
      ])
-      .replace_env(HashMap::from([("RUST_LOG".to_string(), "runtime=debug".to_string())]))
+      .replace_env(
+        HashMap::from([
+          ("RUST_LOG".to_string(), "runtime=debug".to_string()),
+          ("KEY".to_string(), " ".to_string()),
+        ])
+      )
      .set_publish_all_ports(true)
      .set_handle(handle)
      .set_start_policy(StartPolicy::Strict)
View file

@@ -14,7 +14,9 @@ async fn dht() {
    TestBodySpecification::with_image(
      Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never),
    )
-    .replace_env([("SERAI_NAME".to_string(), name.to_string())].into())
+    .replace_env(
+      [("SERAI_NAME".to_string(), name.to_string()), ("KEY".to_string(), " ".to_string())].into(),
+    )
    .set_publish_all_ports(true)
    .set_handle(handle(name))
    .set_start_policy(StartPolicy::Strict)
View file

@@ -100,7 +100,10 @@ async fn validator_set_rotation() {
        "local".to_string(),
        format!("--{name}"),
      ])
-      .replace_env(HashMap::from([("RUST_LOG=runtime".to_string(), "debug".to_string())]))
+      .replace_env(HashMap::from([
+        ("RUST_LOG".to_string(), "runtime=debug".to_string()),
+        ("KEY".to_string(), " ".to_string()),
+      ]))
      .set_publish_all_ports(true)
      .set_handle(handle(name))
      .set_start_policy(StartPolicy::Strict)
View file

@@ -23,6 +23,7 @@ name = "serai-node"
zeroize = "1"
hex = "0.4"

+rand_core = "0.6"
schnorrkel = "0.11"

sp-core = { git = "https://github.com/serai-dex/substrate" }
View file

@@ -15,6 +15,14 @@ fn account_from_name(name: &'static str) -> PublicKey {
  insecure_pair_from_name(name).public()
}

+fn wasm_binary() -> Vec<u8> {
+  // TODO: Accept a config of runtime path
+  if let Ok(binary) = std::fs::read("/runtime/serai.wasm") {
+    return binary;
+  }
+  WASM_BINARY.ok_or("compiled in wasm not available").unwrap().to_vec()
+}
+
fn testnet_genesis(
  wasm_binary: &[u8],
  validators: &[&'static str],
@@ -64,18 +72,18 @@ fn testnet_genesis(
  }
}

-pub fn development_config() -> Result<ChainSpec, &'static str> {
-  let wasm_binary = WASM_BINARY.ok_or("Development wasm not available")?;
+pub fn development_config() -> ChainSpec {
+  let wasm_binary = wasm_binary();

-  Ok(ChainSpec::from_genesis(
+  ChainSpec::from_genesis(
    // Name
    "Development Network",
    // ID
    "devnet",
    ChainType::Development,
-    || {
+    move || {
      testnet_genesis(
-        wasm_binary,
+        &wasm_binary,
        &["Alice"],
        vec![
          account_from_name("Alice"),
@@ -99,21 +107,21 @@ pub fn development_config() -> Result<ChainSpec, &'static str> {
    None,
    // Extensions
    None,
-  ))
+  )
}

-pub fn testnet_config() -> Result<ChainSpec, &'static str> {
-  let wasm_binary = WASM_BINARY.ok_or("Testnet wasm not available")?;
+pub fn testnet_config() -> ChainSpec {
+  let wasm_binary = wasm_binary();

-  Ok(ChainSpec::from_genesis(
+  ChainSpec::from_genesis(
    // Name
    "Local Test Network",
    // ID
    "local",
    ChainType::Local,
-    || {
+    move || {
      testnet_genesis(
-        wasm_binary,
+        &wasm_binary,
        &["Alice", "Bob", "Charlie", "Dave"],
        vec![
          account_from_name("Alice"),
@@ -137,5 +145,5 @@ pub fn testnet_config() -> Result<ChainSpec, &'static str> {
    None,
    // Extensions
    None,
-  ))
+  )
}
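`wasm_binary` gives a runtime mounted into the container precedence over the one compiled into the node. A condensed sketch of the lookup order (the `/runtime/serai.wasm` path matches the volume the `serai` service now mounts):

```rust
// Prefer a runtime dropped into the mounted volume; fall back to the blob
// compiled into the node binary.
fn wasm_binary_or(compiled_in: Option<&[u8]>) -> Vec<u8> {
  std::fs::read("/runtime/serai.wasm")
    .unwrap_or_else(|_| compiled_in.expect("compiled in wasm not available").to_vec())
}
```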
View file

@@ -39,8 +39,8 @@ impl SubstrateCli for Cli {
  fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {
    match id {
-      "dev" | "devnet" => Ok(Box::new(chain_spec::development_config()?)),
-      "local" => Ok(Box::new(chain_spec::testnet_config()?)),
+      "dev" | "devnet" => Ok(Box::new(chain_spec::development_config())),
+      "local" => Ok(Box::new(chain_spec::testnet_config())),
      _ => panic!("Unknown network ID"),
    }
  }
View file

@@ -8,6 +8,9 @@ pub struct Keystore(sr25519::Pair);
impl Keystore {
  pub fn from_env() -> Option<Self> {
    let mut key_hex = serai_env::var("KEY")?;
+    if key_hex.trim().is_empty() {
+      None?;
+    }
    let mut key = hex::decode(&key_hex).expect("KEY from environment wasn't hex");
    key_hex.zeroize();
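This is what lets the updated tests pass `KEY` as a single space to disable the keystore: whitespace-only values now read as absent rather than failing hex decoding. A sketch, assuming `Keystore::from_env` as above:

```rust
// " " trims to empty, so from_env short-circuits with None instead of
// panicking inside hex::decode.
std::env::set_var("KEY", " ");
assert!(Keystore::from_env().is_none());
```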
View file

@@ -1,5 +1,7 @@
use std::{sync::Arc, collections::HashSet};

+use rand_core::{RngCore, OsRng};
+
use sp_blockchain::{Error as BlockchainError, HeaderBackend, HeaderMetadata};
use sp_block_builder::BlockBuilder;
use sp_api::ProvideRuntimeApi;
@@ -72,14 +74,19 @@ where
        .get_addresses_by_authority_id(validator.into())
        .await
        .unwrap_or_else(HashSet::new)
-        .into_iter();
-      // Only take a single address
+        .into_iter()
+        .collect::<Vec<_>>();
+      // Randomly select an address
      // There should be one, there may be two if their IP address changed, and more should only
      // occur if they have multiple proxies/an IP address changing frequently/some issue
      // preventing consistent self-identification
      // It isn't beneficial to use multiple addresses for a single peer here
-      if let Some(address) = returned_addresses.next() {
-        all_p2p_addresses.push(address);
+      if !returned_addresses.is_empty() {
+        all_p2p_addresses.push(
+          returned_addresses.remove(
+            usize::try_from(OsRng.next_u64() >> 32).unwrap() % returned_addresses.len(),
+          ),
+        );
      }
    }
    Ok(all_p2p_addresses)
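The selection shifts the `u64` right by 32 bits so the `usize` conversion can't fail on 32-bit targets, then reduces modulo the address count. The same pattern in isolation (a hypothetical helper):

```rust
use rand_core::{OsRng, RngCore};

// Remove and return a randomly chosen element, if any.
fn pick_random<T>(v: &mut Vec<T>) -> Option<T> {
  if v.is_empty() {
    return None;
  }
  let i = usize::try_from(OsRng.next_u64() >> 32).unwrap() % v.len();
  Some(v.swap_remove(i))
}
```

Modulo reduction carries a slight bias, which is immaterial when picking one of at most a handful of addresses.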
View file

@@ -314,12 +314,10 @@ pub type ReportLongevity = <Runtime as pallet_babe::Config>::EpochDuration;
impl babe::Config for Runtime {
  #[cfg(feature = "fast-epoch")]
-  #[allow(clippy::identity_op)]
-  type EpochDuration = ConstU64<{ DAYS / (24 * 60 * 2) }>; // 30 seconds
+  type EpochDuration = ConstU64<{ MINUTES / 2 }>; // 30 seconds

  #[cfg(not(feature = "fast-epoch"))]
-  #[allow(clippy::identity_op)]
-  type EpochDuration = ConstU64<{ DAYS }>;
+  type EpochDuration = ConstU64<{ 4 * 7 * DAYS }>;

  type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>;
  type EpochChangeTrigger = babe::ExternalTrigger;
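The new constants read directly in block counts. A quick sanity check of the fast-epoch value against its comment, assuming a 6-second target block time (so `MINUTES` is 10 blocks):

```rust
// Hypothetical constants mirroring the runtime's units.
const TARGET_BLOCK_TIME: u64 = 6; // seconds
const MINUTES: u64 = 60 / TARGET_BLOCK_TIME; // blocks per minute

fn main() {
  assert_eq!((MINUTES / 2) * TARGET_BLOCK_TIME, 30); // 30 seconds, as commented
}
```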
View file

@@ -142,6 +142,7 @@ pub mod pallet {
  }

  // 80% threshold
+  // TODO: Use 34% for halting a set (not 80%)
  const REQUIREMENT_NUMERATOR: u64 = 4;
  const REQUIREMENT_DIVISOR: u64 = 5;
View file

@@ -363,22 +363,27 @@ pub mod pallet {
      let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0;

-      let mut iter = SortedAllocationsIter::<T>::new(network);
      let mut participants = vec![];
-      let mut key_shares = 0;
      let mut total_stake = 0;
+      {
+        let mut iter = SortedAllocationsIter::<T>::new(network);
+        let mut key_shares = 0;
        while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) {
          let Some((key, amount)) = iter.next() else { break };

-          let these_key_shares = amount.0 / allocation_per_key_share;
-          InSet::<T>::set(network, key, Some(these_key_shares));
+          let these_key_shares =
+            (amount.0 / allocation_per_key_share).min(u64::from(MAX_KEY_SHARES_PER_SET));
          participants.push((key, these_key_shares));

+          // This can technically set key_shares to a value exceeding MAX_KEY_SHARES_PER_SET
+          // Off-chain, the key shares per validator will be accordingly adjusted
          key_shares += these_key_shares;
          total_stake += amount.0;
        }
+        amortize_excess_key_shares(&mut participants);
+      }
+
+      for (key, shares) in &participants {
+        InSet::<T>::set(network, key, Some(*shares));
+      }

      TotalAllocatedStake::<T>::set(network, Some(Amount(total_stake)));

      let set = ValidatorSet { network, session };
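Capping each validator at `MAX_KEY_SHARES_PER_SET` means a single allocation can no longer dominate the loop's budget. A sketch with assumed numbers:

```rust
// Assume one key share costs 10_000 and a set caps at 600 shares.
let allocation_per_key_share = 10_000u64;
let max_key_shares = 600u64;
let whale = 100_000_000u64; // enough stake for 10_000 shares
let these_key_shares = (whale / allocation_per_key_share).min(max_key_shares);
assert_eq!(these_key_shares, 600); // capped, not 10_000
```

The running `key_shares` total may still overshoot the cap by the final participant's shares, which is why `amortize_excess_key_shares` now runs before `InSet` is written.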
View file

@@ -115,11 +115,11 @@ pub fn report_slashes_message(set: &ValidatorSet, slashes: &[(Public, u32)]) ->
/// maximum.
///
/// Reduction occurs by reducing each validator in a reverse round-robin.
-pub fn amortize_excess_key_shares(validators: &mut [(Public, u16)]) {
-  let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::<u16>();
-  for i in 0 .. usize::from(
-    total_key_shares.saturating_sub(u16::try_from(MAX_KEY_SHARES_PER_SET).unwrap()),
-  ) {
+pub fn amortize_excess_key_shares(validators: &mut [(Public, u64)]) {
+  let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::<u64>();
+  for i in 0 .. usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET)))
+    .unwrap()
+  {
    validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;
  }
}
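A worked example of the reverse round-robin, with assumed share counts and a `MAX_KEY_SHARES_PER_SET` of 600 (public keys elided):

```rust
// 300 + 200 + 150 = 650 shares; 50 must be shed. Walking indices from the
// back (i % 3 == 0 hits the last validator), 50 = 17 + 17 + 16 removals.
let mut shares = vec![300u64, 200, 150];
let excess = 50;
for i in 0..excess {
  let idx = shares.len() - ((i % shares.len()) + 1);
  shares[idx] -= 1;
}
assert_eq!(shares, vec![284, 183, 133]);
assert_eq!(shares.iter().sum::<u64>(), 600);
```

Since the slice is sorted by allocation, the least-allocated validators absorb the extra removal first, keeping the adjustment as even as the remainder allows.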
View file

@@ -20,7 +20,6 @@ workspace = true
hex = "0.4"

async-trait = "0.1"
-async-recursion = "1"

zeroize = { version = "1", default-features = false }
rand_core = { version = "0.6", default-features = false }
View file

@@ -5,7 +5,10 @@ use std::{
  time::Duration,
};

-use tokio::{task::AbortHandle, sync::Mutex as AsyncMutex};
+use tokio::{
+  task::AbortHandle,
+  sync::{Mutex as AsyncMutex, mpsc},
+};

use rand_core::{RngCore, OsRng};
@@ -63,7 +66,9 @@ pub fn serai_composition(name: &str) -> TestBodySpecification {
  TestBodySpecification::with_image(
    Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never),
  )
-  .replace_env([("SERAI_NAME".to_string(), name.to_lowercase())].into())
+  .replace_env(
+    [("SERAI_NAME".to_string(), name.to_lowercase()), ("KEY".to_string(), " ".to_string())].into(),
+  )
  .set_publish_all_ports(true)
}
@@ -96,7 +101,6 @@ pub struct Handles {
  pub(crate) message_queue: String,
}

-#[derive(Clone)]
pub struct Processor {
  network: NetworkId,
@@ -104,7 +108,8 @@ pub struct Processor {
  #[allow(unused)]
  handles: Handles,

-  queue: Arc<AsyncMutex<(u64, u64, MessageQueue)>>,
+  msgs: mpsc::UnboundedReceiver<messages::CoordinatorMessage>,
+  queue_for_sending: MessageQueue,
  abort_handle: Option<Arc<AbortHandle>>,

  substrate_key: Arc<AsyncMutex<Option<Zeroizing<<Ristretto as Ciphersuite>::F>>>>,
@@ -145,47 +150,50 @@ impl Processor {
    // The Serai RPC may or may not be started
    // Assume it is and continue, so if it's a few seconds late, it's still within tolerance

+    // Create the queue
+    let mut queue = (
+      0,
+      Arc::new(MessageQueue::new(
+        Service::Processor(network),
+        message_queue_rpc.clone(),
+        Zeroizing::new(processor_key),
+      )),
+    );
+
+    let (msg_send, msg_recv) = mpsc::unbounded_channel();
+
+    let substrate_key = Arc::new(AsyncMutex::new(None));
    let mut res = Processor {
      network,
      serai_rpc,
      handles,
-      queue: Arc::new(AsyncMutex::new((
-        0,
-        0,
-        MessageQueue::new(
-          Service::Processor(network),
-          message_queue_rpc,
-          Zeroizing::new(processor_key),
-        ),
-      ))),
+      queue_for_sending: MessageQueue::new(
+        Service::Processor(network),
+        message_queue_rpc,
+        Zeroizing::new(processor_key),
+      ),
+      msgs: msg_recv,
      abort_handle: None,
-      substrate_key: Arc::new(AsyncMutex::new(None)),
+      substrate_key: substrate_key.clone(),
    };

-    // Handle any cosigns which come up
-    res.abort_handle = Some(Arc::new(
-      tokio::spawn({
-        let mut res = res.clone();
+    // Spawn a task to handle cosigns and forward messages as appropriate
+    let abort_handle = tokio::spawn({
      async move {
        loop {
-          tokio::task::yield_now().await;
-
-          let msg = {
-            let mut queue_lock = res.queue.lock().await;
-            let (_, next_recv_id, queue) = &mut *queue_lock;
-            let Ok(msg) =
-              tokio::time::timeout(Duration::from_secs(1), queue.next(Service::Coordinator))
-                .await
-            else {
-              continue;
-            };
+          // Get new messages
+          let (next_recv_id, queue) = &mut queue;
+          let msg = queue.next(Service::Coordinator).await;
          assert_eq!(msg.from, Service::Coordinator);
          assert_eq!(msg.id, *next_recv_id);
+          queue.ack(Service::Coordinator, msg.id).await;
+          *next_recv_id += 1;

          let msg_msg = borsh::from_slice(&msg.msg).unwrap();

          // Remove any BatchReattempts clogging the pipe
          // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet
          // leave it for the tests
@@ -195,16 +203,26 @@ impl Processor {
              messages::coordinator::CoordinatorMessage::BatchReattempt { .. }
            )
          ) {
-            queue.ack(Service::Coordinator, msg.id).await;
-            *next_recv_id += 1;
            continue;
          }

          if !is_cosign_message(&msg_msg) {
+            msg_send.send(msg_msg).unwrap();
            continue;
-          };
-
-          queue.ack(Service::Coordinator, msg.id).await;
-          *next_recv_id += 1;
-          msg_msg
-        };
+          }
+          let msg = msg_msg;
+
+          let send_message = |msg: ProcessorMessage| async move {
+            queue
+              .queue(
+                Metadata {
+                  from: Service::Processor(network),
+                  to: Service::Coordinator,
+                  intent: msg.intent(),
+                },
+                borsh::to_vec(&msg).unwrap(),
+              )
+              .await;
+          };

          struct CurrentCosign {
@@ -219,10 +237,7 @@ impl Processor {
          // While technically, each processor should individually track the current cosign,
          // this is fine for current testing purposes
          CoordinatorMessage::Coordinator(
-            messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
-              id,
-              block_number,
-            },
+            messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { id, block_number },
          ) => {
            let SubstrateSignId {
              id: SubstrateSignableId::CosigningSubstrateBlock(block), ..
@@ -235,11 +250,13 @@ impl Processor {
            if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) {
              *current_cosign = Some(new_cosign);
            }
-            res
-              .send_message(messages::coordinator::ProcessorMessage::CosignPreprocess {
+            send_message(
+              messages::coordinator::ProcessorMessage::CosignPreprocess {
                id: id.clone(),
                preprocesses: vec![[raw_i; 64]],
-              })
+              }
+              .into(),
+            )
            .await;
          }
          CoordinatorMessage::Coordinator(
@@ -247,11 +264,13 @@ impl Processor {
          ) => {
            // TODO: Assert the ID matches CURRENT_COSIGN
            // TODO: Verify the received preprocesses
-            res
-              .send_message(messages::coordinator::ProcessorMessage::SubstrateShare {
+            send_message(
+              messages::coordinator::ProcessorMessage::SubstrateShare {
                id,
                shares: vec![[raw_i; 32]],
-              })
+              }
+              .into(),
+            )
            .await;
          }
          CoordinatorMessage::Coordinator(
@@ -263,7 +282,7 @@ impl Processor {
            let block_number = current_cosign.as_ref().unwrap().block_number;
            let block = current_cosign.as_ref().unwrap().block;

-            let substrate_key = res.substrate_key.lock().await.clone().unwrap();
+            let substrate_key = substrate_key.lock().await.clone().unwrap();

            // Expand to a key pair as Schnorrkel expects
            // It's the private key + 32-bytes of entropy for nonces + the public key
@@ -280,12 +299,14 @@ impl Processor {
                .to_bytes(),
            );

-            res
-              .send_message(messages::coordinator::ProcessorMessage::CosignedBlock {
+            send_message(
+              messages::coordinator::ProcessorMessage::CosignedBlock {
                block_number,
                block,
                signature: signature.0.to_vec(),
-              })
+              }
+              .into(),
+            )
            .await;
          }
          _ => panic!("unexpected message passed is_cosign_message"),
@@ -293,8 +314,9 @@ impl Processor {
        }
      }
    })
-      .abort_handle(),
-    ));
+    .abort_handle();
+
+    res.abort_handle = Some(Arc::new(abort_handle));

    res
  }
@@ -307,9 +329,8 @@ impl Processor {
  pub async fn send_message(&mut self, msg: impl Into<ProcessorMessage>) {
    let msg: ProcessorMessage = msg.into();

-    let mut queue_lock = self.queue.lock().await;
-    let (next_send_id, _, queue) = &mut *queue_lock;
-    queue
+    self
+      .queue_for_sending
      .queue(
        Metadata {
          from: Service::Processor(self.network),
@@ -319,36 +340,13 @@ impl Processor {
        borsh::to_vec(&msg).unwrap(),
      )
      .await;
-    *next_send_id += 1;
-  }
-
-  async fn recv_message_inner(&mut self) -> CoordinatorMessage {
-    loop {
-      tokio::task::yield_now().await;
-
-      let mut queue_lock = self.queue.lock().await;
-      let (_, next_recv_id, queue) = &mut *queue_lock;
-      let msg = queue.next(Service::Coordinator).await;
-      assert_eq!(msg.from, Service::Coordinator);
-      assert_eq!(msg.id, *next_recv_id);
-
-      // If this is a cosign message, let the cosign task handle it
-      let msg_msg = borsh::from_slice(&msg.msg).unwrap();
-      if is_cosign_message(&msg_msg) {
-        continue;
-      }
-
-      queue.ack(Service::Coordinator, msg.id).await;
-      *next_recv_id += 1;
-      return msg_msg;
-    }
  }

  /// Receive a message from the coordinator as a processor.
  pub async fn recv_message(&mut self) -> CoordinatorMessage {
    // Set a timeout of 20 minutes to allow effectively any protocol to occur without a fear of
    // an arbitrary timeout cutting it short
-    tokio::time::timeout(Duration::from_secs(20 * 60), self.recv_message_inner()).await.unwrap()
+    tokio::time::timeout(Duration::from_secs(20 * 60), self.msgs.recv()).await.unwrap().unwrap()
  }

  pub async fn set_substrate_key(
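The rework replaces the shared `(send_id, recv_id, queue)` mutex with a single reader task fanning out over a channel, so `recv_message` can no longer race the cosign handler for the socket. The shape of that pattern, reduced to a sketch:

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
  // One task owns the message source and acks in order; consumers only read
  // the channel, so the ordering asserts live in exactly one place.
  let (tx, mut rx) = mpsc::unbounded_channel::<String>();
  tokio::spawn(async move {
    for msg in ["first", "second"] {
      tx.send(msg.to_string()).unwrap();
    }
  });
  assert_eq!(rx.recv().await.unwrap(), "first");
  assert_eq!(rx.recv().await.unwrap(), "second");
}
```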
View file

@@ -245,7 +245,7 @@ pub async fn batch(
    )
  );

-  // Send the ack as expected, though it shouldn't trigger any observable behavior
+  // Send the ack as expected
  processor
    .send_message(messages::ProcessorMessage::Coordinator(
      messages::coordinator::ProcessorMessage::SubstrateBlockAck {
View file

@@ -135,7 +135,6 @@ pub(crate) async fn new_test(test_body: impl TestBody) {
  *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None;

  // Spawns a coordinator, if one has yet to be spawned, or else runs the test.
-  #[async_recursion::async_recursion]
  async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) {
    // If the outer operations have yet to be set, these *are* the outer operations
    let outer_ops = OUTER_OPS.get().unwrap();
@@ -178,7 +177,10 @@ pub(crate) async fn new_test(test_body: impl TestBody) {
    test.provide_container(composition);

    drop(context_lock);
-    test.run_async(spawn_coordinator_or_run_test).await;
+    fn recurse(ops: DockerOperations) -> core::pin::Pin<Box<impl Send + Future<Output = ()>>> {
+      Box::pin(spawn_coordinator_or_run_test(ops))
+    }
+    test.run_async(recurse).await;
  } else {
    let outer_ops = outer_ops.lock().await.take().unwrap();
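Dropping `async-recursion` here is mechanical: an `async fn` can't name its own future type recursively, but a plain `fn` returning a boxed future can, which is all the macro generated anyway. A minimal sketch of the technique:

```rust
use core::{future::Future, pin::Pin};

// Recursive async without the async-recursion macro: box the future so the
// return type has a fixed size.
fn countdown(n: u32) -> Pin<Box<dyn Future<Output = ()> + Send>> {
  Box::pin(async move {
    if n > 0 {
      countdown(n - 1).await;
    }
  })
}
```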
View file

@@ -20,7 +20,6 @@ workspace = true
hex = "0.4"

async-trait = "0.1"
-async-recursion = "1"

zeroize = { version = "1", default-features = false }
rand_core = { version = "0.6", default-features = false }
View file

@@ -161,8 +161,10 @@ pub(crate) async fn new_test(test_body: impl TestBody) {
  *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None;

  // Spawns a coordinator, if one has yet to be spawned, or else runs the test.
-  #[async_recursion::async_recursion]
-  async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) {
+  pub(crate) fn spawn_coordinator_or_run_test(
+    inner_ops: DockerOperations,
+  ) -> core::pin::Pin<Box<impl Send + Future<Output = ()>>> {
+    Box::pin(async {
      // If the outer operations have yet to be set, these *are* the outer operations
      let outer_ops = OUTER_OPS.get().unwrap();
      if outer_ops.lock().await.is_none() {
@@ -209,6 +211,7 @@ pub(crate) async fn new_test(test_body: impl TestBody) {
        let outer_ops = outer_ops.lock().await.take().unwrap();
        test_body.body(outer_ops, handles.clone()).await;
      }
+    })
  }

  test.run_async(spawn_coordinator_or_run_test).await;