Merge branch 'main' into block-downloader

This commit is contained in:
Boog900 2024-06-10 18:59:24 +00:00 committed by GitHub
commit f043e0b71a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 943 additions and 81 deletions

4
.github/labeler.yml vendored
View file

@ -43,6 +43,10 @@ A-book-protocol:
- changed-files:
- any-glob-to-any-file: books/protocol/**
A-book-user:
- changed-files:
- any-glob-to-any-file: books/user/**
# Crate (sub-)directories.
A-binaries:
- changed-files:

104
Cargo.lock generated
View file

@ -50,6 +50,13 @@ dependencies = [
"libc",
]
[[package]]
name = "anstyle"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b"
[[package]]
name = "async-buffer"
version = "0.1.0"
@ -71,6 +78,28 @@ dependencies = [
"pin-project-lite",
]
[[package]]
name = "async-stream"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
dependencies = [
"async-stream-impl",
"futures-core",
"pin-project-lite",
]
[[package]]
name = "async-stream-impl"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.60",
]
[[package]]
name = "async-trait"
version = "0.1.80"
@ -287,6 +316,44 @@ dependencies = [
"windows-targets 0.52.5",
]
[[package]]
name = "clap"
version = "4.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7"
dependencies = [
"clap_builder",
"clap_derive",
]
[[package]]
name = "clap_builder"
version = "4.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df"
dependencies = [
"anstyle",
"clap_lex",
]
[[package]]
name = "clap_derive"
version = "4.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6"
dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
"syn 2.0.60",
]
[[package]]
name = "clap_lex"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70"
[[package]]
name = "concurrent-queue"
version = "2.5.0"
@ -493,6 +560,22 @@ dependencies = [
"tracing",
]
[[package]]
name = "cuprate-fast-sync"
version = "0.1.0"
dependencies = [
"clap",
"cuprate-blockchain",
"cuprate-types",
"hex",
"hex-literal",
"rayon",
"sha3",
"tokio",
"tokio-test",
"tower",
]
[[package]]
name = "cuprate-helper"
version = "0.1.0"
@ -984,6 +1067,12 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "heed"
version = "0.20.2"
@ -2077,7 +2166,7 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d"
dependencies = [
"heck",
"heck 0.4.1",
"proc-macro2",
"quote",
"syn 2.0.60",
@ -2415,6 +2504,19 @@ dependencies = [
"tokio-util",
]
[[package]]
name = "tokio-test"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
dependencies = [
"async-stream",
"bytes",
"futures-core",
"tokio",
"tokio-stream",
]
[[package]]
name = "tokio-util"
version = "0.7.10"

View file

@ -3,6 +3,7 @@ resolver = "2"
members = [
"consensus",
"consensus/fast-sync",
"consensus/rules",
"cryptonight",
"helper",

View file

@ -4,6 +4,7 @@ This directory contains the source files for Cuprate's various books.
The source files are edited here, and published in other repositories, see:
- [Cuprate's architecture book](https://github.com/Cuprate/architecture-book)
- [Cuprate's protocol book](https://github.com/Cuprate/monero-book)
- [Cuprate's user book](https://github.com/Cuprate/user-book)
## Build tools
Building the book(s) requires [Rust's cargo tool](https://doc.rust-lang.org/cargo/getting-started/installation.html) and [mdBook](https://github.com/rust-lang/mdBook).
@ -17,12 +18,12 @@ cargo install mdbook
To build a book, go into a book's directory and build:
```bash
# This builds Cuprate's architecture book.
cd architecture/
# This builds Cuprate's user book.
cd user/
mdbook build
```
The output will be in the `book` subdirectory (`architecture/book` for the above example). To open the book, you can open it in your web browser like so:
The output will be in the `book` subdirectory (`user/book` for the above example). To open the book, you can open it in your web browser like so:
```bash
mdbook build --open
```

6
books/user/README.md Normal file
View file

@ -0,0 +1,6 @@
## Cuprate's user book
This book is the end-user documentation for Cuprate, aka, "how to use `cuprated`".
See:
- <https://user.cuprate.org>
- <https://github.com/Cuprate/user-book>

19
books/user/book.toml Normal file
View file

@ -0,0 +1,19 @@
[book]
authors = ["hinto-janai"]
language = "en"
multilingual = false
src = "src"
title = "Cuprate's user book"
git-repository-url = "https://github.com/Cuprate/user-book"
# TODO: fix after importing real files.
#
# [preprocessor.last-changed]
# command = "mdbook-last-changed"
# renderer = ["html"]
#
# [output.html]
# default-theme = "ayu"
# preferred-dark-theme = "ayu"
# git-repository-url = "https://github.com/hinto-janai/cuprate-user"
# additional-css = ["last-changed.css"]

View file

@ -0,0 +1,3 @@
# Summary
- [TODO](todo.md)

1
books/user/src/todo.md Normal file
View file

@ -0,0 +1 @@
# TODO

View file

@ -0,0 +1,23 @@
[package]
name = "cuprate-fast-sync"
version = "0.1.0"
edition = "2021"
license = "MIT"
[[bin]]
name = "cuprate-fast-sync-create-hashes"
path = "src/create.rs"
[dependencies]
clap = { workspace = true, features = ["derive", "std"] }
cuprate-blockchain = { path = "../../storage/cuprate-blockchain" }
cuprate-types = { path = "../../types" }
hex.workspace = true
hex-literal.workspace = true
rayon.workspace = true
sha3 = "0.10.8"
tokio = { workspace = true, features = ["full"] }
tower.workspace = true
[dev-dependencies]
tokio-test = "0.4.4"

View file

@ -0,0 +1,87 @@
use std::{fmt::Write, fs::write};
use clap::Parser;
use tower::{Service, ServiceExt};
use cuprate_blockchain::{config::ConfigBuilder, service::DatabaseReadHandle, RuntimeError};
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes};
const BATCH_SIZE: u64 = 512;
/// Reads `BATCH_SIZE` consecutive block hashes from the database,
/// starting at height `height_from`.
///
/// # Errors
/// Forwards any [`RuntimeError`] produced by the database read handle.
async fn read_batch(
    handle: &mut DatabaseReadHandle,
    height_from: u64,
) -> Result<Vec<BlockId>, RuntimeError> {
    let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE as usize);

    for height in height_from..(height_from + BATCH_SIZE) {
        // Wait for the service to be ready, then issue the request.
        let response = handle
            .ready()
            .await?
            .call(BCReadRequest::BlockHash(height))
            .await?;

        // The service contract guarantees a `BlockHash` response to a
        // `BlockHash` request.
        if let BCResponse::BlockHash(block_id) = response {
            block_ids.push(block_id);
        } else {
            unreachable!();
        }
    }

    Ok(block_ids)
}
/// Renders the hashes as a Rust array literal of `hex!` invocations,
/// ready to be `include!`d by `fast_sync.rs`.
fn generate_hex(hashes: &[HashOfHashes]) -> String {
    let mut out = String::from("[\n");

    for hash in hashes {
        out.push_str("\thex!(\"");
        out.push_str(&hex::encode(hash));
        out.push_str("\"),\n");
    }

    out.push_str("]\n");
    out
}
/// CLI arguments for the hash-of-hashes generator.
#[derive(Parser)]
#[command(version, about, long_about = None)]
struct Args {
    // The height up to which hashes are generated; only complete
    // `BATCH_SIZE` batches below this height are emitted.
    #[arg(short, long)]
    height: u64,
}

#[tokio::main]
async fn main() {
    let args = Args::parse();
    let height_target = args.height;

    // Open the blockchain database with the default configuration.
    let config = ConfigBuilder::new().build();

    let (mut read_handle, _) = cuprate_blockchain::service::init(config).unwrap();

    let mut hashes_of_hashes = Vec::new();

    let mut height = 0u64;

    // Hash the chain in `BATCH_SIZE` chunks until the target height is
    // reached or the database cannot supply the next batch.
    while height < height_target {
        match read_batch(&mut read_handle, height).await {
            Ok(block_ids) => {
                let hash = hash_of_hashes(block_ids.as_slice());
                hashes_of_hashes.push(hash);
            }
            Err(_) => {
                println!("Failed to read next batch from database");
                break;
            }
        }
        height += BATCH_SIZE;
    }

    drop(read_handle);

    let generated = generate_hex(&hashes_of_hashes);
    // NOTE(review): the output path is relative to the current working
    // directory — run this binary from the crate root.
    write("src/data/hashes_of_hashes", generated).expect("Could not write file");

    println!("Generated hashes up to block height {}", height);
}

View file

@ -0,0 +1,12 @@
[
hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
]

View file

@ -0,0 +1,216 @@
use std::{
cmp,
future::Future,
pin::Pin,
task::{Context, Poll},
};
#[allow(unused_imports)]
use hex_literal::hex;
use tower::Service;
use crate::{hash_of_hashes, BlockId, HashOfHashes};
// In production the hashes of hashes are generated offline (see `create.rs`)
// and compiled in from the data file.
#[cfg(not(test))]
static HASHES_OF_HASHES: &[HashOfHashes] = &include!("./data/hashes_of_hashes");

#[cfg(not(test))]
const BATCH_SIZE: usize = 512;

// Small fixture values so the tests below can cover every code path cheaply.
#[cfg(test)]
static HASHES_OF_HASHES: &[HashOfHashes] = &[
    hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
    hex!("0102030405060708010203040506070801020304050607080102030405060708"),
    hex!("0102030405060708010203040506070801020304050607080102030405060708"),
];

#[cfg(test)]
const BATCH_SIZE: usize = 4;

/// The exclusive upper bound on block heights covered by [`HASHES_OF_HASHES`].
#[inline]
fn max_height() -> u64 {
    (HASHES_OF_HASHES.len() * BATCH_SIZE) as u64
}
/// A request to the fast-sync service.
pub enum FastSyncRequest {
    /// Validate a contiguous run of block hashes starting at `start_height`.
    ValidateHashes {
        /// The height of the first hash in `block_ids`.
        start_height: u64,
        /// The block hashes to validate, in ascending height order.
        block_ids: Vec<BlockId>,
    },
}

/// A block ID that has been validated against the known hashes of hashes.
#[derive(Debug, PartialEq)]
pub struct ValidBlockId(BlockId);
fn valid_block_ids(block_ids: &[BlockId]) -> Vec<ValidBlockId> {
block_ids.iter().map(|b| ValidBlockId(*b)).collect()
}
/// A response from the fast-sync service.
#[derive(Debug, PartialEq)]
pub enum FastSyncResponse {
    /// The result of a [`FastSyncRequest::ValidateHashes`] request.
    ValidateHashes {
        /// Hashes covered by complete, matching batches.
        validated_hashes: Vec<ValidBlockId>,
        /// Trailing hashes that did not form a complete batch.
        unknown_hashes: Vec<BlockId>,
    },
}

/// Errors the fast-sync service can return.
#[derive(Debug, PartialEq)]
pub enum FastSyncError {
    InvalidStartHeight, // start_height not a multiple of BATCH_SIZE
    Mismatch,           // hash does not match
    NothingToDo,        // no complete batch to check
    OutOfRange,         // start_height too high
}

/// The fast-sync [`Service`].
#[allow(dead_code)]
pub struct FastSyncService<C> {
    // The blockchain-context service.
    context_svc: C,
}
impl<C> FastSyncService<C>
where
    C: Service<FastSyncRequest, Response = FastSyncResponse, Error = FastSyncError>
        + Clone
        + Send
        + 'static,
{
    /// Creates a new [`FastSyncService`] wrapping the given context service.
    #[allow(dead_code)]
    pub(crate) fn new(context_svc: C) -> FastSyncService<C> {
        FastSyncService { context_svc }
    }
}
impl<C> Service<FastSyncRequest> for FastSyncService<C>
where
    C: Service<FastSyncRequest, Response = FastSyncResponse, Error = FastSyncError>
        + Clone
        + Send
        + 'static,
    C::Future: Send + 'static,
{
    type Response = FastSyncResponse;
    type Error = FastSyncError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    // Validation is pure CPU work over compiled-in data, so the service
    // is always ready.
    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: FastSyncRequest) -> Self::Future {
        Box::pin(async move {
            match req {
                FastSyncRequest::ValidateHashes {
                    start_height,
                    block_ids,
                } => validate_hashes(start_height, &block_ids).await,
            }
        })
    }
}
/// Validates `block_ids` (starting at `start_height`) against the
/// compiled-in [`HASHES_OF_HASHES`].
///
/// Hashes covered by complete, matching batches are returned as validated;
/// the trailing hashes that do not form a complete batch are returned as
/// unknown.
///
/// # Errors
/// - [`FastSyncError::InvalidStartHeight`] if `start_height` is not a multiple of `BATCH_SIZE`.
/// - [`FastSyncError::OutOfRange`] if `start_height` is at or beyond the last known batch.
/// - [`FastSyncError::NothingToDo`] if `block_ids` covers no complete batch.
/// - [`FastSyncError::Mismatch`] if a batch's hash-of-hashes differs from the expected value.
async fn validate_hashes(
    start_height: u64,
    block_ids: &[BlockId],
) -> Result<FastSyncResponse, FastSyncError> {
    // Batches are anchored at multiples of BATCH_SIZE.
    if start_height as usize % BATCH_SIZE != 0 {
        return Err(FastSyncError::InvalidStartHeight);
    }

    if start_height >= max_height() {
        return Err(FastSyncError::OutOfRange);
    }

    let stop_height = start_height as usize + block_ids.len();

    let batch_from = start_height as usize / BATCH_SIZE;
    // Only complete batches can be checked; cap at the number of known batches.
    let batch_to = cmp::min(stop_height / BATCH_SIZE, HASHES_OF_HASHES.len());
    let n_batches = batch_to - batch_from;

    if n_batches == 0 {
        return Err(FastSyncError::NothingToDo);
    }

    // Compare each complete batch's hash-of-hashes against the expected value.
    for i in 0..n_batches {
        let batch = &block_ids[BATCH_SIZE * i..BATCH_SIZE * (i + 1)];
        let actual = hash_of_hashes(batch);
        let expected = HASHES_OF_HASHES[batch_from + i];

        if expected != actual {
            return Err(FastSyncError::Mismatch);
        }
    }

    // Everything inside complete batches is validated; the tail stays unknown.
    let validated_hashes = valid_block_ids(&block_ids[..n_batches * BATCH_SIZE]);
    let unknown_hashes = block_ids[n_batches * BATCH_SIZE..].to_vec();

    Ok(FastSyncResponse::ValidateHashes {
        validated_hashes,
        unknown_hashes,
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    use tokio_test::block_on;

    // Covers every error path except `Mismatch` (BATCH_SIZE is 4 in tests,
    // and the test fixture has 3 batches, so max_height() is 12).
    #[test]
    fn test_validate_hashes_errors() {
        let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
        // 3 is not a multiple of BATCH_SIZE.
        assert_eq!(
            block_on(validate_hashes(3, &[])),
            Err(FastSyncError::InvalidStartHeight)
        );
        assert_eq!(
            block_on(validate_hashes(3, &ids)),
            Err(FastSyncError::InvalidStartHeight)
        );

        // 20 >= max_height().
        assert_eq!(
            block_on(validate_hashes(20, &[])),
            Err(FastSyncError::OutOfRange)
        );
        assert_eq!(
            block_on(validate_hashes(20, &ids)),
            Err(FastSyncError::OutOfRange)
        );

        // Fewer than BATCH_SIZE hashes: no complete batch to check.
        assert_eq!(
            block_on(validate_hashes(4, &[])),
            Err(FastSyncError::NothingToDo)
        );
        assert_eq!(
            block_on(validate_hashes(4, &ids[..3])),
            Err(FastSyncError::NothingToDo)
        );
    }

    // The first 4 ids form a batch matching the fixture's first entry;
    // the 5th id does not complete a batch and is returned as unknown.
    #[test]
    fn test_validate_hashes_success() {
        let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
        let validated_hashes = valid_block_ids(&ids[0..4]);
        let unknown_hashes = ids[4..].to_vec();
        assert_eq!(
            block_on(validate_hashes(0, &ids)),
            Ok(FastSyncResponse::ValidateHashes {
                validated_hashes,
                unknown_hashes
            })
        );
    }

    // A wrong hash inside a complete batch must be rejected, no matter
    // which batch it lands in.
    #[test]
    fn test_validate_hashes_mismatch() {
        let ids = [
            [1u8; 32], [2u8; 32], [3u8; 32], [5u8; 32], [1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32],
        ];
        assert_eq!(
            block_on(validate_hashes(0, &ids)),
            Err(FastSyncError::Mismatch)
        );
        assert_eq!(
            block_on(validate_hashes(4, &ids)),
            Err(FastSyncError::Mismatch)
        );
    }
}

View file

@ -0,0 +1,4 @@
//! Fast-sync: validates batches of block hashes against known-good
//! compiled-in checkpoints.
pub mod fast_sync;
pub mod util;

// Re-export the commonly used helpers/types at the crate root.
pub use util::{hash_of_hashes, BlockId, HashOfHashes};

View file

@ -0,0 +1,8 @@
use sha3::{Digest, Keccak256};

/// A block's hash.
pub type BlockId = [u8; 32];
/// The Keccak-256 hash of a batch of block hashes.
pub type HashOfHashes = [u8; 32];

/// Hashes a list of block IDs with Keccak-256.
///
/// Streams each 32-byte hash into the hasher instead of concatenating
/// the whole input into a temporary buffer first — the digest over the
/// same byte stream is identical, without the extra allocation.
pub fn hash_of_hashes(hashes: &[BlockId]) -> HashOfHashes {
    let mut hasher = Keccak256::new();
    for hash in hashes {
        hasher.update(hash);
    }
    hasher.finalize().into()
}

View file

@ -0,0 +1,29 @@
use multiexp::BatchVerifier as InternalBatchVerifier;
/// This trait represents a batch verifier.
///
/// A batch verifier is used to speed up verification by verifying multiple transactions together.
///
/// Not all proofs can be batched and at its core it's intended to verify a series of statements are
/// each equivalent to zero.
pub trait BatchVerifier {
/// Queue a statement for batch verification.
///
/// # Panics
/// This function may panic if `stmt` contains calls to `rayon`'s parallel iterators, e.g. `par_iter()`.
// TODO: remove the panics by adding a generic API upstream.
fn queue_statement<R>(
&mut self,
stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
) -> R;
}
// Impl this for a single-threaded batch verifier.
impl BatchVerifier for &'_ mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint> {
    fn queue_statement<R>(
        &mut self,
        stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
    ) -> R {
        // With exclusive access there is nothing to synchronise: hand the
        // verifier straight to the statement.
        stmt(self)
    }
}

View file

@ -1,5 +1,6 @@
use std::time::{SystemTime, UNIX_EPOCH};
pub mod batch_verifier;
pub mod blocks;
mod decomposed_amount;
pub mod genesis;

View file

@ -3,10 +3,10 @@ use std::cmp::Ordering;
use monero_serai::ringct::RctType;
use monero_serai::transaction::{Input, Output, Timelock, Transaction};
use multiexp::BatchVerifier;
use crate::{
blocks::penalty_free_zone, check_point_canonically_encoded, is_decomposed_amount, HardFork,
batch_verifier::BatchVerifier, blocks::penalty_free_zone, check_point_canonically_encoded,
is_decomposed_amount, HardFork,
};
mod contextual_data;
@ -606,7 +606,7 @@ pub fn check_transaction_semantic(
tx_weight: usize,
tx_hash: &[u8; 32],
hf: &HardFork,
verifier: &mut BatchVerifier<(), dalek_ff_group::EdwardsPoint>,
verifier: impl BatchVerifier,
) -> Result<u64, TransactionError> {
// <https://monero-book.cuprate.org/consensus_rules/transactions.html#transaction-size>
if tx_blob_size > MAX_TX_BLOB_SIZE

View file

@ -9,12 +9,11 @@ use monero_serai::{
transaction::{Input, Transaction},
H,
};
use multiexp::BatchVerifier;
use rand::thread_rng;
#[cfg(feature = "rayon")]
use rayon::prelude::*;
use crate::{transactions::Rings, try_par_iter, HardFork};
use crate::{batch_verifier::BatchVerifier, transactions::Rings, try_par_iter, HardFork};
/// This constant contains the IDs of 2 transactions that should be allowed after the fork
/// in which the ringCT type they used was banned.
@ -91,7 +90,7 @@ fn simple_type_balances(rct_sig: &RctSignatures) -> Result<(), RingCTError> {
/// <https://monero-book.cuprate.org/consensus_rules/ring_ct/bulletproofs+.html>
fn check_output_range_proofs(
rct_sig: &RctSignatures,
verifier: &mut BatchVerifier<(), dalek_ff_group::EdwardsPoint>,
mut verifier: impl BatchVerifier,
) -> Result<(), RingCTError> {
let commitments = &rct_sig.base.commitments;
@ -109,7 +108,9 @@ fn check_output_range_proofs(
}),
RctPrunable::MlsagBulletproofs { bulletproofs, .. }
| RctPrunable::Clsag { bulletproofs, .. } => {
if bulletproofs.batch_verify(&mut thread_rng(), verifier, (), commitments) {
if verifier.queue_statement(|verifier| {
bulletproofs.batch_verify(&mut thread_rng(), verifier, (), commitments)
}) {
Ok(())
} else {
Err(RingCTError::BulletproofsRangeInvalid)
@ -121,7 +122,7 @@ fn check_output_range_proofs(
pub(crate) fn ring_ct_semantic_checks(
tx: &Transaction,
tx_hash: &[u8; 32],
verifier: &mut BatchVerifier<(), dalek_ff_group::EdwardsPoint>,
verifier: impl BatchVerifier,
hf: &HardFork,
) -> Result<(), RingCTError> {
let rct_type = tx.rct_signatures.rct_type();

View file

@ -4,8 +4,6 @@ use multiexp::BatchVerifier as InternalBatchVerifier;
use rayon::prelude::*;
use thread_local::ThreadLocal;
use crate::ConsensusError;
/// A multithreaded batch verifier.
pub struct MultiThreadedBatchVerifier {
internal: ThreadLocal<RefCell<InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>>>,
@ -19,19 +17,6 @@ impl MultiThreadedBatchVerifier {
}
}
pub fn queue_statement<R>(
&self,
stmt: impl FnOnce(
&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>,
) -> Result<R, ConsensusError>,
) -> Result<R, ConsensusError> {
let verifier_cell = self
.internal
.get_or(|| RefCell::new(InternalBatchVerifier::new(8)));
// TODO: this is not ok as a rayon par_iter could be called in stmt.
stmt(verifier_cell.borrow_mut().deref_mut())
}
pub fn verify(self) -> bool {
self.internal
.into_iter()
@ -41,3 +26,17 @@ impl MultiThreadedBatchVerifier {
.is_none()
}
}
impl cuprate_consensus_rules::batch_verifier::BatchVerifier for &'_ MultiThreadedBatchVerifier {
fn queue_statement<R>(
&mut self,
stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
) -> R {
let mut verifier = self
.internal
.get_or(|| RefCell::new(InternalBatchVerifier::new(32)))
.borrow_mut();
stmt(verifier.deref_mut())
}
}

View file

@ -7,27 +7,91 @@ use std::{
task::{Context, Poll},
};
use cuprate_helper::asynch::rayon_spawn_async;
use futures::FutureExt;
use monero_serai::{block::Block, transaction::Input};
use monero_serai::{
block::Block,
transaction::{Input, Transaction},
};
use rayon::prelude::*;
use tower::{Service, ServiceExt};
use tracing::instrument;
use cuprate_consensus_rules::{
blocks::{calculate_pow_hash, check_block, check_block_pow, BlockError, RandomX},
blocks::{
calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height,
randomx_seed_height, BlockError, RandomX,
},
miner_tx::MinerTxError,
ConsensusError, HardFork,
};
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use crate::{
context::{BlockChainContextRequest, BlockChainContextResponse},
context::{
rx_vms::RandomXVM, BlockChainContextRequest, BlockChainContextResponse,
RawBlockChainContext,
},
transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse},
Database, ExtendedConsensusError,
};
/// A pre-prepared block with all data needed to verify it, except the block's proof of work.
#[derive(Debug)]
pub struct PreparedBlockExPow {
/// The block.
pub block: Block,
/// The serialised block's bytes.
pub block_blob: Vec<u8>,
/// The block's hard-fork vote.
pub hf_vote: HardFork,
/// The block's hard-fork version.
pub hf_version: HardFork,
/// The block's hash.
pub block_hash: [u8; 32],
/// The height of the block.
pub height: u64,
/// The weight of the block's miner transaction.
pub miner_tx_weight: usize,
}
impl PreparedBlockExPow {
/// Prepare a new block.
///
/// # Errors
/// This errors if either the `block`'s:
/// - Hard-fork values are invalid
/// - Miner transaction is missing a miner input
pub fn new(block: Block) -> Result<PreparedBlockExPow, ConsensusError> {
let (hf_version, hf_vote) =
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputNotOfTypeGen,
)))?
};
Ok(PreparedBlockExPow {
block_blob: block.serialize(),
hf_vote,
hf_version,
block_hash: block.hash(),
height: *height,
miner_tx_weight: block.miner_tx.weight(),
block,
})
}
}
/// A pre-prepared block with all data needed to verify it.
#[derive(Debug)]
pub struct PrePreparedBlock {
pub struct PreparedBlock {
/// The block
pub block: Block,
/// The serialised blocks bytes
@ -47,15 +111,15 @@ pub struct PrePreparedBlock {
pub miner_tx_weight: usize,
}
impl PrePreparedBlock {
/// Creates a new [`PrePreparedBlock`].
impl PreparedBlock {
/// Creates a new [`PreparedBlock`].
///
/// The randomX VM must be Some if RX is needed or this will panic.
/// The randomX VM must also be initialised with the correct seed.
fn new<R: RandomX>(
block: Block,
randomx_vm: Option<&R>,
) -> Result<PrePreparedBlock, ConsensusError> {
) -> Result<PreparedBlock, ConsensusError> {
let (hf_version, hf_vote) =
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
@ -65,7 +129,7 @@ impl PrePreparedBlock {
)))?
};
Ok(PrePreparedBlock {
Ok(PreparedBlock {
block_blob: block.serialize(),
hf_vote,
hf_version,
@ -82,6 +146,36 @@ impl PrePreparedBlock {
block,
})
}
/// Creates a new [`PreparedBlock`] from a [`PreparedBlockExPow`].
///
/// This function will give an invalid PoW hash if `randomx_vm` is not initialised
/// with the correct seed.
///
/// # Panics
/// This function will panic if `randomx_vm` is
/// [`None`] even though RandomX is needed.
fn new_prepped<R: RandomX>(
block: PreparedBlockExPow,
randomx_vm: Option<&R>,
) -> Result<PreparedBlock, ConsensusError> {
Ok(PreparedBlock {
block_blob: block.block_blob,
hf_vote: block.hf_vote,
hf_version: block.hf_version,
block_hash: block.block_hash,
pow_hash: calculate_pow_hash(
randomx_vm,
&block.block.serialize_hashable(),
block.height,
&block.hf_version,
)?,
miner_tx_weight: block.block.miner_tx.weight(),
block: block.block,
})
}
}
/// A request to verify a block.
@ -91,12 +185,28 @@ pub enum VerifyBlockRequest {
block: Block,
prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
},
/// Verifies a prepared block.
MainChainPrepped {
/// The already prepared block.
block: PreparedBlock,
/// The full list of transactions for this block, in the order given in `block`.
txs: Vec<Arc<TransactionVerificationData>>,
},
/// Batch prepares a list of blocks and transactions for verification.
MainChainBatchPrepareBlocks {
/// The list of blocks and their transactions (not necessarily in the order given in the block).
blocks: Vec<(Block, Vec<Transaction>)>,
},
}
/// A response from a verify block request.
#[allow(clippy::large_enum_variant)] // The largest variant is most common ([`MainChain`])
pub enum VerifyBlockResponse {
/// This block is valid.
MainChain(VerifiedBlockInformation),
/// A list of prepared blocks for verification, you should call [`VerifyBlockRequest::MainChainPrepped`] on each of the returned
/// blocks to fully verify them.
MainChainBatchPrepped(Vec<(PreparedBlock, Vec<Arc<TransactionVerificationData>>)>),
}
/// The block verifier service.
@ -178,17 +288,188 @@ where
} => {
verify_main_chain_block(block, prepared_txs, context_svc, tx_verifier_svc).await
}
VerifyBlockRequest::MainChainBatchPrepareBlocks { blocks } => {
batch_prepare_main_chain_block(blocks, context_svc).await
}
VerifyBlockRequest::MainChainPrepped { block, txs } => {
verify_prepped_main_chain_block(block, txs, context_svc, tx_verifier_svc, None)
.await
}
}
}
.boxed()
}
}
/// Batch prepares a list of blocks for verification.
#[instrument(level = "debug", name = "batch_prep_blocks", skip_all, fields(amt = blocks.len()))]
async fn batch_prepare_main_chain_block<C>(
blocks: Vec<(Block, Vec<Transaction>)>,
mut context_svc: C,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where
C: Service<
BlockChainContextRequest,
Response = BlockChainContextResponse,
Error = tower::BoxError,
> + Send
+ 'static,
C::Future: Send + 'static,
{
let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
tracing::debug!("Calculating block hashes.");
let blocks: Vec<PreparedBlockExPow> = rayon_spawn_async(|| {
blocks
.into_iter()
.map(PreparedBlockExPow::new)
.collect::<Result<Vec<_>, _>>()
})
.await?;
// A Vec of (timestamp, HF) for each block to calculate the expected difficulty for each block.
let mut timestamps_hfs = Vec::with_capacity(blocks.len());
let mut new_rx_vm = None;
tracing::debug!("Checking blocks follow each other.");
// For every block make sure they have the correct height and previous ID
for window in blocks.windows(2) {
let block_0 = &window[0];
let block_1 = &window[1];
if block_0.block_hash != block_1.block.header.previous
|| block_0.height != block_1.height - 1
{
tracing::debug!("Blocks do not follow each other, verification failed.");
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
}
// Cache any potential RX VM seeds as we may need them for future blocks in the batch.
if is_randomx_seed_height(block_0.height) {
new_rx_vm = Some((block_0.height, block_0.block_hash));
}
timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version))
}
// Get the current blockchain context.
let BlockChainContextResponse::Context(checked_context) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::GetContext)
.await
.map_err(Into::<ExtendedConsensusError>::into)?
else {
panic!("Context service returned wrong response!");
};
// Calculate the expected difficulties for each block in the batch.
let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::BatchGetDifficulties(
timestamps_hfs,
))
.await
.map_err(Into::<ExtendedConsensusError>::into)?
else {
panic!("Context service returned wrong response!");
};
let context = checked_context.unchecked_blockchain_context().clone();
// Make sure the blocks follow the main chain.
if context.chain_height != blocks[0].height {
tracing::debug!("Blocks do not follow main chain, verification failed.");
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputsHeightIncorrect,
)))?;
}
if context.top_hash != blocks[0].block.header.previous {
tracing::debug!("Blocks do not follow main chain, verification failed.");
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
}
let mut rx_vms = context.rx_vms;
// If we have a RX seed in the batch calculate it.
if let Some((new_vm_height, new_vm_seed)) = new_rx_vm {
tracing::debug!("New randomX seed in batch, initialising VM");
let new_vm = rayon_spawn_async(move || {
Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
})
.await;
context_svc
.ready()
.await?
.call(BlockChainContextRequest::NewRXVM((
new_vm_seed,
new_vm.clone(),
)))
.await
.map_err(Into::<ExtendedConsensusError>::into)?;
rx_vms.insert(new_vm_height, new_vm);
}
tracing::debug!("Calculating PoW and prepping transaction");
let blocks = rayon_spawn_async(move || {
blocks
.into_par_iter()
.zip(difficulties)
.zip(txs)
.map(|((block, difficultly), txs)| {
// Calculate the PoW for the block.
let height = block.height;
let block = PreparedBlock::new_prepped(
block,
rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
)?;
// Check the PoW
check_block_pow(&block.pow_hash, difficultly).map_err(ConsensusError::Block)?;
// Now setup the txs.
let mut txs = txs
.into_par_iter()
.map(|tx| {
let tx = TransactionVerificationData::new(tx)?;
Ok::<_, ConsensusError>((tx.tx_hash, tx))
})
.collect::<Result<HashMap<_, _>, _>>()?;
// Order the txs correctly.
let mut ordered_txs = Vec::with_capacity(txs.len());
for tx_hash in &block.block.txs {
let tx = txs
.remove(tx_hash)
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
ordered_txs.push(Arc::new(tx));
}
Ok((block, ordered_txs))
})
.collect::<Result<Vec<_>, ExtendedConsensusError>>()
})
.await?;
Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks))
}
/// Verifies a prepared block.
async fn verify_main_chain_block<C, TxV>(
block: Block,
mut txs: HashMap<[u8; 32], TransactionVerificationData>,
context_svc: C,
mut context_svc: C,
tx_verifier_svc: TxV,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where
@ -201,8 +482,85 @@ where
C::Future: Send + 'static,
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
{
tracing::debug!("getting blockchain context");
let BlockChainContextResponse::Context(checked_context) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::GetContext)
.await?
else {
panic!("Context service returned wrong response!");
};
let context = checked_context.unchecked_blockchain_context().clone();
tracing::debug!("got blockchain context: {:?}", context);
tracing::debug!(
"Preparing block for verification, expected height: {}",
context.chain_height
);
// Set up the block and just pass it to [`verify_prepped_main_chain_block`]
let rx_vms = context.rx_vms.clone();
let height = context.chain_height;
let prepped_block = rayon_spawn_async(move || {
PreparedBlock::new(
block,
rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
)
})
.await?;
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
.map_err(ConsensusError::Block)?;
// Check that the txs included are what we need and that there are not any extra.
let mut ordered_txs = Vec::with_capacity(txs.len());
tracing::debug!("Ordering transactions for block.");
if !prepped_block.block.txs.is_empty() {
for tx_hash in &prepped_block.block.txs {
let tx = txs
.remove(tx_hash)
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
ordered_txs.push(Arc::new(tx));
}
drop(txs);
}
verify_prepped_main_chain_block(
prepped_block,
ordered_txs,
context_svc,
tx_verifier_svc,
Some(context),
)
.await
}
async fn verify_prepped_main_chain_block<C, TxV>(
prepped_block: PreparedBlock,
txs: Vec<Arc<TransactionVerificationData>>,
context_svc: C,
tx_verifier_svc: TxV,
cached_context: Option<RawBlockChainContext>,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where
C: Service<
BlockChainContextRequest,
Response = BlockChainContextResponse,
Error = tower::BoxError,
> + Send
+ 'static,
C::Future: Send + 'static,
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
{
let context = if let Some(context) = cached_context {
context
} else {
let BlockChainContextResponse::Context(checked_context) = context_svc
.oneshot(BlockChainContextRequest::GetContext)
.await
@ -212,52 +570,42 @@ where
};
let context = checked_context.unchecked_blockchain_context().clone();
tracing::debug!("got blockchain context: {:?}", context);
// Set up the block and just pass it to [`verify_main_chain_block_prepared`]
tracing::debug!("got blockchain context: {context:?}");
let rx_vms = context.rx_vms.clone();
let height = context.chain_height;
let prepped_block = rayon_spawn_async(move || {
PrePreparedBlock::new(block, rx_vms.get(&height).map(AsRef::as_ref))
})
.await?;
context
};
tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
.map_err(ConsensusError::Block)?;
// Check that the txs included are what we need and that there are not any extra.
let mut ordered_txs = Vec::with_capacity(txs.len());
tracing::debug!("Checking we have correct transactions for block.");
for tx_hash in &prepped_block.block.txs {
let tx = txs
.remove(tx_hash)
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
ordered_txs.push(Arc::new(tx));
if prepped_block.block.txs.len() != txs.len() {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}
drop(txs);
tracing::debug!("Verifying transactions for block.");
if !prepped_block.block.txs.is_empty() {
for (expected_tx_hash, tx) in prepped_block.block.txs.iter().zip(txs.iter()) {
if expected_tx_hash != &tx.tx_hash {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}
}
tx_verifier_svc
.oneshot(VerifyTxRequest::Prepped {
txs: ordered_txs.clone(),
txs: txs.clone(),
current_chain_height: context.chain_height,
top_hash: context.top_hash,
time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
hf: context.current_hf,
})
.await?;
}
let block_weight =
prepped_block.miner_tx_weight + ordered_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
let total_fees = ordered_txs.iter().map(|tx| tx.fee).sum::<u64>();
prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
tracing::debug!("Verifying block header.");
let (_, generated_coins) = check_block(
@ -273,7 +621,7 @@ where
block_hash: prepped_block.block_hash,
block: prepped_block.block,
block_blob: prepped_block.block_blob,
txs: ordered_txs
txs: txs
.into_iter()
.map(|tx| {
// Note: it would be possible for the transaction verification service to hold onto the tx after the call

View file

@ -484,26 +484,23 @@ where
batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;
rayon_spawn_async(move || {
let batch_veriifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads());
let batch_verifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads());
txs.par_iter()
.zip(txs_ring_member_info.par_iter())
.try_for_each(|((tx, verification_needed), ring)| {
// do semantic validation if needed.
if *verification_needed == VerificationNeeded::SemanticAndContextual {
batch_veriifier.queue_statement(|verifier| {
let fee = check_transaction_semantic(
&tx.tx,
tx.tx_blob.len(),
tx.tx_weight,
&tx.tx_hash,
&hf,
verifier,
&batch_verifier,
)?;
// make sure monero-serai calculated the same fee.
assert_eq!(fee, tx.fee);
Ok(())
})?;
}
// Both variants of `VerificationNeeded` require contextual validation.
@ -518,7 +515,7 @@ where
Ok::<_, ConsensusError>(())
})?;
if !batch_veriifier.verify() {
if !batch_verifier.verify() {
return Err(ExtendedConsensusError::OneOrMoreBatchVerificationStatementsInvalid);
}