Mirror of https://github.com/hinto-janai/cuprate.git (synced 2025-03-30 19:08:47 +00:00)

Commit 366c4a3ea4: Merge branch 'main' into other

131 changed files with 2738 additions and 898 deletions
Changed paths:
  .github/
  .gitignore
  Cargo.lock
  Cargo.toml
  README.md
  binaries/cuprated/
    Cargo.toml
    config/
    src/
      blockchain.rs
      blockchain/
      commands.rs
      config.rs
      config/
      constants.rs
      killswitch.rs
      logging.rs
      main.rs
      rpc/service
      txpool/
  books/
    architecture/src/resources/cap
    user/
  consensus/
  cryptonight/src
  deny.toml
  helper/src
  misc/
  net/
  p2p/
    dandelion-tower/src
    p2p-core/src
.github/actions/monerod-download/action.yml (vendored, 3 changes)

@@ -29,7 +29,7 @@ runs:
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
         path: |
           monerod
@@ -48,6 +48,7 @@ runs:
         "Windows X86")  FILE=monero-win-x86-${{ inputs.version }}.zip ;;
         "Linux X64")    FILE=monero-linux-x64-${{ inputs.version }}.tar.bz2 ;;
         "Linux X86")    FILE=monero-linux-x86-${{ inputs.version }}.tar.bz2 ;;
+        "Linux ARM64")  FILE=monero-linux-armv8-${{ inputs.version }}.tar.bz2 ;;
         "macOS X64")    FILE=monero-mac-x64-${{ inputs.version }}.tar.bz2 ;;
         "macOS ARM64")  FILE=monero-mac-armv8-${{ inputs.version }}.tar.bz2 ;;
         *) exit 1 ;;
.github/workflows/ci.yml (vendored, 65 changes)

@@ -84,23 +84,13 @@ jobs:
   strategy:
     matrix:
-      os: [windows-latest, macos-latest, ubuntu-latest]
-      include:
-        - os: windows-latest
-          shell: msys2 {0}
-          # GNU Windows is used as we need
-          # `unistd.h` and more in `cryptonight/`.
-          rust: stable-x86_64-pc-windows-gnu
-        - os: macos-latest
-          shell: bash
-          rust: stable
-        - os: ubuntu-latest
-          shell: bash
-          rust: stable
-
-  defaults:
-    run:
-      shell: ${{ matrix.shell }}
+      os: [
+        windows-2022,     # EOL = 2026-10-13 | <https://endoflife.date/windows-server>
+        macos-15,         # EOL = 2027-09-16 | <https://endoflife.date/macos>
+        macos-13,         # EOL = 2025-10-24 | For x64.
+        ubuntu-22.04,     # EOL = 2027-04-01 | <https://endoflife.date/ubuntu>
+        ubuntu-22.04-arm,
+      ]

   steps:
     - name: Checkout

@@ -111,7 +101,7 @@ jobs:
     - name: Install Rust
       uses: dtolnay/rust-toolchain@master
       with:
-        toolchain: ${{ matrix.rust }}
+        toolchain: stable
         components: clippy

     - name: Cache

@@ -123,37 +113,9 @@ jobs:
     - name: Download monerod
       uses: ./.github/actions/monerod-download

-    # Packages other than `Boost` used by `Monero` are listed here.
-    # https://github.com/monero-project/monero/blob/c444a7e002036e834bfb4c68f04a121ce1af5825/.github/workflows/build.yml#L71
-
-    - name: Install dependencies (Linux)
-      if: matrix.os == 'ubuntu-latest'
-      run: sudo apt install -y libboost-dev
-
-    - name: Install dependencies (macOS)
-      if: matrix.os == 'macos-latest'
-      run: HOMEBREW_NO_AUTO_UPDATE=1 brew install boost
-
     - name: Install dependencies (Windows)
-      if: matrix.os == 'windows-latest'
-      uses: msys2/setup-msys2@v2
-      with:
-        path-type: inherit
-        update: true
-        install: mingw-w64-x86_64-toolchain mingw-w64-x86_64-boost msys2-runtime-devel git mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja
-
-    # HACK: 2024-05-14
-    # GCC 14+ fails to build `lmdb-master-sys` with no clear error message:
-    # <https://github.com/Cuprate/cuprate/pull/127>
-    #
-    # - MSYS2 repos carry older versions of packages
-    # - pacman lets us manually downgrade from package files
-    # - Note that `gcc` requires `gcc-libs`
-    - name: Downgrade to GCC 13.2 (Windows)
-      if: matrix.os == 'windows-latest'
-      run: |
-        wget https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
-        pacman -U --noconfirm mingw-w64-x86_64-gcc-13.2.0-6-any.pkg.tar.zst mingw-w64-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
+      if: matrix.os == 'windows-2022'
+      uses: lukka/get-cmake@v3.31.6 # Needed for `randomx-rs`

     - name: Documentation
       run: cargo doc --workspace --all-features --no-deps

@@ -167,11 +129,10 @@ jobs:
         cargo test --all-features --workspace
         cargo test --package cuprate-blockchain --no-default-features --features redb

-    - name: Build
-      run: cargo build --all-features --all-targets --workspace
-
     - name: Hack Check
       run: |
         cargo install cargo-hack --locked
         cargo hack --workspace check --feature-powerset --no-dev-deps

+    # TODO: upload binaries with `actions/upload-artifact@v3`
+    - name: Build
+      run: cargo build --all-features --all-targets --workspace
.github/workflows/release.yml (vendored, new file, 112 lines)

@@ -0,0 +1,112 @@
name: Release Builds

on:
  workflow_dispatch:
    inputs:
      commit:
        description: 'Commit to build'
        required: true
        type: string

env:
  # Show colored output in CI.
  CARGO_TERM_COLOR: always
  # The folder used to store release files - this will be uploaded.
  ARCHIVE: "__ARCHIVE"

jobs:
  build:
    strategy:
      matrix:
        os: [
          windows-2022,     # EOL = 2026-10-13 | <https://endoflife.date/windows-server>
          macos-15,         # EOL = 2027-09-16 | <https://endoflife.date/macos>
          macos-13,         # EOL = 2025-10-24 | For x64.
          ubuntu-22.04,     # EOL = 2027-04-01 | <https://endoflife.date/ubuntu>
          ubuntu-22.04-arm,
        ]

    runs-on: ${{ matrix.os }}

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
          ref: ${{ inputs.commit }}

      - name: Install Rust
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: stable

      - name: Build
        run: cargo build --release --package cuprated

      - name: Generate Archives
        run: |
          set -e -o pipefail # Exit on failures
          umask 0022         # 755 permissions
          export TZ=UTC      # UTC timezone

          # Reset archive directory in-case.
          rm -rf ${{ env.ARCHIVE }}
          mkdir -p ${{ env.ARCHIVE }}
          ARCHIVE=$(realpath ${{ env.ARCHIVE }})
          VERSION=$(grep version binaries/cuprated/Cargo.toml | grep -oE [0-9]+.[0-9]+.[0-9]+)

          # All archives have these files.
          cp LICENSE-AGPL target/release/LICENSE
          cp binaries/cuprated/config/Cuprated.toml target/release/

          OS=${{ matrix.os }}

          # Generate archives for Linux.
          if [ "$RUNNER_OS" == "Linux" ]; then
            # FIXME: <https://github.com/Cuprate/cuprate/issues/396>
            # cp binaries/cuprated/cuprated.service target/release/
            cd target/release

            if [ "$OS" == "ubuntu-22.04" ]; then
              NAME="cuprated-${VERSION}-linux-x64.tar.gz"
            else
              NAME="cuprated-${VERSION}-linux-arm64.tar.gz"
            fi

            # FIXME: #396
            # tar -czpf "$ARCHIVE/$NAME" cuprated LICENSE Cuprated.toml cuprated.service
            tar -czpf "$ARCHIVE/$NAME" cuprated LICENSE Cuprated.toml
          fi

          # Generate archives for macOS.
          if [ "$RUNNER_OS" == "macOS" ]; then
            cd target/release

            if [ "$OS" == "macos-15" ]; then
              NAME="cuprated-${VERSION}-macos-arm64.tar.gz"
            else
              NAME="cuprated-${VERSION}-macos-x64.tar.gz"
            fi

            tar -czpf "$ARCHIVE/$NAME" cuprated LICENSE Cuprated.toml
          fi

          # Generate archives for Windows.
          if [ "$RUNNER_OS" == "Windows" ]; then
            mv target/release/cuprated.exe target/release/
            cd target/release

            NAME="cuprated-${VERSION}-windows-x64.zip"
            powershell Compress-Archive -LiteralPath cuprated.exe, LICENSE, Cuprated.toml -DestinationPath "$ARCHIVE/$NAME"
          fi

      - name: Archive
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.os }}
          compression-level: 0
          path: ${{ env.ARCHIVE }}/**
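The archive-naming branches in the `Generate Archives` step form a small decision table over the runner OS and matrix entry. A Rust sketch of the same mapping for reference — the function and its signature are illustrative, not part of the repository:

```rust
// Illustrative only: mirrors the shell logic in the workflow above.
fn archive_name(runner_os: &str, matrix_os: &str, version: &str) -> Option<String> {
    let name = match (runner_os, matrix_os) {
        ("Linux", "ubuntu-22.04") => format!("cuprated-{version}-linux-x64.tar.gz"),
        ("Linux", _)              => format!("cuprated-{version}-linux-arm64.tar.gz"),
        ("macOS", "macos-15")     => format!("cuprated-{version}-macos-arm64.tar.gz"),
        ("macOS", _)              => format!("cuprated-{version}-macos-x64.tar.gz"),
        ("Windows", _)            => format!("cuprated-{version}-windows-x64.zip"),
        _ => return None, // unknown runner: the script exits instead
    };
    Some(name)
}

fn main() {
    assert_eq!(
        archive_name("Linux", "ubuntu-22.04-arm", "0.0.1").unwrap(),
        "cuprated-0.0.1-linux-arm64.tar.gz"
    );
}
```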
.gitignore (vendored, 1 change)

@@ -2,3 +2,4 @@ target/
 .vscode
 monerod
 books/*/book
+fast_sync_hashes.bin
Cargo.lock (generated, 61 changes)

@@ -354,9 +354,9 @@ dependencies = [
 [[package]]
 name = "cc"
-version = "1.2.4"
+version = "1.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9157bbaa6b165880c27a4293a474c91cdcf265cc68cc829bf10be0964a391caf"
+checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a"
 dependencies = [
  "shlex",
 ]

@@ -609,6 +609,7 @@ dependencies = [
 "curve25519-dalek",
 "hex",
 "hex-literal",
 "indexmap",
 "monero-serai",
 "pretty_assertions",
 "proptest",

@@ -635,6 +636,7 @@ dependencies = [
 "futures",
 "hex",
 "hex-literal",
 "indexmap",
 "monero-serai",
 "proptest",
 "proptest-derive",

@@ -682,6 +684,7 @@ dependencies = [
 "curve25519-dalek",
 "hex",
 "hex-literal",
 "indexmap",
 "monero-serai",
 "proptest",
 "proptest-derive",

@@ -772,19 +775,20 @@ dependencies = [
 name = "cuprate-fast-sync"
 version = "0.1.0"
 dependencies = [
  "blake3",
  "clap",
  "cuprate-blockchain",
  "cuprate-consensus",
  "cuprate-consensus-context",
  "cuprate-consensus-rules",
  "cuprate-helper",
  "cuprate-p2p",
  "cuprate-p2p-core",
  "cuprate-types",
  "hex",
  "hex-literal",
  "monero-serai",
- "sha3",
- "thiserror",
  "proptest",
  "tempfile",
  "tokio",
  "tokio-test",
  "tower 0.5.1",
 ]

@@ -1026,6 +1030,7 @@ dependencies = [
 "cuprate-hex",
 "curve25519-dalek",
 "hex-literal",
 "indexmap",
 "monero-serai",
 "pretty_assertions",
 "proptest",

@@ -1114,6 +1119,7 @@ dependencies = [
 "indexmap",
 "monero-address",
 "monero-serai",
 "nu-ansi-term",
 "paste",
 "pin-project",
 "rand",

@@ -1124,6 +1130,7 @@ dependencies = [
 "serde_bytes",
 "serde_json",
 "strum",
 "tempfile",
 "thiserror",
 "thread_local",
 "tokio",

@@ -1168,7 +1175,7 @@ dependencies = [
 [[package]]
 name = "dalek-ff-group"
 version = "0.4.1"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "crypto-bigint",
  "curve25519-dalek",

@@ -1333,7 +1340,7 @@ dependencies = [
 [[package]]
 name = "flexible-transcript"
 version = "0.3.2"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "blake2",
  "digest",

@@ -1855,6 +1862,7 @@ checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f"
 dependencies = [
  "equivalent",
  "hashbrown 0.15.2",
  "rayon",
 ]

 [[package]]

@@ -2038,7 +2046,7 @@ dependencies = [
 [[package]]
 name = "monero-address"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "monero-io",

@@ -2051,7 +2059,7 @@ dependencies = [
 [[package]]
 name = "monero-borromean"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "monero-generators",

@@ -2064,7 +2072,7 @@ dependencies = [
 [[package]]
 name = "monero-bulletproofs"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "monero-generators",

@@ -2079,7 +2087,7 @@ dependencies = [
 [[package]]
 name = "monero-clsag"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "dalek-ff-group",

@@ -2099,7 +2107,7 @@ dependencies = [
 [[package]]
 name = "monero-generators"
 version = "0.4.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "dalek-ff-group",

@@ -2113,7 +2121,7 @@ dependencies = [
 [[package]]
 name = "monero-io"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "std-shims",

@@ -2122,7 +2130,7 @@ dependencies = [
 [[package]]
 name = "monero-mlsag"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "monero-generators",

@@ -2136,7 +2144,7 @@ dependencies = [
 [[package]]
 name = "monero-primitives"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "monero-generators",

@@ -2149,7 +2157,7 @@ dependencies = [
 [[package]]
 name = "monero-rpc"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "hex",

@@ -2165,7 +2173,7 @@ dependencies = [
 [[package]]
 name = "monero-serai"
 version = "0.1.4-alpha"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "curve25519-dalek",
  "hex-literal",

@@ -2183,7 +2191,7 @@ dependencies = [
 [[package]]
 name = "monero-simple-request-rpc"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "digest_auth",
  "hex",

@@ -2516,8 +2524,8 @@ dependencies = [
 [[package]]
 name = "randomx-rs"
-version = "1.3.0"
-source = "git+https://github.com/Cuprate/randomx-rs.git?rev=0028464#002846452ed79b0b9568235a1a4100dcf2a5f9ba"
+version = "1.3.2"
+source = "git+https://github.com/Cuprate/randomx-rs.git?rev=e09955c#e09955cde78482203f125fbe6bcb81098048df98"
 dependencies = [
  "bitflags 1.3.2",
  "libc",

@@ -2601,15 +2609,14 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 [[package]]
 name = "ring"
-version = "0.17.8"
+version = "0.17.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
+checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
 dependencies = [
  "cc",
  "cfg-if",
  "getrandom",
  "libc",
- "spin",
  "untrusted",
  "windows-sys 0.52.0",
 ]

@@ -2880,7 +2887,7 @@ dependencies = [
 [[package]]
 name = "simple-request"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "http-body-util",
  "hyper",

@@ -2946,7 +2953,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 [[package]]
 name = "std-shims"
 version = "0.1.1"
-source = "git+https://github.com/Cuprate/serai.git?rev=e6fdef6#e6fdef6d0b4481932ac9647796eb3fa56197ed66"
+source = "git+https://github.com/Cuprate/serai.git?rev=e6ae8c2#e6ae8c2b1f9d791f35ea225032cc0a3f79dec99d"
 dependencies = [
  "hashbrown 0.14.5",
  "spin",
Cargo.toml (11 changes)

@@ -125,11 +125,12 @@ futures = { version = "0.3", default-features = false }
 hex = { version = "0.4", default-features = false }
 hex-literal = { version = "0.4", default-features = false }
+indexmap = { version = "2", default-features = false }
-monero-address = { git = "https://github.com/Cuprate/serai.git", rev = "e6fdef6", default-features = false }
-monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "e6fdef6", default-features = false }
+monero-address = { git = "https://github.com/Cuprate/serai.git", rev = "e6ae8c2", default-features = false }
+monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "e6ae8c2", default-features = false }
 nu-ansi-term = { version = "0.46", default-features = false }
 paste = { version = "1", default-features = false }
 pin-project = { version = "1", default-features = false }
-randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
+randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "e09955c", default-features = false }
 rand = { version = "0.8", default-features = false }
 rand_distr = { version = "0.4", default-features = false }
 rayon = { version = "1", default-features = false }

@@ -149,8 +150,8 @@ tracing-subscriber = { version = "0.3", default-features = false }
 tracing = { version = "0.1", default-features = false }

 ## workspace.dev-dependencies
-monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "e6fdef6" }
-monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "e6fdef6" }
+monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "e6ae8c2" }
+monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "e6ae8c2" }
 tempfile = { version = "3" }
 pretty_assertions = { version = "1" }
 proptest = { version = "1" }
README.md (18 changes)

@@ -12,7 +12,8 @@ _(work-in-progress)_
 ## Contents

 - [About](#about)
-- [Documentation](#documentation)
+- [Books](#books)
+- [Crates](#crates)
 - [Contributing](#contributing)
 - [Security](#security)
 - [License](#license)

@@ -30,14 +31,14 @@ TODO: add these sections someday.
 ## About

 Cuprate is an effort to create an alternative [Monero](https://getmonero.org) node implementation
-in [Rust](http://rust-lang.org).
+in [Rust](https://rust-lang.org).

 It will be able to independently validate Monero consensus rules, providing a layer of security and redundancy for the
 Monero network.

 <!-- TODO: add some details about what Cuprate is and is not, goals, status -->

-## Documentation
+## Books

 _Cuprate is currently a work-in-progress; documentation will be changing/unfinished._

@@ -49,18 +50,21 @@ Cuprate maintains various documentation books:
 | [Monero's protocol book](https://monero-book.cuprate.org) | Documents the Monero protocol |
 | [Cuprate's user book](https://user.cuprate.org) | Practical user-guide for using `cuprated` |

-For crate (library) documentation, see: https://doc.cuprate.org. This site holds documentation for Cuprate's crates and all dependencies. All Cuprate crates start with `cuprate_`, for example: [`cuprate_database`](https://doc.cuprate.org/cuprate_database).
+## Crates
+For a detailed list of all crates, see: <https://architecture.cuprate.org/appendix/crates.html>.
+
+For crate (library) documentation, see: <https://doc.cuprate.org>. This site holds documentation for Cuprate's crates and all dependencies. All Cuprate crates start with `cuprate_`, for example: [`cuprate_database`](https://doc.cuprate.org/cuprate_database).

 ## Contributing

-See [`CONTRIBUTING.md`](CONTRIBUTING.md).
+See [`CONTRIBUTING.md`](/CONTRIBUTING.md).

 ## Security

-Cuprate has a responsible vulnerability disclosure policy, see [`SECURITY.md`](SECURITY.md).
+Cuprate has a responsible vulnerability disclosure policy, see [`SECURITY.md`](/SECURITY.md).

 ## License

 The `binaries/` directory is licensed under AGPL-3.0, everything else is licensed under MIT.

-See [`LICENSE`](LICENSE) for more details.
+See [`LICENSE`](/LICENSE) for more details.
binaries/cuprated/Cargo.toml

@@ -60,6 +60,7 @@ hex-literal = { workspace = true }
 indexmap = { workspace = true }
+monero-address = { workspace = true }
 monero-serai = { workspace = true }
 nu-ansi-term = { workspace = true }
 paste = { workspace = true }
 pin-project = { workspace = true }
 randomx-rs = { workspace = true }

@@ -81,5 +82,8 @@ tracing-appender = { workspace = true }
 tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] }
 tracing = { workspace = true, features = ["default"] }

+[dev-dependencies]
+tempfile = { workspace = true }
+
 [lints]
 workspace = true
@@ -43,9 +43,9 @@ peer_save_period = { secs = 90, nanos = 0 }
 ## The block downloader config.
 [p2p.block_downloader]
 ## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
-buffer_bytes = 50_000_000
+buffer_bytes = 1_000_000_000
 ## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
-in_progress_queue_bytes = 50_000_000
+in_progress_queue_bytes = 500_000_000
 ## The target size of a batch of blocks (bytes), must not exceed 100MB.
 target_batch_bytes = 10_000_000
 ## The amount of time between checking the pool of connected peers for free peers to download blocks.
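Both values are byte budgets on in-flight blocks rather than item counts. A self-contained sketch of the idea — illustrative only, the real buffer lives in Cuprate's `cuprate-p2p` block downloader:

```rust
use std::collections::VecDeque;

/// A queue that admits items only while their total byte size stays under a cap.
struct ByteBudgetQueue<T> {
    items: VecDeque<(T, usize)>,
    used: usize,
    cap: usize,
}

impl<T> ByteBudgetQueue<T> {
    fn new(cap: usize) -> Self {
        Self { items: VecDeque::new(), used: 0, cap }
    }

    /// Admit `item` of `size` bytes, or hand it back if the budget is exhausted.
    fn try_push(&mut self, item: T, size: usize) -> Result<(), T> {
        if self.used + size > self.cap {
            return Err(item); // over budget: the caller must back off
        }
        self.used += size;
        self.items.push_back((item, size));
        Ok(())
    }

    /// Remove the oldest item, freeing its bytes from the budget.
    fn pop(&mut self) -> Option<T> {
        let (item, size) = self.items.pop_front()?;
        self.used -= size;
        Some(item)
    }
}

fn main() {
    let mut q = ByteBudgetQueue::new(100);
    assert!(q.try_push("batch A", 60).is_ok());
    assert!(q.try_push("batch B", 60).is_err()); // would exceed the 100-byte cap
    q.pop();
    assert!(q.try_push("batch B", 60).is_ok()); // space freed
}
```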
binaries/cuprated/config/Cuprated.toml (symbolic link, new)

@@ -0,0 +1 @@
0.0.1.toml
binaries/cuprated/config/README.md (new file, 6 lines)

@@ -0,0 +1,6 @@
# `cuprated` configs
This directory holds configuration files for all `cuprated` versions.

For example, `0.0.1.toml` is the config file for `cuprated v0.0.1`.

`Cuprated.toml` is a symlink to the latest config file.
binaries/cuprated/src/blockchain.rs

@@ -20,11 +20,13 @@ use cuprate_types::{
 use crate::constants::PANIC_CRITICAL_SERVICE_ERROR;

 mod chain_service;
+mod fast_sync;
 pub mod interface;
 mod manager;
 mod syncer;
 mod types;

+pub use fast_sync::set_fast_sync_hashes;
 pub use manager::init_blockchain_manager;
 pub use types::ConsensusBlockchainReadHandle;
binaries/cuprated/src/blockchain/chain_service.rs

@@ -4,7 +4,9 @@ use futures::{future::BoxFuture, FutureExt, TryFutureExt};
 use tower::Service;

 use cuprate_blockchain::service::BlockchainReadHandle;
+use cuprate_fast_sync::validate_entries;
 use cuprate_p2p::block_downloader::{ChainSvcRequest, ChainSvcResponse};
+use cuprate_p2p_core::NetworkZone;
 use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};

 /// That service that allows retrieving the chain state to give to the P2P crates, so we can figure out

@@ -14,8 +16,8 @@ use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse};
 #[derive(Clone)]
 pub struct ChainService(pub BlockchainReadHandle);

-impl Service<ChainSvcRequest> for ChainService {
-    type Response = ChainSvcResponse;
+impl<N: NetworkZone> Service<ChainSvcRequest<N>> for ChainService {
+    type Response = ChainSvcResponse<N>;
     type Error = tower::BoxError;
     type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

@@ -23,7 +25,7 @@ impl Service<ChainSvcRequest> for ChainService {
         self.0.poll_ready(cx).map_err(Into::into)
     }

-    fn call(&mut self, req: ChainSvcRequest) -> Self::Future {
+    fn call(&mut self, req: ChainSvcRequest<N>) -> Self::Future {
         let map_res = |res: BlockchainResponse| match res {
             BlockchainResponse::CompactChainHistory {
                 block_ids,

@@ -67,6 +69,18 @@ impl Service<ChainSvcRequest> for ChainService {
                 })
                 .map_err(Into::into)
                 .boxed(),
+            ChainSvcRequest::ValidateEntries(entries, start_height) => {
+                let mut blockchain_read_handle = self.0.clone();
+
+                async move {
+                    let (valid, unknown) =
+                        validate_entries(entries, start_height, &mut blockchain_read_handle)
+                            .await?;
+
+                    Ok(ChainSvcResponse::ValidateEntries { valid, unknown })
+                }
+                .boxed()
+            }
         }
     }
 }
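The new `ValidateEntries` arm follows the usual `tower` pattern for request variants that need async work: clone the underlying handle so the future owns its state, then box it to fit the service's `BoxFuture` type. A minimal standalone sketch of that pattern, with hypothetical request/response/handle types standing in for the Cuprate ones:

```rust
use std::task::{Context, Poll};

use futures::{future::BoxFuture, FutureExt};
use tower::Service;

// Hypothetical stand-ins for `ChainSvcRequest`/`ChainSvcResponse`/`BlockchainReadHandle`.
enum Request {
    Validate(Vec<u64>),
}
enum Response {
    Validated { valid: usize },
}

#[derive(Clone)]
struct Handle;

impl Handle {
    async fn validate(&mut self, entries: Vec<u64>) -> Result<usize, tower::BoxError> {
        Ok(entries.len()) // placeholder for real validation work
    }
}

struct ChainService(Handle);

impl Service<Request> for ChainService {
    type Response = Response;
    type Error = tower::BoxError;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request) -> Self::Future {
        // Clone the handle so the returned future is `'static` and owns its state.
        let mut handle = self.0.clone();
        match req {
            Request::Validate(entries) => async move {
                let valid = handle.validate(entries).await?;
                Ok(Response::Validated { valid })
            }
            .boxed(),
        }
    }
}
```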
binaries/cuprated/src/blockchain/fast_sync.rs (new file, 24 lines)

@@ -0,0 +1,24 @@
use std::slice;

use cuprate_helper::network::Network;

/// The hashes of the compiled in fast sync file.
static FAST_SYNC_HASHES: &[[u8; 32]] = {
    let bytes = include_bytes!("./fast_sync/fast_sync_hashes.bin");

    if bytes.len() % 32 == 0 {
        // SAFETY: The file byte length must be perfectly divisible by 32, checked above.
        unsafe { slice::from_raw_parts(bytes.as_ptr().cast::<[u8; 32]>(), bytes.len() / 32) }
    } else {
        panic!();
    }
};

/// Set the fast-sync hashes according to the provided values.
pub fn set_fast_sync_hashes(fast_sync: bool, network: Network) {
    cuprate_fast_sync::set_fast_sync_hashes(if fast_sync && network == Network::Mainnet {
        FAST_SYNC_HASHES
    } else {
        &[]
    });
}

binaries/cuprated/src/blockchain/fast_sync/fast_sync_hashes.bin (new binary file; not shown)
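The `static` above reinterprets the embedded byte blob as `[u8; 32]` hashes at compile time, with the length check making the `unsafe` cast sound. The same check-then-convert idea can be written at runtime without `unsafe` — an illustrative sketch, not code from the repository:

```rust
/// Split a flat byte blob into 32-byte hashes, returning `None` if the
/// length is not a multiple of 32 (the same invariant the static checks).
fn as_hashes(bytes: &[u8]) -> Option<Vec<[u8; 32]>> {
    if bytes.len() % 32 != 0 {
        return None;
    }
    Some(
        bytes
            .chunks_exact(32)
            .map(|chunk| <[u8; 32]>::try_from(chunk).unwrap())
            .collect(),
    )
}

fn main() {
    assert_eq!(as_hashes(&[0u8; 64]).unwrap().len(), 2);
    assert!(as_hashes(&[0u8; 33]).is_none());
}
```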
binaries/cuprated/src/blockchain/manager.rs

@@ -2,7 +2,7 @@ use std::{collections::HashMap, sync::Arc};
 use futures::StreamExt;
 use monero_serai::block::Block;
-use tokio::sync::{mpsc, oneshot, Notify};
+use tokio::sync::{mpsc, oneshot, Notify, OwnedSemaphorePermit};
 use tower::{BoxError, Service, ServiceExt};
 use tracing::error;

@@ -33,6 +33,9 @@ use crate::{
 mod commands;
 mod handler;

+#[cfg(test)]
+mod tests;
+
 pub use commands::{BlockchainManagerCommand, IncomingBlockOk};

 /// Initialize the blockchain manager.

@@ -106,15 +109,17 @@ impl BlockchainManager {
     /// The [`BlockchainManager`] task.
     pub async fn run(
         mut self,
-        mut block_batch_rx: mpsc::Receiver<BlockBatch>,
+        mut block_batch_rx: mpsc::Receiver<(BlockBatch, Arc<OwnedSemaphorePermit>)>,
         mut command_rx: mpsc::Receiver<BlockchainManagerCommand>,
     ) {
         loop {
             tokio::select! {
-                Some(batch) = block_batch_rx.recv() => {
+                Some((batch, permit)) = block_batch_rx.recv() => {
                     self.handle_incoming_block_batch(
                         batch,
                     ).await;
+
+                    drop(permit);
                 }
                 Some(incoming_command) = command_rx.recv() => {
                     self.handle_command(incoming_command).await;
binaries/cuprated/src/blockchain/manager/handler.rs

@@ -1,4 +1,6 @@
 //! The blockchain manager handler functions.
+use std::{collections::HashMap, sync::Arc};
+
 use bytes::Bytes;
 use futures::{TryFutureExt, TryStreamExt};
 use monero_serai::{

@@ -6,10 +8,8 @@ use monero_serai::{
     transaction::{Input, Transaction},
 };
 use rayon::prelude::*;
-use std::ops::ControlFlow;
-use std::{collections::HashMap, sync::Arc};
 use tower::{Service, ServiceExt};
-use tracing::{info, instrument};
+use tracing::{info, instrument, Span};

@@ -21,12 +21,13 @@ use cuprate_consensus::{
     BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError,
 };
 use cuprate_consensus_context::NewBlockData;
+use cuprate_fast_sync::{block_to_verified_block_information, fast_sync_stop_height};
 use cuprate_helper::cast::usize_to_u64;
 use cuprate_p2p::{block_downloader::BlockBatch, constants::LONG_BAN, BroadcastRequest};
 use cuprate_txpool::service::interface::TxpoolWriteRequest;
 use cuprate_types::{
     blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest},
-    AltBlockInformation, HardFork, TransactionVerificationData, VerifiedBlockInformation,
+    AltBlockInformation, Chain, HardFork, TransactionVerificationData, VerifiedBlockInformation,
 };

 use crate::{

@@ -166,9 +167,17 @@ impl super::BlockchainManager {
     /// This function will panic if any internal service returns an unexpected error that we cannot
     /// recover from or if the incoming batch contains no blocks.
     async fn handle_incoming_block_batch_main_chain(&mut self, batch: BlockBatch) {
-        let Ok(prepped_blocks) =
-            batch_prepare_main_chain_blocks(batch.blocks, &mut self.blockchain_context_service)
-                .await
+        if batch.blocks.last().unwrap().0.number().unwrap() < fast_sync_stop_height() {
+            self.handle_incoming_block_batch_fast_sync(batch).await;
+            return;
+        }
+
+        let Ok((prepped_blocks, mut output_cache)) = batch_prepare_main_chain_blocks(
+            batch.blocks,
+            &mut self.blockchain_context_service,
+            self.blockchain_read_handle.clone(),
+        )
+        .await
         else {
             batch.peer_handle.ban_peer(LONG_BAN);
             self.stop_current_block_downloader.notify_one();

@@ -181,6 +190,7 @@ impl super::BlockchainManager {
                 txs,
                 &mut self.blockchain_context_service,
+                self.blockchain_read_handle.clone(),
+                Some(&mut output_cache),
             )
             .await
             else {

@@ -191,7 +201,32 @@ impl super::BlockchainManager {
             self.add_valid_block_to_main_chain(verified_block).await;
         }
-        info!("Successfully added block batch");
+        info!(fast_sync = false, "Successfully added block batch");
+    }
+
+    /// Handles an incoming block batch while we are under the fast sync height.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if any internal service returns an unexpected error that we cannot
+    /// recover from.
+    async fn handle_incoming_block_batch_fast_sync(&mut self, batch: BlockBatch) {
+        let mut valid_blocks = Vec::with_capacity(batch.blocks.len());
+        for (block, txs) in batch.blocks {
+            let block = block_to_verified_block_information(
+                block,
+                txs,
+                self.blockchain_context_service.blockchain_context(),
+            );
+            self.add_valid_block_to_blockchain_cache(&block).await;
+
+            valid_blocks.push(block);
+        }
+
+        self.batch_add_valid_block_to_blockchain_database(valid_blocks)
+            .await;
+
+        info!(fast_sync = true, "Successfully added block batch");
     }

     /// Handles an incoming [`BlockBatch`] that does not follow the main-chain.

@@ -208,7 +243,6 @@ impl super::BlockchainManager {
     /// recover from.
     async fn handle_incoming_block_batch_alt_chain(&mut self, mut batch: BlockBatch) {
-        // TODO: this needs testing (this whole section does but alt-blocks specifically).
-
         let mut blocks = batch.blocks.into_iter();

         while let Some((block, txs)) = blocks.next() {

@@ -244,6 +278,8 @@ impl super::BlockchainManager {
                 Ok(AddAltBlock::Cached) => (),
             }
         }
+
+        info!(alt_chain = true, "Successfully added block batch");
     }

     /// Handles an incoming alt [`Block`].

@@ -267,12 +303,29 @@ impl super::BlockchainManager {
         block: Block,
         prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
     ) -> Result<AddAltBlock, anyhow::Error> {
+        // Check if a block already exists.
+        let BlockchainResponse::FindBlock(chain) = self
+            .blockchain_read_handle
+            .ready()
+            .await
+            .expect(PANIC_CRITICAL_SERVICE_ERROR)
+            .call(BlockchainReadRequest::FindBlock(block.hash()))
+            .await
+            .expect(PANIC_CRITICAL_SERVICE_ERROR)
+        else {
+            unreachable!();
+        };
+
+        match chain {
+            Some((Chain::Alt(_), _)) => return Ok(AddAltBlock::Cached),
+            Some((Chain::Main, _)) => anyhow::bail!("Alt block already in main chain"),
+            None => (),
+        }
+
         let alt_block_info =
             sanity_check_alt_block(block, prepared_txs, self.blockchain_context_service.clone())
                 .await?;

-        // TODO: check in consensus crate if alt block with this hash already exists.
-
         // If this alt chain has more cumulative difficulty, reorg.
         if alt_block_info.cumulative_difficulty
             > self

@@ -343,7 +396,7 @@ impl super::BlockchainManager {
             .await
             .expect(PANIC_CRITICAL_SERVICE_ERROR)
             .call(BlockchainWriteRequest::PopBlocks(
-                current_main_chain_height - split_height + 1,
+                current_main_chain_height - split_height,
             ))
             .await
             .expect(PANIC_CRITICAL_SERVICE_ERROR)

@@ -356,7 +409,7 @@ impl super::BlockchainManager {
             .await
             .expect(PANIC_CRITICAL_SERVICE_ERROR)
             .call(BlockChainContextRequest::PopBlocks {
-                numb_blocks: current_main_chain_height - split_height + 1,
+                numb_blocks: current_main_chain_height - split_height,
             })
             .await
             .expect(PANIC_CRITICAL_SERVICE_ERROR);

@@ -404,6 +457,7 @@ impl super::BlockchainManager {
             prepped_txs,
             &mut self.blockchain_context_service,
+            self.blockchain_read_handle.clone(),
             None,
         )
         .await?;

@@ -431,28 +485,14 @@ impl super::BlockchainManager {
             .iter()
             .flat_map(|tx| {
                 tx.tx.prefix().inputs.iter().map(|input| match input {
-                    Input::ToKey { key_image, .. } => key_image.compress().0,
+                    Input::ToKey { key_image, .. } => key_image.0,
                     Input::Gen(_) => unreachable!(),
                 })
             })
             .collect::<Vec<[u8; 32]>>();

-        self.blockchain_context_service
-            .ready()
-            .await
-            .expect(PANIC_CRITICAL_SERVICE_ERROR)
-            .call(BlockChainContextRequest::Update(NewBlockData {
-                block_hash: verified_block.block_hash,
-                height: verified_block.height,
-                timestamp: verified_block.block.header.timestamp,
-                weight: verified_block.weight,
-                long_term_weight: verified_block.long_term_weight,
-                generated_coins: verified_block.generated_coins,
-                vote: HardFork::from_vote(verified_block.block.header.hardfork_signal),
-                cumulative_difficulty: verified_block.cumulative_difficulty,
-            }))
-            .await
-            .expect(PANIC_CRITICAL_SERVICE_ERROR);
+        self.add_valid_block_to_blockchain_cache(&verified_block)
+            .await;

         self.blockchain_write_handle
             .ready()

@@ -470,6 +510,55 @@ impl super::BlockchainManager {
             .await
             .expect(PANIC_CRITICAL_SERVICE_ERROR);
     }

+    /// Adds a [`VerifiedBlockInformation`] to the blockchain context cache.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if any internal service returns an unexpected error that we cannot
+    /// recover from.
+    async fn add_valid_block_to_blockchain_cache(
+        &mut self,
+        verified_block: &VerifiedBlockInformation,
+    ) {
+        self.blockchain_context_service
+            .ready()
+            .await
+            .expect(PANIC_CRITICAL_SERVICE_ERROR)
+            .call(BlockChainContextRequest::Update(NewBlockData {
+                block_hash: verified_block.block_hash,
+                height: verified_block.height,
+                timestamp: verified_block.block.header.timestamp,
+                weight: verified_block.weight,
+                long_term_weight: verified_block.long_term_weight,
+                generated_coins: verified_block.generated_coins,
+                vote: HardFork::from_vote(verified_block.block.header.hardfork_signal),
+                cumulative_difficulty: verified_block.cumulative_difficulty,
+            }))
+            .await
+            .expect(PANIC_CRITICAL_SERVICE_ERROR);
+    }
+
+    /// Batch writes the [`VerifiedBlockInformation`]s to the database.
+    ///
+    /// The blocks must be sequential.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if any internal service returns an unexpected error that we cannot
+    /// recover from.
+    async fn batch_add_valid_block_to_blockchain_database(
+        &mut self,
+        blocks: Vec<VerifiedBlockInformation>,
+    ) {
+        self.blockchain_write_handle
+            .ready()
+            .await
+            .expect(PANIC_CRITICAL_SERVICE_ERROR)
+            .call(BlockchainWriteRequest::BatchWriteBlocks(blocks))
+            .await
+            .expect(PANIC_CRITICAL_SERVICE_ERROR);
+    }
 }

 /// The result from successfully adding an alt-block.
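The two `PopBlocks` hunks drop a `+ 1` from the popped-block count. As a plain arithmetic check with hypothetical numbers (the exact height conventions live in the Cuprate codebase): popping `top - split` blocks from a chain whose top block is at height `top` leaves the block at height `split` in place, whereas `top - split + 1` pops the split-point block as well.

```rust
// Hypothetical numbers; heights are 0-based and popping removes from the top.
fn top_after_pop(top: u64, popped: u64) -> u64 {
    top - popped
}

fn main() {
    let (top, split) = (10_u64, 8_u64);
    assert_eq!(top_after_pop(top, top - split), split);         // block at the split height kept
    assert_eq!(top_after_pop(top, top - split + 1), split - 1); // split-height block popped too
}
```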
binaries/cuprated/src/blockchain/manager/tests.rs (new file, 204 lines)

@@ -0,0 +1,204 @@
use std::{collections::HashMap, env::temp_dir, path::PathBuf, sync::Arc};

use monero_serai::{
    block::{Block, BlockHeader},
    transaction::{Input, Output, Timelock, Transaction, TransactionPrefix},
};
use tokio::sync::{oneshot, watch};
use tower::BoxError;

use cuprate_consensus_context::{BlockchainContext, ContextConfig};
use cuprate_consensus_rules::{hard_forks::HFInfo, miner_tx::calculate_block_reward, HFsInfo};
use cuprate_helper::network::Network;
use cuprate_p2p::BroadcastSvc;

use crate::blockchain::{
    check_add_genesis, manager::BlockchainManager, manager::BlockchainManagerCommand,
    ConsensusBlockchainReadHandle,
};

async fn mock_manager(data_dir: PathBuf) -> BlockchainManager {
    let blockchain_config = cuprate_blockchain::config::ConfigBuilder::new()
        .data_directory(data_dir.clone())
        .build();
    let txpool_config = cuprate_txpool::config::ConfigBuilder::new()
        .data_directory(data_dir)
        .build();

    let (mut blockchain_read_handle, mut blockchain_write_handle, _) =
        cuprate_blockchain::service::init(blockchain_config).unwrap();
    let (txpool_read_handle, txpool_write_handle, _) =
        cuprate_txpool::service::init(txpool_config).unwrap();

    check_add_genesis(
        &mut blockchain_read_handle,
        &mut blockchain_write_handle,
        Network::Mainnet,
    )
    .await;

    let mut context_config = ContextConfig::main_net();
    context_config.difficulty_cfg.fixed_difficulty = Some(1);
    context_config.hard_fork_cfg.info = HFsInfo::new([HFInfo::new(0, 0); 16]);

    let blockchain_read_handle =
        ConsensusBlockchainReadHandle::new(blockchain_read_handle, BoxError::from);

    let blockchain_context_service = cuprate_consensus_context::initialize_blockchain_context(
        context_config,
        blockchain_read_handle.clone(),
    )
    .await
    .unwrap();

    BlockchainManager {
        blockchain_write_handle,
        blockchain_read_handle,
        txpool_write_handle,
        blockchain_context_service,
        stop_current_block_downloader: Arc::new(Default::default()),
        broadcast_svc: BroadcastSvc::mock(),
    }
}

fn generate_block(context: &BlockchainContext) -> Block {
    Block {
        header: BlockHeader {
            hardfork_version: 16,
            hardfork_signal: 16,
            timestamp: 1000,
            previous: context.top_hash,
            nonce: 0,
        },
        miner_transaction: Transaction::V2 {
            prefix: TransactionPrefix {
                additional_timelock: Timelock::Block(context.chain_height + 60),
                inputs: vec![Input::Gen(context.chain_height)],
                outputs: vec![Output {
                    // we can set the block weight to 1 as the true value won't get us into the penalty zone.
                    amount: Some(calculate_block_reward(
                        1,
                        context.median_weight_for_block_reward,
                        context.already_generated_coins,
                        context.current_hf,
                    )),
                    key: Default::default(),
                    view_tag: Some(1),
                }],
                extra: rand::random::<[u8; 32]>().to_vec(),
            },
            proofs: None,
        },
        transactions: vec![],
    }
}

#[tokio::test]
async fn simple_reorg() {
    // create 2 managers
    let data_dir_1 = tempfile::tempdir().unwrap();
    let mut manager_1 = mock_manager(data_dir_1.path().to_path_buf()).await;

    let data_dir_2 = tempfile::tempdir().unwrap();
    let mut manager_2 = mock_manager(data_dir_2.path().to_path_buf()).await;

    // give both managers the same first non-genesis block
    let block_1 = generate_block(manager_1.blockchain_context_service.blockchain_context());

    manager_1
        .handle_command(BlockchainManagerCommand::AddBlock {
            block: block_1.clone(),
            prepped_txs: HashMap::new(),
            response_tx: oneshot::channel().0,
        })
        .await;

    manager_2
        .handle_command(BlockchainManagerCommand::AddBlock {
            block: block_1,
            prepped_txs: HashMap::new(),
            response_tx: oneshot::channel().0,
        })
        .await;

    assert_eq!(
        manager_1.blockchain_context_service.blockchain_context(),
        manager_2.blockchain_context_service.blockchain_context()
    );

    // give managers different 2nd block
    let block_2a = generate_block(manager_1.blockchain_context_service.blockchain_context());
    let block_2b = generate_block(manager_2.blockchain_context_service.blockchain_context());

    manager_1
        .handle_command(BlockchainManagerCommand::AddBlock {
            block: block_2a,
            prepped_txs: HashMap::new(),
            response_tx: oneshot::channel().0,
        })
        .await;

    manager_2
        .handle_command(BlockchainManagerCommand::AddBlock {
            block: block_2b.clone(),
            prepped_txs: HashMap::new(),
            response_tx: oneshot::channel().0,
        })
        .await;

    let manager_1_context = manager_1
        .blockchain_context_service
        .blockchain_context()
        .clone();
    assert_ne!(
        &manager_1_context,
        manager_2.blockchain_context_service.blockchain_context()
    );

    // give manager 1 missing block
    manager_1
        .handle_command(BlockchainManagerCommand::AddBlock {
            block: block_2b,
            prepped_txs: HashMap::new(),
            response_tx: oneshot::channel().0,
        })
        .await;
    // make sure this didn't change the context
    assert_eq!(
        &manager_1_context,
        manager_1.blockchain_context_service.blockchain_context()
    );

    // give both managers new block (built of manager 2's chain)
    let block_3 = generate_block(manager_2.blockchain_context_service.blockchain_context());

    manager_1
        .handle_command(BlockchainManagerCommand::AddBlock {
            block: block_3.clone(),
            prepped_txs: HashMap::new(),
            response_tx: oneshot::channel().0,
        })
        .await;

    manager_2
        .handle_command(BlockchainManagerCommand::AddBlock {
            block: block_3,
            prepped_txs: HashMap::new(),
            response_tx: oneshot::channel().0,
        })
        .await;

    // make sure manager 1 reorged.
    assert_eq!(
        manager_1.blockchain_context_service.blockchain_context(),
        manager_2.blockchain_context_service.blockchain_context()
    );
    assert_eq!(
        manager_1
            .blockchain_context_service
            .blockchain_context()
            .chain_height,
        4
    );
}
binaries/cuprated/src/blockchain/syncer.rs

@@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration};
 use futures::StreamExt;
 use tokio::{
-    sync::{mpsc, Notify},
+    sync::{mpsc, Notify, OwnedSemaphorePermit, Semaphore},
     time::interval,
 };
 use tower::{Service, ServiceExt};

@@ -15,7 +15,7 @@ use cuprate_p2p::{
     block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse},
     NetworkInterface, PeerSetRequest, PeerSetResponse,
 };
-use cuprate_p2p_core::ClearNet;
+use cuprate_p2p_core::{ClearNet, NetworkZone};

 const CHECK_SYNC_FREQUENCY: Duration = Duration::from_secs(30);

@@ -30,17 +30,21 @@ pub enum SyncerError {
 /// The syncer tasks that makes sure we are fully synchronised with our connected peers.
 #[instrument(level = "debug", skip_all)]
+#[expect(clippy::significant_drop_tightening)]
 pub async fn syncer<CN>(
     mut context_svc: BlockchainContextService,
     our_chain: CN,
     mut clearnet_interface: NetworkInterface<ClearNet>,
-    incoming_block_batch_tx: mpsc::Sender<BlockBatch>,
+    incoming_block_batch_tx: mpsc::Sender<(BlockBatch, Arc<OwnedSemaphorePermit>)>,
     stop_current_block_downloader: Arc<Notify>,
     block_downloader_config: BlockDownloaderConfig,
 ) -> Result<(), SyncerError>
 where
-    CN: Service<ChainSvcRequest, Response = ChainSvcResponse, Error = tower::BoxError>
-        + Clone
+    CN: Service<
+            ChainSvcRequest<ClearNet>,
+            Response = ChainSvcResponse<ClearNet>,
+            Error = tower::BoxError,
+        > + Clone
         + Send
         + 'static,
     CN::Future: Send + 'static,

@@ -51,6 +55,9 @@ where
     tracing::debug!("Waiting for new sync info in top sync channel");

+    let semaphore = Arc::new(Semaphore::new(1));
+
+    let mut sync_permit = Arc::new(Arc::clone(&semaphore).acquire_owned().await.unwrap());
     loop {
         check_sync_interval.tick().await;

@@ -72,10 +79,19 @@ where
         tokio::select! {
             () = stop_current_block_downloader.notified() => {
                 tracing::info!("Received stop signal, stopping block downloader");
+
+                drop(sync_permit);
+                sync_permit = Arc::new(Arc::clone(&semaphore).acquire_owned().await.unwrap());
+
                 break;
             }
             batch = block_batch_stream.next() => {
                 let Some(batch) = batch else {
+                    // Wait for all references to the permit have been dropped (which means all blocks in the queue
+                    // have been handled before checking if we are synced.
+                    drop(sync_permit);
+                    sync_permit = Arc::new(Arc::clone(&semaphore).acquire_owned().await.unwrap());
+
                     let blockchain_context = context_svc.blockchain_context();

                     if !check_behind_peers(blockchain_context, &mut clearnet_interface).await? {

@@ -86,7 +102,7 @@ where
                 };

                 tracing::debug!("Got batch, len: {}", batch.blocks.len());
-                if incoming_block_batch_tx.send(batch).await.is_err() {
+                if incoming_block_batch_tx.send((batch, Arc::clone(&sync_permit))).await.is_err() {
                     return Err(SyncerError::IncomingBlockChannelClosed);
                 }
             }
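The hunks above thread an `Arc<OwnedSemaphorePermit>` from a single-permit semaphore through every queued batch; re-acquiring the permit then acts as a barrier that only completes once every in-flight batch has been handled (the manager drops its clone after processing each batch). A runnable sketch of that drain pattern using only the `tokio` API — the `u32` channel payload is a stand-in for `BlockBatch`:

```rust
use std::sync::Arc;

use tokio::sync::{mpsc, OwnedSemaphorePermit, Semaphore};

#[tokio::main]
async fn main() {
    let semaphore = Arc::new(Semaphore::new(1));
    let mut sync_permit = Arc::new(Arc::clone(&semaphore).acquire_owned().await.unwrap());

    let (tx, mut rx) = mpsc::channel::<(u32, Arc<OwnedSemaphorePermit>)>(8);

    // Consumer: each item carries a clone of the permit, held until handled.
    let consumer = tokio::spawn(async move {
        while let Some((item, permit)) = rx.recv().await {
            println!("handled batch {item}");
            drop(permit);
        }
    });

    for item in 0..3 {
        tx.send((item, Arc::clone(&sync_permit))).await.unwrap();
    }

    // Drain: drop our handle, then re-acquire. This only completes once every
    // clone (one per in-flight item) has been dropped, i.e. the queue is empty.
    drop(sync_permit);
    sync_permit = Arc::new(Arc::clone(&semaphore).acquire_owned().await.unwrap());
    println!("all in-flight batches handled");

    drop(tx);
    drop(sync_permit);
    consumer.await.unwrap();
}
```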
binaries/cuprated/src/commands.rs

@@ -47,6 +47,9 @@ pub enum Command {
     /// Print status information on `cuprated`.
     Status,
+
+    /// Print the height of first block not contained in the fast sync hashes.
+    FastSyncStopHeight,
 }

 /// The log output target.

@@ -123,6 +126,11 @@ pub async fn io_loop(
             println!("STATUS:\n  uptime: {h}h {m}m {s}s,\n  height: {height},\n  top_hash: {top_hash}");
         }
+        Command::FastSyncStopHeight => {
+            let stop_height = cuprate_fast_sync::fast_sync_stop_height();
+
+            println!("{stop_height}");
+        }
     }
 }
binaries/cuprated/src/config.rs

@@ -17,6 +17,11 @@ use cuprate_helper::{
 use cuprate_p2p::block_downloader::BlockDownloaderConfig;
 use cuprate_p2p_core::{ClearNet, ClearNetServerCfg};

+use crate::{
+    constants::{DEFAULT_CONFIG_STARTUP_DELAY, DEFAULT_CONFIG_WARNING},
+    logging::eprintln_red,
+};
+
 mod args;
 mod fs;
 mod p2p;

@@ -42,7 +47,7 @@ pub fn read_config_and_args() -> Config {
         match Config::read_from_path(config_file) {
             Ok(config) => config,
             Err(e) => {
-                eprintln!("Failed to read config from file: {e}");
+                eprintln_red(&format!("Failed to read config from file: {e}"));
                 std::process::exit(1);
             }
         }

@@ -60,7 +65,10 @@ pub fn read_config_and_args() -> Config {
             })
             .inspect_err(|e| {
                 tracing::debug!("Failed to read config from config dir: {e}");
-                eprintln!("Failed to find/read config file, using default config.");
+                if !args.skip_config_warning {
+                    eprintln_red(DEFAULT_CONFIG_WARNING);
+                    std::thread::sleep(DEFAULT_CONFIG_STARTUP_DELAY);
+                }
             })
             .unwrap_or_default()
     };

@@ -75,6 +83,8 @@ pub struct Config {
     /// The network we should run on.
     network: Network,

+    pub no_fast_sync: bool,
+
     /// [`tracing`] config.
     pub tracing: TracingConfig,

@@ -101,13 +111,14 @@ impl Config {
         let file_text = read_to_string(file.as_ref())?;

         Ok(toml::from_str(&file_text)
-            .inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy()))
+            .inspect(|_| println!("Using config at: {}", file.as_ref().to_string_lossy()))
             .inspect_err(|e| {
-                eprintln!("{e}");
-                eprintln!(
+                eprintln_red(&format!(
                     "Failed to parse config file at: {}",
                     file.as_ref().to_string_lossy()
-                );
+                ));
+                eprintln_red(&format!("{e}"));
                 std::process::exit(1);
             })?)
     }

@@ -177,3 +188,35 @@ impl Config {
         self.p2p.block_downloader.clone().into()
     }
 }
+
+#[cfg(test)]
+mod test {
+    use toml::from_str;
+
+    use crate::constants::EXAMPLE_CONFIG;
+
+    use super::*;
+
+    /// Tests the latest config is the `Default`.
+    #[test]
+    fn config_latest() {
+        let config: Config = from_str(EXAMPLE_CONFIG).unwrap();
+        assert_eq!(config, Config::default());
+    }
+
+    /// Tests backwards compatibility.
+    #[test]
+    fn config_backwards_compat() {
+        // (De)serialization tests.
+        #[expect(
+            clippy::single_element_loop,
+            reason = "Remove after adding other versions"
+        )]
+        for version in ["0.0.1"] {
+            let path = format!("config/{version}.toml");
+            println!("Testing config serde backwards compat: {path}");
+            let string = read_to_string(path).unwrap();
+            from_str::<Config>(&string).unwrap();
+        }
+    }
+}
binaries/cuprated/src/config/args.rs

@@ -20,6 +20,13 @@ pub struct Args {
     )]
     pub network: Network,

+    /// Disable fast sync, all past blocks will undergo full verification when syncing.
+    ///
+    /// This significantly increases initial sync time. This provides no extra security, you just
+    /// have to trust the devs to insert the correct hashes (which are verifiable).
+    #[arg(long)]
+    no_fast_sync: bool,
+
     /// The amount of outbound clear-net connections to maintain.
     #[arg(long)]
     pub outbound_connections: Option<usize>,

@@ -32,6 +39,10 @@ pub struct Args {
     #[arg(long)]
     pub generate_config: bool,

+    /// Stops the missing config warning and startup delay if a config file is missing.
+    #[arg(long)]
+    pub skip_config_warning: bool,
+
     /// Print misc version information in JSON.
     #[arg(short, long)]
     pub version: bool,

@@ -60,6 +71,7 @@ impl Args {
     /// This may exit the program if a config value was set that requires an early exit.
     pub const fn apply_args(&self, mut config: Config) -> Config {
         config.network = self.network;
+        config.no_fast_sync = config.no_fast_sync || self.no_fast_sync;

         if let Some(outbound_connections) = self.outbound_connections {
             config.p2p.clear_net.general.outbound_connections = outbound_connections;
binaries/cuprated/src/config/p2p.rs

@@ -47,8 +47,8 @@ impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloa
 impl Default for BlockDownloaderConfig {
     fn default() -> Self {
         Self {
-            buffer_bytes: 50_000_000,
-            in_progress_queue_bytes: 50_000_000,
+            buffer_bytes: 1_000_000_000,
+            in_progress_queue_bytes: 500_000_000,
             check_client_pool_interval: Duration::from_secs(30),
             target_batch_bytes: 10_000_000,
         }
binaries/cuprated/src/constants.rs

@@ -1,4 +1,5 @@
 //! General constants used throughout `cuprated`.
+use std::time::Duration;

 use const_format::formatcp;

@@ -23,7 +24,17 @@ pub const VERSION_BUILD: &str = formatcp!("{VERSION}-{}", cuprate_constants::bui
 pub const PANIC_CRITICAL_SERVICE_ERROR: &str =
     "A service critical to Cuprate's function returned an unexpected error.";

-pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml");
+pub const DEFAULT_CONFIG_WARNING: &str = formatcp!(
+    "WARNING: no config file found, using default config.\
+    \nThe default config may not be optimal for your setup, see the user book here: https://user.cuprate.org/.\
+    \nPausing startup for {} seconds. \
+    \nUse the `--skip-config-warning` arg to skip this delay if you really want to use the default.",
+    DEFAULT_CONFIG_STARTUP_DELAY.as_secs()
+);
+
+pub const DEFAULT_CONFIG_STARTUP_DELAY: Duration = Duration::from_secs(15);
+
+pub const EXAMPLE_CONFIG: &str = include_str!("../config/Cuprated.toml");

 #[cfg(test)]
 mod test {

@@ -45,11 +56,4 @@ mod test {
         assert_eq!(VERSION_BUILD, "0.0.1-release");
     }
-
-    #[test]
-    fn generate_config_text_is_valid() {
-        let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap();
-
-        assert_eq!(config, Config::default());
-    }
 }
|
@ -16,19 +16,24 @@ use std::{process::exit, time::Duration};

use cuprate_helper::time::current_unix_timestamp;

/// Assert that this is not a v1 release and an alpha release.
/// Assert that this is an alpha release.
const _: () = {
const_format::assertcp_ne!(
crate::constants::MAJOR_VERSION,
"1",
"`cuprated` major version is 1, killswitch module should be deleted."
);
const_format::assertcp_ne!(
crate::constants::MINOR_VERSION,
"1",
"`cuprated` minor version is 1, killswitch module should be deleted."
);
};

/// The killswitch activates if the current timestamp is ahead of this timestamp.
///
/// Sat Mar 01 2025 05:00:00 GMT+0000
pub const KILLSWITCH_ACTIVATION_TIMESTAMP: u64 = 1740805200;
/// Wed Apr 16 12:00:00 AM UTC 2025
pub const KILLSWITCH_ACTIVATION_TIMESTAMP: u64 = 1744761600;

/// Check if the system clock is past a certain timestamp,
/// if so, exit the entire program.

@ -39,8 +44,8 @@ fn killswitch() {
/// sanity checking the system's clock to make
/// sure it is not overly behind.
///
/// Fri Jan 17 2025 14:19:10 GMT+0000
const SYSTEM_CLOCK_SANITY_TIMESTAMP: u64 = 1737123550;
/// Tue Mar 11 08:33:20 PM UTC 2025
const SYSTEM_CLOCK_SANITY_TIMESTAMP: u64 = 1741725200;

let current_ts = current_unix_timestamp();

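For illustration, the check this module performs boils down to something like the following minimal sketch. This is a standalone approximation using only the standard library, not the exact `cuprated` helpers; in particular, how the real module reacts to a clock that is far *behind* may differ (here we simply skip the check).

```rust
use std::{
    process::exit,
    time::{SystemTime, UNIX_EPOCH},
};

// Constants from the diff above.
const SYSTEM_CLOCK_SANITY_TIMESTAMP: u64 = 1741725200;
const KILLSWITCH_ACTIVATION_TIMESTAMP: u64 = 1744761600;

fn killswitch_sketch() {
    // Seconds since the Unix epoch according to the local clock.
    let current_ts = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before the Unix epoch")
        .as_secs();

    // A clock this far behind is clearly broken; do not act on a
    // bogus timestamp.
    if current_ts < SYSTEM_CLOCK_SANITY_TIMESTAMP {
        return;
    }

    // Alpha builds refuse to run once the activation timestamp passes.
    if current_ts > KILLSWITCH_ACTIVATION_TIMESTAMP {
        eprintln!("killswitch activated, please upgrade to the latest release");
        exit(1);
    }
}
```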
@ -140,3 +140,8 @@ pub fn modify_stdout_output(f: impl FnOnce(&mut CupratedTracingFilter)) {
pub fn modify_file_output(f: impl FnOnce(&mut CupratedTracingFilter)) {
FILE_WRITER_FILTER_HANDLE.get().unwrap().modify(f).unwrap();
}

/// Prints some text using [`eprintln`], with [`nu_ansi_term::Color::Red`] applied.
pub fn eprintln_red(s: &str) {
eprintln!("{}", nu_ansi_term::Color::Red.bold().paint(s));
}

@ -16,17 +16,18 @@
reason = "TODO: remove after v1.0.0"
)]

use std::mem;
use std::sync::Arc;
use std::{mem, sync::Arc};

use tokio::sync::mpsc;
use tower::{Service, ServiceExt};
use tracing::level_filters::LevelFilter;
use tracing::{info, level_filters::LevelFilter};
use tracing_subscriber::{layer::SubscriberExt, reload::Handle, util::SubscriberInitExt, Registry};

use cuprate_consensus_context::{
BlockChainContextRequest, BlockChainContextResponse, BlockchainContextService,
};
use cuprate_helper::time::secs_to_hms;
use cuprate_types::blockchain::BlockchainWriteRequest;

use crate::{
config::Config, constants::PANIC_CRITICAL_SERVICE_ERROR, logging::CupratedTracingFilter,

@ -54,6 +55,8 @@ fn main() {

let config = config::read_config_and_args();

blockchain::set_fast_sync_hashes(!config.no_fast_sync, config.network());

// Initialize logging.
logging::init_logging(&config);

@ -81,6 +84,15 @@ fn main() {
// Initialize async tasks.

rt.block_on(async move {
// TODO: we could add an option for people to keep these like monerod?
blockchain_write_handle
.ready()
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR)
.call(BlockchainWriteRequest::FlushAltBlocks)
.await
.expect(PANIC_CRITICAL_SERVICE_ERROR);

// Check for and add the genesis block to the blockchain.
blockchain::check_add_genesis(
&mut blockchain_read_handle,

@ -129,13 +141,19 @@ fn main() {
.await;

// Start the command listener.
let (command_tx, command_rx) = mpsc::channel(1);
std::thread::spawn(|| commands::command_listener(command_tx));
if std::io::IsTerminal::is_terminal(&std::io::stdin()) {
let (command_tx, command_rx) = mpsc::channel(1);
std::thread::spawn(|| commands::command_listener(command_tx));

// Wait on the io_loop, spawned on a separate task as this improves performance.
tokio::spawn(commands::io_loop(command_rx, context_svc))
.await
.unwrap();
// Wait on the io_loop, spawned on a separate task as this improves performance.
tokio::spawn(commands::io_loop(command_rx, context_svc))
.await
.unwrap();
} else {
// If no STDIN, await OS exit signal.
info!("Terminal/TTY not detected, disabling STDIN commands");
tokio::signal::ctrl_c().await.unwrap();
}
});
}

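The shape of that change is easier to see in isolation. Below is a minimal sketch of gating an interactive command listener on whether stdin is a terminal, using only the standard library and none of the real `cuprated` task setup:

```rust
use std::io::{stdin, IsTerminal};

fn main() {
    if stdin().is_terminal() {
        // Interactive terminal: spawn a thread that reads commands from stdin.
        let handle = std::thread::spawn(|| {
            let mut line = String::new();
            while std::io::stdin().read_line(&mut line).is_ok_and(|n| n > 0) {
                println!("received command: {}", line.trim());
                line.clear();
            }
        });
        handle.join().unwrap();
    } else {
        // Piped/daemonized: no stdin commands, just wait for an exit signal.
        println!("Terminal/TTY not detected, disabling STDIN commands");
    }
}
```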
@ -6,10 +6,11 @@ use std::{
};

use anyhow::Error;
use cuprate_rpc_types::misc::GetOutputsOut;
use indexmap::{IndexMap, IndexSet};
use monero_serai::block::Block;
use tower::{Service, ServiceExt};

use cuprate_rpc_types::misc::GetOutputsOut;
use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
use cuprate_types::{

@ -219,8 +220,8 @@ pub async fn generated_coins(
/// [`BlockchainReadRequest::Outputs`]
pub async fn outputs(
blockchain_read: &mut BlockchainReadHandle,
outputs: HashMap<u64, HashSet<u64>>,
) -> Result<HashMap<u64, HashMap<u64, OutputOnChain>>, Error> {
outputs: IndexMap<u64, IndexSet<u64>>,
) -> Result<OutputCache, Error> {
let BlockchainResponse::Outputs(outputs) = blockchain_read
.ready()
.await?

@ -12,7 +12,7 @@ use cuprate_dandelion_tower::{traits::StemRequest, OutboundPeer};
use cuprate_p2p::{ClientDropGuard, NetworkInterface, PeerSetRequest, PeerSetResponse};
use cuprate_p2p_core::{
client::{Client, InternalPeerID},
ClearNet, NetworkZone, PeerRequest, ProtocolRequest,
BroadcastMessage, ClearNet, NetworkZone, PeerRequest, ProtocolRequest,
};
use cuprate_wire::protocol::NewTransactions;

@ -91,17 +91,16 @@ impl<N: NetworkZone> Service<StemRequest<DandelionTx>> for StemPeerService<N> {
type Future = <Client<N> as Service<PeerRequest>>::Future;

fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx)
self.0.broadcast_client().poll_ready(cx)
}

fn call(&mut self, req: StemRequest<DandelionTx>) -> Self::Future {
self.0
.call(PeerRequest::Protocol(ProtocolRequest::NewTransactions(
NewTransactions {
txs: vec![req.0 .0],
dandelionpp_fluff: false,
padding: Bytes::new(),
},
)))
.broadcast_client()
.call(BroadcastMessage::NewTransactions(NewTransactions {
txs: vec![req.0 .0],
dandelionpp_fluff: false,
padding: Bytes::new(),
}))
}
}

@ -172,6 +172,7 @@ async fn handle_incoming_txs(
context.current_adjusted_timestamp_for_time_lock(),
context.current_hf,
blockchain_read_handle,
None,
)
.verify()
.await

@ -8,7 +8,7 @@ There are "synchronization primitives" that help with this, common ones being:
- [Channels](https://en.wikipedia.org/wiki/Channel_(programming))
- [Atomics](https://en.wikipedia.org/wiki/Linearizability#Primitive_atomic_instructions)

These tools are relatively easy to use in isolation, but trickier to do so when considering the entire system. It is not uncommon for _the_ bottleneck to be the [poor orchastration](https://en.wikipedia.org/wiki/Starvation_(computer_science)) of these primitives.
These tools are relatively easy to use in isolation, but trickier to do so when considering the entire system. It is not uncommon for _the_ bottleneck to be the [poor orchestration](https://en.wikipedia.org/wiki/Starvation_(computer_science)) of these primitives.

## Analogy
A common analogy for a parallel system is an intersection.

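As a concrete illustration of the primitives listed above (not taken from the Cuprate codebase), a channel lets one thread hand work to another without sharing mutable state:

```rust
use std::{sync::mpsc, thread};

fn main() {
    let (tx, rx) = mpsc::channel();

    // Producer thread: sends work over the channel.
    let producer = thread::spawn(move || {
        for i in 0..5 {
            tx.send(i).unwrap();
        }
        // Dropping `tx` closes the channel, ending the consumer loop below.
    });

    // Consumer: receives until the channel is closed.
    for item in rx {
        println!("got {item}");
    }

    producer.join().unwrap();
}
```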
@ -1,5 +1,5 @@
## Cuprate's user book
This book is the end-user documentation for Cuprate, aka, "how to use `cuprated`".
This book is the end-user documentation for Cuprate.

See:
- <https://user.cuprate.org>

@ -3,17 +3,10 @@ authors = ["hinto-janai"]
language = "en"
multilingual = false
src = "src"
title = "Cuprate's user book"
git-repository-url = "https://github.com/Cuprate/user-book"
title = "Cuprate User Book - v0.0.1"
git-repository-url = "https://github.com/Cuprate/cuprate/books/user"

# TODO: fix after importing real files.
#
# [preprocessor.last-changed]
# command = "mdbook-last-changed"
# renderer = ["html"]
#
# [output.html]
# default-theme = "ayu"
# preferred-dark-theme = "ayu"
# git-repository-url = "https://github.com/hinto-janai/cuprate-user"
# additional-css = ["last-changed.css"]
[output.html]
default-theme = "ayu"
preferred-dark-theme = "ayu"
no-section-label = true

@ -1,3 +1,22 @@
# Summary

- [TODO](todo.md)
[Introduction](introduction.md)

<!-- TODO: add after reproducible builds - [Verifying](getting-started/verify.md) -->

- [Getting started](getting-started/intro.md)
- [System requirements](getting-started/sys-req.md)
- [Download](getting-started/download.md)
- [Building from source](getting-started/source.md)
- [Running](getting-started/run.md)

- [Configuration](config.md)
- [Command line](cli.md)

- [Resources](resources/intro.md)
- [Disk](resources/disk.md)
- [Ports](resources/ports.md)
- [IP](resources/ip.md)

- [Platform support](platform.md)
- [License](license.md)

17 books/user/src/cli.md Normal file
@ -0,0 +1,17 @@
# Command line

Command line options will override any overlapping [config](./config.md) values.

Usage: `cuprated [OPTIONS]`

<!-- TODO: automate the generation of the below table from `./cuprated --help` -->

| Option | Description | Default | Possible values |
|--------|-------------|---------|-----------------|
| `--network <NETWORK>` | The network to run on | `mainnet` | `mainnet`, `testnet`, `stagenet` |
| `--outbound-connections <OUTBOUND_CONNECTIONS>` | The amount of outbound clear-net connections to maintain | `64` | |
| `--config-file <CONFIG_FILE>` | The PATH of the `cuprated` config file | `Cuprated.toml` | |
| `--generate-config` | Generate a config file and print it to stdout | | |
| `--skip-config-warning` | Stops the missing config warning and startup delay if a config file is missing | | |
| `-v`, `--version` | Print misc version information in JSON | | |
| `-h`, `--help` | Print help | | |

16 books/user/src/config.md Normal file
@ -0,0 +1,16 @@
# Configuration
`cuprated` reads its configuration file `Cuprated.toml` on startup - this is in the [TOML](https://toml.io) file format.

`cuprated` will try to look for `Cuprated.toml` in the following places, in order:
- PATH specified in `--config-file`
- Current directory: `./Cuprated.toml`
- [OS specific directory](./resources/disk.md)

## `Cuprated.toml`
This is the default configuration file `cuprated` creates and uses, sourced from [here](https://github.com/Cuprate/cuprate/blob/main/binaries/cuprated/config/Cuprated.toml).

If `cuprated` is started with no [`--options`](./cli.md), then the configuration used will be equivalent to this config file.

```toml
{{#include ../../../binaries/cuprated/config/Cuprated.toml}}
```
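A minimal sketch of that lookup order (a hypothetical helper, not the actual `cuprated` implementation; the real OS specific directory comes from Cuprate's helper crate):

```rust
use std::path::PathBuf;

/// Resolve the config file path in the documented order:
/// `--config-file`, then `./Cuprated.toml`, then the OS specific directory.
fn resolve_config_path(cli_flag: Option<PathBuf>, os_config_dir: PathBuf) -> Option<PathBuf> {
    // 1. PATH specified in `--config-file` always wins.
    if let Some(path) = cli_flag {
        return Some(path);
    }

    // 2. Current directory.
    let cwd = PathBuf::from("./Cuprated.toml");
    if cwd.exists() {
        return Some(cwd);
    }

    // 3. OS specific configuration directory.
    let os_path = os_config_dir.join("Cuprated.toml");
    os_path.exists().then_some(os_path)
}
```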
12 books/user/src/getting-started/download.md Normal file

@ -0,0 +1,12 @@
# Download
For convenience, Cuprate offers pre-built binaries of `cuprated` for the platforms listed in [`Platform support`](../platform.md), built with GitHub CI in a non-reproducible way; it is highly recommended to build `cuprated` from source instead, see [`Building from source`](./source.md).

| Platform | Download |
|------------------------------|----------|
| Windows x86_64 | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-windows-x64.zip> |
| macOS x86_64 | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-macos-x64.tar.gz> |
| macOS ARM64 | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-macos-arm64.tar.gz> |
| Linux x86_64 (glibc >= 2.36) | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-linux-x64.tar.gz> |
| Linux ARM64 (glibc >= 2.36) | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-linux-arm64.tar.gz> |

All release files are archived and also available at <https://archive.hinto.rs>.
2 books/user/src/getting-started/intro.md Normal file

@ -0,0 +1,2 @@
# Getting started
This section contains information on downloading/building and running `cuprated`.
8 books/user/src/getting-started/run.md Normal file

@ -0,0 +1,8 @@
# Running
To run `cuprated`:

```bash
./cuprated --config-file Cuprated.toml
```

`cuprated` can be run without a config file, although it is recommended to use one; see [`Configuration`](../config.md) for a default config file.
60 books/user/src/getting-started/source.md Normal file

@ -0,0 +1,60 @@
# Building from source
To build `cuprated` from source you will need:

- `git`
- Up-to-date Rust toolchain
- Compiler toolchain
- Certain system dependencies

To install Rust, follow [these instructions](https://www.rust-lang.org/learn/get-started) or run:
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```

<!-- TODO: Windows build instruction -->

## Linux
Install the required system dependencies:

```bash
# Debian/Ubuntu
sudo apt install -y build-essential cmake git

# Arch
sudo pacman -Syu base-devel cmake git

# Fedora
sudo dnf install @development-tools gcc gcc-c++ cmake git
```

Clone the Cuprate repository and build:

```bash
git clone https://github.com/Cuprate/cuprate
cd cuprate/
cargo build --release --package cuprated
```

The built `cuprated` binary should be located at `target/release/cuprated`.

## macOS
Install [Homebrew](https://brew.sh):

```bash
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
```

Install the required system dependencies:
```bash
brew install cmake
```

Clone the Cuprate repository and build:

```bash
git clone https://github.com/Cuprate/cuprate
cd cuprate/
cargo build --release --package cuprated
```

The built `cuprated` binary should be located at `target/release/cuprated`.
17 books/user/src/getting-started/sys-req.md Normal file

@ -0,0 +1,17 @@
# System requirements

`cuprated` has the following hardware requirements.

| Hardware requirement | Recommended | Minimum |
|----------------------|-------------------|---------|
| CPU | 8+ cores | 1 core |
| RAM | 8+ GB | 2 GB |
| Disk (space) | 500+ GB | 300 GB |
| Disk (speed) | 100+ MB/s writes | ~5 MB/s writes |
| Network (speed) | 30+ MB/s up/down | ~1 MB/s up/down |
| Network (bandwidth) | 1 TB+ per month | ~300 GB per month |

Note that `cuprated`:
- Benefits greatly from high-end hardware
- Can run on weaker hardware (last tested on a `Raspberry Pi 4 (2GB RAM)` and `Raspberry Pi 5 (8 GB RAM)`)
- May require tweaking the config for your system, especially at either extreme of the hardware scale
43 books/user/src/getting-started/verify.md Normal file

@ -0,0 +1,43 @@
# Verifying
Verification of release files is optional but highly recommended. This ensures that you have not downloaded a tampered version of `cuprated`.

To verify release files of `cuprated`, follow these instructions:

<!--
TODO:
add some pictures, make this process easier to understand in
general e.g. similar to bitcoin/monero's verify section.
-->

### Download verification files for latest release
- Latest release: <https://github.com/Cuprate/cuprate/releases/latest>
- Hashes: <https://github.com/Cuprate/cuprate/releases/download/v0.0.1/SHA256SUMS>
- Hash signatures: <https://github.com/Cuprate/cuprate/releases/download/v0.0.1/SHA256SUMS.asc>

### Verify the hashes
After downloading the release files, compare their hashes with the `SHA256SUMS` file.

```bash
sha256sum --ignore-missing --check SHA256SUMS
```

You should see something like: `cuprate-0.0.1-linux-x64.tar.gz: OK`.

### Verify the hash signatures
Cuprate releases are signed by multiple individuals.

First, import the PGP keys for all individuals:
```bash
# Clone the Cuprate repository.
git clone https://github.com/Cuprate/cuprate

# Import all PGP keys.
gpg --import cuprate/misc/gpg_keys/*.asc
```

Then, confirm all signatures:
```bash
gpg --verify SHA256SUMS.asc
```

You should see `gpg: Good signature` for all keys.
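The same hash check can also be done programmatically. A minimal sketch using the `sha2` and `hex` crates (assumed dependencies, not part of the release tooling):

```rust
use std::{fs, io};

use sha2::{Digest, Sha256};

/// Compute the SHA-256 of a release file and compare it against the
/// hex digest copied from the `SHA256SUMS` file.
fn verify_release_file(path: &str, expected_hex: &str) -> io::Result<bool> {
    let mut hasher = Sha256::new();
    let mut file = fs::File::open(path)?;

    // `Sha256` implements `io::Write`, so the file can be streamed into it.
    io::copy(&mut file, &mut hasher)?;

    let digest = hasher.finalize();
    Ok(hex::encode(digest) == expected_hex.to_lowercase())
}
```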
1 books/user/src/images/CuprateLogo.svg Symbolic link

@ -0,0 +1 @@
../../../../misc/logo/CuprateLogo.svg

132 books/user/src/introduction.md Normal file

@ -0,0 +1,132 @@
<div align="center">
<img src="images/CuprateLogo.svg" width="50%"/>

[Cuprate](https://github.com/Cuprate/cuprate) is an alternative and independent [Monero](https://getmonero.org) node implementation that is focused on being fast, user-friendly, and backwards compatible with [`monerod`](https://github.com/monero-project/monero).

This project is currently a work-in-progress; the `cuprated` node can be run by users although it is not yet ready for production. This book contains brief sections documenting `cuprated` usage, however, be aware that it is **incomplete** and missing sections.

To get started, see: [`Getting started`](./getting-started/intro.md).

</div>

---

# FAQ
Frequently asked questions about Cuprate.

## Who?
Cuprate was started by [SyntheticBird45](https://github.com/SyntheticBird45) in [early 2023](https://github.com/Cuprate/cuprate/commit/2c7cb27548c727550ce4684cb31d0eafcf852c8e) and was later joined by [boog900](https://github.com/boog900), [hinto-janai](https://github.com/hinto-janai), and [other contributors](https://github.com/Cuprate/cuprate/graphs/contributors).

A few Cuprate contributors are funded by Monero's [Community Crowdfunding System](https://ccs.getmonero.org) to work on Cuprate and occasionally `monerod`.

## What is `cuprated`?
`monerod` is the [daemon](https://en.wikipedia.org/wiki/Daemon_(computing)) of the Monero project, the Monero node.

`cuprated` is the daemon of the Cuprate project, the Cuprate node.

Both operate on the same network, the Monero network, and are responsible for roughly the same tasks.

For more information on the role of alternative node implementations, see:
- <https://clientdiversity.org>
- <https://bchfaq.com/knowledge-base/what-are-the-full-node-implementations-for-bitcoin-cash>
- <https://zfnd.org/zebra-stable-release>

## Does `cuprated` replace `monerod`?
No.

`cuprated` cannot currently replace `monerod` in production environments. With that said, there will eventually be practical performance benefits for users running `cuprated`.

## Is it safe to run `cuprated`?
**⚠️ This project is still in development; do NOT use `cuprated` for any serious purposes ⚠️**

`cuprated` is fine to run for casual purposes and has a similar attack surface to other network-connected services.

See [`Resources`](./resources/intro.md) for information on what system resources `cuprated` will use.

## What files does `cuprated` create?
See [`Resources/Disk`](./resources/disk.md).

## What can `cuprated` currently do?
Cuprate's node (`cuprated`) can currently:

- Sync the blockchain and transaction pool
- Broadcast and relay blocks and transactions
- Help other peers sync their blockchain

## How fast does `cuprated` sync?
The current full verification sync timings are around ~7.5x faster than `monerod`.

In real terms, 16 hour full verification syncs and 4 hour fast-sync syncs have been reported on consumer grade hardware. On faster hardware (14 threads, 10Gbps networking), sub 2 hour fast-syncs have been reported.

Various testing results can be found [here](https://github.com/Cuprate/cuprate/issues/195).

## How to see the status of `cuprated`?
In the terminal running `cuprated`, type `status`.

Use the `help` command to see the full list of commands.

## How to tell `cuprated` is fully synced?
`cuprated` emits a message when it is fully synced: `synchronised with the network`.

It also logs its block height status when syncing, for example:

```text
2025-03-01T22:15:52.516944Z  INFO incoming_block_batch{start_height=3362022 len=29}: Successfully added block batch
```

- `start_height` is the height `cuprated` was previously at
- `len` is how many blocks have been added to the blockchain

`start_height` can be compared to a block height from `monerod` or a block explorer to see if `cuprated` is near synced.

## How big is the database?
As of March 4th 2025, `cuprated`'s database is ~240GB in size.

For reference, `monerod`'s database is ~200GB in size.

This is planned to be improved in the future.

## Is the database compatible with `monerod`?
No.

The database `cuprated` generates and uses cannot directly be used by `monerod` and vice-versa. Supporting this is possible but there are no current plans to do so.

## Can I connect a wallet to `cuprated`?
Not yet.

Wallets require the [daemon RPC API](https://docs.getmonero.org/rpc-library/monerod-rpc). This is actively being worked on to be backwards compatible with `monerod`, although it is not yet available.

## Can `cuprated` be used with an anonymity network like Tor?
Not yet (directly).

Tor is planned to be integrated into `cuprated` via [`arti`](https://arti.torproject.org), although this is not yet available.

In the meanwhile, solutions like [`torsocks`](https://github.com/dgoulet/torsocks) can redirect any program's networking through Tor, including `cuprated`. Note that this will slow down syncing speeds heavily.

## `cuprated` won't start because of a "killswitch", why?
The current alpha builds of `cuprated` contain killswitches that activate 1 week after the _next_ release is out. If the killswitch activates, you must upgrade to the [latest release](https://github.com/Cuprate/cuprate/releases/latest).

The reasoning for why this exists can be found here: <https://github.com/Cuprate/cuprate/pull/365>.

## What is the release schedule?
New versions of `cuprated` are planned to release every 4 weeks.

See [this GitHub issue](https://github.com/Cuprate/cuprate/issues/374) for more details.

## What is the versioning scheme?
`cuprated` is currently in alpha (`0.0.x`).

After sufficient testing and development, `cuprated` will enter beta (`0.x.y`) then stable (`x.y.z`) releases.

See [this GitHub issue](https://github.com/Cuprate/cuprate/issues/374) for more details.

## What is the current progress?
See [this Reddit thread](https://www.reddit.com/r/Monero/comments/1ij2sw6/cuprate_2024_progress_report) for a brief report on Cuprate's progress throughout 2024.

Things are always changing so feel free to join our [Matrix channel](https://matrix.to/#/#cuprate:monero.social) and ask questions.

## What is the current roadmap?
See [this GitHub issue](https://github.com/Cuprate/cuprate/issues/376) for Cuprate's rough 2025 roadmap.
6 books/user/src/license.md Normal file

@ -0,0 +1,6 @@
# License
The `cuprated` binary is distributed under the [AGPL-3.0 license](https://github.com/Cuprate/cuprate/blob/main/LICENSE-AGPL).

Much of the codebase that makes up `cuprated` is under the [MIT license](https://github.com/Cuprate/cuprate/blob/main/LICENSE-MIT).

See also: <https://github.com/Cuprate/cuprate/blob/main/LICENSE>.
46 books/user/src/platform.md Normal file

@ -0,0 +1,46 @@
# Platform support

Support for different platforms ("targets") is organized into three tiers,
each with a different set of guarantees. Targets are identified by the
[Rust "target triple"](https://doc.rust-lang.org/rustc/platform-support.html)
which is the string used when compiling `cuprated`.

| Attribute | Tier 1 | Tier 2 | Tier 3 |
|---------------------|--------|-------------------|--------|
| Official builds | 🟢 | 🟢 | 🔴 |
| Guaranteed to build | 🟢 | 🟢 | 🟡 |
| Automated testing | 🟢 | 🟡 (some targets) | 🔴 |
| Manual testing | 🟢 | 🟡 (sometimes) | 🔴 |

## Tier 1

Tier 1 targets can be thought of as "guaranteed to work".

| Target | Notes |
|-----------------------------|-------|
| `x86_64-unknown-linux-gnu` | x64 Linux (glibc 2.36+) |
| `aarch64-unknown-linux-gnu` | ARM64 Linux (glibc 2.36+) |
| `aarch64-apple-darwin` | ARM64 macOS (11.0+) |

## Tier 2

Tier 2 targets can be thought of as "guaranteed to build".

| Target | Notes |
|-----------------------------|-------|
| `x86_64-pc-windows-msvc` | x64 Windows (MSVC, Windows Server 2022+) |

## Tier 3

Tier 3 targets are those which the Cuprate codebase likely can support,
but which Cuprate does not build or test on a regular basis, so they may or may not work.
Official builds are not available, but may eventually be planned.

| Target | Notes |
|------------------------------|-------|
| `x86_64-unknown-linux-musl` | x64 Linux (musl 1.2.3) |
| `aarch64-unknown-linux-musl` | ARM64 Linux (musl 1.2.3) |
| `x86_64-unknown-freebsd` | x64 FreeBSD |
| `aarch64-unknown-freebsd` | ARM64 FreeBSD |
| `aarch64-pc-windows-msvc` | ARM64 Windows (MSVC, Windows Server 2022+) |
| `x86_64-apple-darwin` | x64 macOS |
55 books/user/src/resources/disk.md Normal file

@ -0,0 +1,55 @@
# Disk
`cuprated` requires at least ~300 GB of disk storage for operation, although 500+ GB is recommended.

## Cache
The directory used for cache files is:

| OS | Directory |
|---------|----------------------------------------|
| Windows | `C:\Users\User\AppData\Local\Cuprate\` |
| macOS | `/Users/User/Library/Caches/Cuprate/` |
| Linux | `/home/user/.cache/cuprate/` |

Although not recommended, this directory can be deleted without major disruption to `cuprated`.

The files in this directory are:

| File | Purpose |
|------------------------|---------|
| `addressbook/ClearNet` | P2P state for clear-net |

## Configuration
The directory used for files related to configuration is:

| OS | Directory |
|---------|----------------------------------------------------|
| Windows | `C:\Users\User\AppData\Roaming\Cuprate\` |
| macOS | `/Users/User/Library/Application Support/Cuprate/` |
| Linux | `/home/user/.config/cuprate/` |

The files in this directory are:

| File | Purpose |
|-----------------|---------|
| `Cuprated.toml` | `cuprated` configuration file |

## Data
The directory used for general data is:

| OS | Directory |
|---------|----------------------------------------------------|
| Windows | `C:\Users\User\AppData\Roaming\Cuprate\` |
| macOS | `/Users/User/Library/Application Support/Cuprate/` |
| Linux | `/home/user/.local/share/cuprate/` |

The files in this directory are:

<!-- TODO: document redb files -->

| File | Purpose |
|-----------------------|---------|
| `blockchain/data.mdb` | Blockchain database file |
| `blockchain/lock.mdb` | Blockchain database lock |
| `txpool/data.mdb` | Transaction pool database file |
| `txpool/lock.mdb` | Transaction pool database lock |
| `logs/{YYYY-MM-DD}` | Log files for each day |
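These per-OS locations follow the standard platform directory conventions. A minimal sketch using the `dirs` crate (an assumed dependency for illustration; not necessarily what Cuprate uses internally):

```rust
use std::path::PathBuf;

/// Where the cache table above comes from: the platform cache directory
/// plus a `Cuprate` (or `cuprate` on Linux) subdirectory.
fn cuprate_cache_dir() -> Option<PathBuf> {
    // `dirs::cache_dir()` returns e.g.:
    // Windows: C:\Users\User\AppData\Local
    // macOS:   /Users/User/Library/Caches
    // Linux:   /home/user/.cache
    let base = dirs::cache_dir()?;
    let subdir = if cfg!(target_os = "linux") { "cuprate" } else { "Cuprate" };
    Some(base.join(subdir))
}
```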
2 books/user/src/resources/intro.md Normal file

@ -0,0 +1,2 @@
# Resources
This section documents the system resources `cuprated` uses.
5 books/user/src/resources/ip.md Normal file

@ -0,0 +1,5 @@
# IP
`cuprated` currently binds to a single [IPv4 address](https://en.wikipedia.org/wiki/IPv4) for P2P connections.

By default, this IP address is `0.0.0.0`, which will bind to all available interfaces.
See the [`listen_on` option in the config file](../config.md) to manually set this IP address.
5 books/user/src/resources/ports.md Normal file

@ -0,0 +1,5 @@
# Ports
`cuprated` currently uses a single port to accept incoming P2P connections.

By default, this port is randomly selected.
See the [`p2p_port` option in the config file](../config.md) to manually set this port.

@ -1 +0,0 @@
# TODO
@ -1,2 +1,9 @@
# <https://rust-lang.github.io/rust-clippy/master/index.html#upper_case_acronyms>
upper-case-acronyms-aggressive = true
upper-case-acronyms-aggressive = true

# <https://rust-lang.github.io/rust-clippy/master/index.html#doc_markdown>
doc-valid-idents = [
"RandomX",
# This adds the rest of the default exceptions.
".."
]

@ -23,6 +23,7 @@ monero-serai = { workspace = true, features = ["std"] }
rayon = { workspace = true }
thread_local = { workspace = true }

indexmap = { workspace = true, features = ["std"] }
hex = { workspace = true }
rand = { workspace = true }

@ -52,9 +52,10 @@ impl AltChainContextCache {
block_weight: usize,
long_term_block_weight: usize,
timestamp: u64,
cumulative_difficulty: u128,
) {
if let Some(difficulty_cache) = &mut self.difficulty_cache {
difficulty_cache.new_block(height, timestamp, difficulty_cache.cumulative_difficulty());
difficulty_cache.new_block(height, timestamp, cumulative_difficulty);
}

if let Some(weight_cache) = &mut self.weight_cache {

@ -83,12 +84,8 @@ impl AltChainMap {
}

/// Add an alt chain cache to the map.
pub(crate) fn add_alt_cache(
&mut self,
prev_id: [u8; 32],
alt_cache: Box<AltChainContextCache>,
) {
self.alt_cache_map.insert(prev_id, alt_cache);
pub(crate) fn add_alt_cache(&mut self, alt_cache: Box<AltChainContextCache>) {
self.alt_cache_map.insert(alt_cache.top_hash, alt_cache);
}

/// Attempts to take an [`AltChainContextCache`] from the map, returning [`None`] if no cache is

@ -119,7 +116,7 @@ impl AltChainMap {
weight_cache: None,
difficulty_cache: None,
cached_rx_vm: None,
chain_height: top_height,
chain_height: top_height + 1,
top_hash: prev_id,
chain_id: None,
parent_chain,

@ -36,17 +36,11 @@ pub struct DifficultyCacheConfig {
pub window: usize,
pub cut: usize,
pub lag: usize,
/// If [`Some`] the difficulty cache will always return this value as the current difficulty.
pub fixed_difficulty: Option<u128>,
}

impl DifficultyCacheConfig {
/// Create a new difficulty cache config.
///
/// # Notes
/// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead.
pub const fn new(window: usize, cut: usize, lag: usize) -> Self {
Self { window, cut, lag }
}

/// Returns the total amount of blocks we need to track to calculate difficulty
pub const fn total_block_count(&self) -> usize {
self.window + self.lag

@ -64,6 +58,7 @@ impl DifficultyCacheConfig {
window: DIFFICULTY_WINDOW,
cut: DIFFICULTY_CUT,
lag: DIFFICULTY_LAG,
fixed_difficulty: None,
}
}
}

@ -297,6 +292,10 @@ fn next_difficulty(
cumulative_difficulties: &VecDeque<u128>,
hf: HardFork,
) -> u128 {
if let Some(fixed_difficulty) = config.fixed_difficulty {
return fixed_difficulty;
}

if timestamps.len() <= 1 {
return 1;
}

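A short sketch of why this field is useful: an override pinned in the config short-circuits the whole difficulty calculation, which makes deterministic tests (and private test networks) trivial. The struct below mirrors the fields added in the diff above; the surrounding code is illustrative only.

```rust
/// Illustrative mirror of the config from the diff above.
struct DifficultyCacheConfig {
    window: usize,
    cut: usize,
    lag: usize,
    /// If `Some`, always returned as the current difficulty.
    fixed_difficulty: Option<u128>,
}

fn next_difficulty(config: &DifficultyCacheConfig, timestamps_len: usize) -> u128 {
    // The override takes priority over everything else.
    if let Some(fixed) = config.fixed_difficulty {
        return fixed;
    }
    if timestamps_len <= 1 {
        return 1;
    }
    // Placeholder for the real windowed timestamp/difficulty calculation.
    1
}
```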
@ -195,7 +195,7 @@ pub struct NewBlockData {
/// A request to the blockchain context cache.
#[derive(Debug, Clone)]
pub enum BlockChainContextRequest {
/// Gets all the current `RandomX` VMs.
/// Gets all the current RandomX VMs.
CurrentRxVms,

/// Get the next difficulties for these blocks.

@ -297,7 +297,7 @@ pub enum BlockChainContextRequest {
/// This variant is private and is not callable from outside this crate, the block verifier service will
/// handle getting the randomX VM of an alt chain.
AltChainRxVM {
/// The height the `RandomX` VM is needed for.
/// The height the RandomX VM is needed for.
height: usize,
/// The chain to look in for the seed.
chain: Chain,

@ -310,8 +310,6 @@ pub enum BlockChainContextRequest {
/// This variant is private and is not callable from outside this crate, the block verifier service will
/// handle returning the alt cache to the context service.
AddAltChainContextCache {
/// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader).
prev_id: [u8; 32],
/// The cache.
cache: Box<AltChainContextCache>,
/// An internal token to prevent external crates calling this request.

@ -332,7 +330,7 @@ pub enum BlockChainContextResponse {

/// Response to [`BlockChainContextRequest::CurrentRxVms`]
///
/// A map of seed height to `RandomX` VMs.
/// A map of seed height to RandomX VMs.
RxVms(HashMap<usize, Arc<RandomXVm>>),

/// A list of difficulties.

@ -1,6 +1,6 @@
//! `RandomX` VM Cache
//! RandomX VM Cache
//!
//! This module keeps track of the `RandomX` VM to calculate the next blocks proof-of-work, if the block needs a randomX VM and potentially
//! This module keeps track of the RandomX VM to calculate the next blocks proof-of-work, if the block needs a randomX VM and potentially
//! more VMs around this height.
//!
use std::{

@ -34,11 +34,11 @@ pub const RX_SEEDS_CACHED: usize = 2;
/// A multithreaded randomX VM.
#[derive(Debug)]
pub struct RandomXVm {
/// These `RandomX` VMs all share the same cache.
/// These RandomX VMs all share the same cache.
vms: ThreadLocal<VmInner>,
/// The `RandomX` cache.
/// The RandomX cache.
cache: RandomXCache,
/// The flags used to start the `RandomX` VMs.
/// The flags used to start the RandomX VMs.
flags: RandomXFlag,
}

@ -161,7 +161,7 @@ impl RandomXVmCache {
Ok(alt_vm)
}

/// Get the main-chain `RandomX` VMs.
/// Get the main-chain RandomX VMs.
pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVm>> {
match self.seeds.len().checked_sub(self.vms.len()) {
// No difference in the amount of seeds to VMs.

@ -213,7 +213,7 @@ impl RandomXVmCache {
self.vms.clone()
}

/// Removes all the `RandomX` VMs above the `new_height`.
/// Removes all the RandomX VMs above the `new_height`.
pub fn pop_blocks_main_chain(&mut self, new_height: usize) {
self.seeds.retain(|(height, _)| *height < new_height);
self.vms.retain(|height, _| *height < new_height);

@ -303,12 +303,8 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
.get_alt_vm(height, chain, &mut self.database)
.await?,
),
BlockChainContextRequest::AddAltChainContextCache {
prev_id,
cache,
_token,
} => {
self.alt_chain_cache_map.add_alt_cache(prev_id, cache);
BlockChainContextRequest::AddAltChainContextCache { cache, _token } => {
self.alt_chain_cache_map.add_alt_cache(cache);
BlockChainContextResponse::Ok
}
BlockChainContextRequest::HardForkInfo(_)

@ -5,27 +5,28 @@ edition = "2021"
license = "MIT"

[[bin]]
name = "cuprate-fast-sync-create-hashes"
name = "create-fs-file"
path = "src/create.rs"

[dependencies]
cuprate-blockchain = { workspace = true }
cuprate-consensus = { workspace = true }
cuprate-consensus-rules = { workspace = true }
cuprate-consensus-context = { workspace = true }
cuprate-types = { workspace = true }
cuprate-helper = { workspace = true, features = ["cast"] }
cuprate-p2p = { workspace = true }
cuprate-p2p-core = { workspace = true }

clap = { workspace = true, features = ["derive", "std"] }
hex = { workspace = true }
hex-literal = { workspace = true }
monero-serai = { workspace = true }
sha3 = { version = "0.10.8" }
thiserror = { workspace = true }
blake3 = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tower = { workspace = true }

[dev-dependencies]
proptest = { workspace = true }
tokio-test = { workspace = true }
tempfile = { workspace = true }

[lints]
workspace = true

@ -3,7 +3,7 @@
reason = "binary shares same Cargo.toml as library"
)]

use std::{fmt::Write, fs::write};
use std::fs::write;

use clap::Parser;
use tower::{Service, ServiceExt};

@ -16,48 +16,30 @@ use cuprate_types::{
Chain,
};

use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes};

const BATCH_SIZE: usize = 512;
use cuprate_fast_sync::FAST_SYNC_BATCH_LEN;

async fn read_batch(
handle: &mut BlockchainReadHandle,
height_from: usize,
) -> DbResult<Vec<BlockId>> {
let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE);
) -> DbResult<Vec<[u8; 32]>> {
let request = BlockchainReadRequest::BlockHashInRange(
height_from..(height_from + FAST_SYNC_BATCH_LEN),
Chain::Main,
);
let response_channel = handle.ready().await?.call(request);
let response = response_channel.await?;

for height in height_from..(height_from + BATCH_SIZE) {
let request = BlockchainReadRequest::BlockHash(height, Chain::Main);
let response_channel = handle.ready().await?.call(request);
let response = response_channel.await?;

match response {
BlockchainResponse::BlockHash(block_id) => block_ids.push(block_id),
_ => unreachable!(),
}
}
let BlockchainResponse::BlockHashInRange(block_ids) = response else {
unreachable!()
};

Ok(block_ids)
}

fn generate_hex(hashes: &[HashOfHashes]) -> String {
let mut s = String::new();

writeln!(&mut s, "[").unwrap();

for hash in hashes {
writeln!(&mut s, "\thex!(\"{}\"),", hex::encode(hash)).unwrap();
}

writeln!(&mut s, "]").unwrap();

s
}

#[derive(Parser)]
#[command(version, about, long_about = None)]
struct Args {
#[arg(short, long)]
#[arg(long)]
height: usize,
}

@ -74,7 +56,7 @@ async fn main() {

let mut height = 0_usize;

while height < height_target {
while (height + FAST_SYNC_BATCH_LEN) < height_target {
if let Ok(block_ids) = read_batch(&mut read_handle, height).await {
let hash = hash_of_hashes(block_ids.as_slice());
hashes_of_hashes.push(hash);

@ -82,13 +64,19 @@ async fn main() {
println!("Failed to read next batch from database");
break;
}
height += BATCH_SIZE;
height += FAST_SYNC_BATCH_LEN;

println!("height: {height}");
}

drop(read_handle);

let generated = generate_hex(&hashes_of_hashes);
write("src/data/hashes_of_hashes", generated).expect("Could not write file");
write("fast_sync_hashes.bin", hashes_of_hashes.concat().as_slice())
.expect("Could not write file");

println!("Generated hashes up to block height {height}");
}

pub fn hash_of_hashes(hashes: &[[u8; 32]]) -> [u8; 32] {
blake3::hash(hashes.concat().as_slice()).into()
}

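To make the batching concrete: the binary above writes `fast_sync_hashes.bin` as the raw concatenation of one BLAKE3 hash per 512-block batch. A small sketch of producing one such hash and reading the file layout back, using the `blake3` crate as in the diff (the parsing helper is illustrative, not part of the tool):

```rust
const FAST_SYNC_BATCH_LEN: usize = 512;

/// One fast-sync hash = BLAKE3 over the concatenated block hashes of a batch.
fn hash_of_hashes(hashes: &[[u8; 32]]) -> [u8; 32] {
    blake3::hash(hashes.concat().as_slice()).into()
}

/// Parse a `fast_sync_hashes.bin`-style byte string back into 32-byte hashes.
fn parse_fast_sync_file(bytes: &[u8]) -> Vec<[u8; 32]> {
    assert_eq!(bytes.len() % 32, 0, "file must be whole 32-byte hashes");
    bytes
        .chunks_exact(32)
        .map(|c| c.try_into().unwrap())
        .collect()
}

fn main() {
    // A fake batch of 512 block hashes.
    let batch = vec![[0xAB_u8; 32]; FAST_SYNC_BATCH_LEN];
    let file_bytes = hash_of_hashes(&batch).to_vec();

    let parsed = parse_fast_sync_file(&file_bytes);
    assert_eq!(parsed.len(), 1);
    assert_eq!(parsed[0], hash_of_hashes(&batch));
}
```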
@ -1,12 +0,0 @@
[
hex_literal::hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
hex_literal::hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
hex_literal::hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
hex_literal::hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
hex_literal::hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
hex_literal::hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
hex_literal::hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
hex_literal::hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
hex_literal::hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
hex_literal::hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
]

@ -1,225 +1,219 @@
use std::{
cmp,
collections::HashMap,
future::Future,
pin::Pin,
task::{Context, Poll},
cmp::min,
collections::{HashMap, VecDeque},
sync::OnceLock,
};

use blake3::Hasher;
use monero_serai::{
block::Block,
transaction::{Input, Transaction},
};
use tower::Service;
use tower::{Service, ServiceExt};

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_consensus::transactions::new_tx_verification_data;
use cuprate_consensus_context::BlockchainContextService;
use cuprate_consensus_rules::{miner_tx::MinerTxError, ConsensusError};
use cuprate_helper::cast::u64_to_usize;
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use cuprate_consensus_context::BlockchainContext;
use cuprate_p2p::block_downloader::ChainEntry;
use cuprate_p2p_core::NetworkZone;
use cuprate_types::{
blockchain::{BlockchainReadRequest, BlockchainResponse},
Chain, VerifiedBlockInformation, VerifiedTransactionInformation,
};

use crate::{hash_of_hashes, BlockId, HashOfHashes};
/// A [`OnceLock`] representing the fast sync hashes.
static FAST_SYNC_HASHES: OnceLock<&[[u8; 32]]> = OnceLock::new();

#[cfg(not(test))]
static HASHES_OF_HASHES: &[HashOfHashes] = &include!("./data/hashes_of_hashes");
/// The size of a batch of block hashes to hash to create a fast sync hash.
pub const FAST_SYNC_BATCH_LEN: usize = 512;

#[cfg(not(test))]
const BATCH_SIZE: usize = 512;

#[cfg(test)]
static HASHES_OF_HASHES: &[HashOfHashes] = &[
hex_literal::hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"),
hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"),
];

#[cfg(test)]
const BATCH_SIZE: usize = 4;

#[inline]
fn max_height() -> u64 {
(HASHES_OF_HASHES.len() * BATCH_SIZE) as u64
/// Returns the height of the first block not included in the embedded hashes.
///
/// # Panics
///
/// This function will panic if [`set_fast_sync_hashes`] has not been called.
pub fn fast_sync_stop_height() -> usize {
FAST_SYNC_HASHES.get().unwrap().len() * FAST_SYNC_BATCH_LEN
}

#[derive(Debug, PartialEq, Eq)]
pub struct ValidBlockId(BlockId);

fn valid_block_ids(block_ids: &[BlockId]) -> Vec<ValidBlockId> {
block_ids.iter().map(|b| ValidBlockId(*b)).collect()
/// Sets the hashes to use for fast-sync.
///
/// # Panics
///
/// This will panic if this is called more than once.
pub fn set_fast_sync_hashes(hashes: &'static [[u8; 32]]) {
FAST_SYNC_HASHES.set(hashes).unwrap();
}

#[expect(clippy::large_enum_variant)]
pub enum FastSyncRequest {
ValidateHashes {
start_height: u64,
block_ids: Vec<BlockId>,
},
ValidateBlock {
block: Block,
txs: HashMap<[u8; 32], Transaction>,
token: ValidBlockId,
},
}

#[expect(clippy::large_enum_variant)]
#[derive(Debug, PartialEq, Eq)]
pub enum FastSyncResponse {
ValidateHashes {
validated_hashes: Vec<ValidBlockId>,
unknown_hashes: Vec<BlockId>,
},
ValidateBlock(VerifiedBlockInformation),
}

#[derive(thiserror::Error, Debug, PartialEq, Eq)]
pub enum FastSyncError {
#[error("Block does not match its expected hash")]
BlockHashMismatch,

#[error("Start height must be a multiple of the batch size")]
InvalidStartHeight,

#[error("Hash of hashes mismatch")]
Mismatch,

#[error("Given range too small for fast sync (less than one batch)")]
NothingToDo,

#[error("Start height too high for fast sync")]
OutOfRange,

#[error("Block does not have the expected height entry")]
BlockHeightMismatch,

#[error("Block does not contain the expected transaction list")]
TxsIncludedWithBlockIncorrect,

#[error(transparent)]
Consensus(#[from] ConsensusError),

#[error(transparent)]
MinerTx(#[from] MinerTxError),

#[error("Database error: {0}")]
DbErr(String),
}

impl From<tower::BoxError> for FastSyncError {
fn from(error: tower::BoxError) -> Self {
Self::DbErr(error.to_string())
}
}

pub struct FastSyncService {
context_svc: BlockchainContextService,
}

impl FastSyncService {
#[expect(dead_code)]
pub(crate) const fn new(context_svc: BlockchainContextService) -> Self {
Self { context_svc }
}
}

impl Service<FastSyncRequest> for FastSyncService {
type Response = FastSyncResponse;
type Error = FastSyncError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
/// Validates that the given [`ChainEntry`]s are in the fast-sync hashes.
///
/// `entries` should be a list of sequential entries.
/// `start_height` should be the height of the first block in the first entry.
///
/// Returns a tuple, the first element being the entries that are valid*, the second being
/// the entries we do not know are valid and should be passed in again when we have more entries.
///
/// *once we are past the fast sync blocks all entries will be returned as valid as
/// we cannot check their validity here.
///
/// There may be more entries returned than passed in as entries could be split.
///
/// # Panics
///
/// This will panic if [`set_fast_sync_hashes`] has not been called.
pub async fn validate_entries<N: NetworkZone>(
mut entries: VecDeque<ChainEntry<N>>,
start_height: usize,
blockchain_read_handle: &mut BlockchainReadHandle,
) -> Result<(VecDeque<ChainEntry<N>>, VecDeque<ChainEntry<N>>), tower::BoxError> {
// If we are past the top fast sync block, return all entries as valid.
if start_height >= fast_sync_stop_height() {
return Ok((entries, VecDeque::new()));
}

fn call(&mut self, req: FastSyncRequest) -> Self::Future {
let mut context_svc = self.context_svc.clone();
/*
The algorithm used here needs to preserve which peer told us about which blocks, so we cannot
simply join all the hashes together and return all the ones that can be validated and the ones that
can't; we need to keep the batches separate.

Box::pin(async move {
match req {
FastSyncRequest::ValidateHashes {
start_height,
block_ids,
} => validate_hashes(start_height, &block_ids),
FastSyncRequest::ValidateBlock { block, txs, token } => {
validate_block(&mut context_svc, block, txs, &token)
}
}
})
}
}
The first step is to calculate how many hashes we need from the blockchain to make up the first
fast-sync hash.

fn validate_hashes(
start_height: u64,
block_ids: &[BlockId],
) -> Result<FastSyncResponse, FastSyncError> {
let start_height_usize = u64_to_usize(start_height);
Then we will take out all the batches at the end for which we cannot make up a full fast-sync
hash; we will split a batch if it can only be partially validated.

if start_height_usize % BATCH_SIZE != 0 {
return Err(FastSyncError::InvalidStartHeight);
With the remaining hashes from the blockchain and the hashes in the batches we can validate, we
work on calculating the fast sync hashes and comparing them to the ones in [`FAST_SYNC_HASHES`].
*/

// First calculate the start and stop for this range of hashes.
let hashes_start_height = (start_height / FAST_SYNC_BATCH_LEN) * FAST_SYNC_BATCH_LEN;
let amount_of_hashes = entries.iter().map(|e| e.ids.len()).sum::<usize>();
let last_height = amount_of_hashes + start_height;

let hashes_stop_height = min(
(last_height / FAST_SYNC_BATCH_LEN) * FAST_SYNC_BATCH_LEN,
fast_sync_stop_height(),
);

let mut hashes_stop_diff_last_height = last_height - hashes_stop_height;

// Get the hashes we are missing to create the first fast-sync hash.
let BlockchainResponse::BlockHashInRange(starting_hashes) = blockchain_read_handle
.ready()
.await?
.call(BlockchainReadRequest::BlockHashInRange(
hashes_start_height..start_height,
Chain::Main,
))
.await?
else {
unreachable!()
};

// If we don't have enough hashes to make up a batch we can't validate any.
if amount_of_hashes + starting_hashes.len() < FAST_SYNC_BATCH_LEN {
return Ok((VecDeque::new(), entries));
}

if start_height >= max_height() {
return Err(FastSyncError::OutOfRange);
}
let mut unknown = VecDeque::new();

let stop_height = start_height_usize + block_ids.len();
// Start moving from the back of the batches, taking enough hashes out so we are only left
// with hashes that can be verified.
while !entries.is_empty() && hashes_stop_diff_last_height != 0 {
let back = entries.back_mut().unwrap();

let batch_from = start_height_usize / BATCH_SIZE;
let batch_to = cmp::min(stop_height / BATCH_SIZE, HASHES_OF_HASHES.len());
let n_batches = batch_to - batch_from;
if back.ids.len() >= hashes_stop_diff_last_height {
// This batch is partially valid so split it.
unknown.push_front(ChainEntry {
ids: back
.ids
.drain((back.ids.len() - hashes_stop_diff_last_height)..)
.collect(),
peer: back.peer,
handle: back.handle.clone(),
});

if n_batches == 0 {
return Err(FastSyncError::NothingToDo);
}

for i in 0..n_batches {
let batch = &block_ids[BATCH_SIZE * i..BATCH_SIZE * (i + 1)];
let actual = hash_of_hashes(batch);
let expected = HASHES_OF_HASHES[batch_from + i];

if expected != actual {
return Err(FastSyncError::Mismatch);
break;
}

// Add this batch to the front of the unknowns, we do not know its validity.
let back = entries.pop_back().unwrap();
hashes_stop_diff_last_height -= back.ids.len();
unknown.push_front(back);
}

let validated_hashes = valid_block_ids(&block_ids[..n_batches * BATCH_SIZE]);
let unknown_hashes = block_ids[n_batches * BATCH_SIZE..].to_vec();
// Start verifying the hashes.
let mut hasher = Hasher::default();
let mut last_i = 1;
for (i, hash) in starting_hashes
.iter()
.chain(entries.iter().flat_map(|e| e.ids.iter()))
.enumerate()
{
hasher.update(hash);

Ok(FastSyncResponse::ValidateHashes {
validated_hashes,
unknown_hashes,
})
if (i + 1) % FAST_SYNC_BATCH_LEN == 0 {
let got_hash = hasher.finalize();

if got_hash
!= FAST_SYNC_HASHES.get().unwrap()
[get_hash_index_for_height(hashes_start_height + i)]
{
return Err("Hashes do not match".into());
}
hasher.reset();
}

last_i = i + 1;
}
// Make sure we actually checked all hashes.
assert_eq!(last_i % FAST_SYNC_BATCH_LEN, 0);

Ok((entries, unknown))
}

fn validate_block(
context_svc: &mut BlockchainContextService,
block: Block,
mut txs: HashMap<[u8; 32], Transaction>,
token: &ValidBlockId,
) -> Result<FastSyncResponse, FastSyncError> {
let block_chain_ctx = context_svc.blockchain_context().clone();
/// Get the index of the hash that contains this block in the fast sync hashes.
const fn get_hash_index_for_height(height: usize) -> usize {
height / FAST_SYNC_BATCH_LEN
}

/// Creates a [`VerifiedBlockInformation`] from a block known to be valid.
///
/// # Panics
///
/// This may panic if used on an invalid block.
pub fn block_to_verified_block_information(
block: Block,
txs: Vec<Transaction>,
blockchain_ctx: &BlockchainContext,
) -> VerifiedBlockInformation {
let block_hash = block.hash();
if block_hash != token.0 {
return Err(FastSyncError::BlockHashMismatch);
}

let block_blob = block.serialize();

let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else {
return Err(FastSyncError::MinerTx(MinerTxError::InputNotOfTypeGen));
panic!("fast sync block invalid");
};
if *height != block_chain_ctx.chain_height {
return Err(FastSyncError::BlockHeightMismatch);
}

assert_eq!(
*height, blockchain_ctx.chain_height,
"fast sync block invalid"
);

let mut txs = txs
.into_iter()
.map(|tx| {
let data = new_tx_verification_data(tx).expect("fast sync block invalid");

(data.tx_hash, data)
})
.collect::<HashMap<_, _>>();

let mut verified_txs = Vec::with_capacity(txs.len());
for tx in &block.transactions {
let tx = txs
.remove(tx)
.ok_or(FastSyncError::TxsIncludedWithBlockIncorrect)?;
let data = txs.remove(tx).expect("fast sync block invalid");

let data = new_tx_verification_data(tx)?;
verified_txs.push(VerifiedTransactionInformation {
tx_blob: data.tx_blob,
tx_weight: data.tx_weight,

@ -243,68 +237,161 @@ fn validate_block(
let weight = block.miner_transaction.weight()
+ verified_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();

Ok(FastSyncResponse::ValidateBlock(VerifiedBlockInformation {
VerifiedBlockInformation {
block_blob,
txs: verified_txs,
block_hash,
pow_hash: [0_u8; 32],
pow_hash: [u8::MAX; 32],
height: *height,
generated_coins,
weight,
long_term_weight: block_chain_ctx.next_block_long_term_weight(weight),
cumulative_difficulty: block_chain_ctx.cumulative_difficulty
+ block_chain_ctx.next_difficulty,
long_term_weight: blockchain_ctx.next_block_long_term_weight(weight),
cumulative_difficulty: blockchain_ctx.cumulative_difficulty + blockchain_ctx.next_difficulty,
block,
}))
}
}

#[cfg(test)]
mod tests {
use super::*;
use std::{collections::VecDeque, slice, sync::LazyLock};

#[test]
fn test_validate_hashes_errors() {
let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]];
assert_eq!(
validate_hashes(3, &[]),
Err(FastSyncError::InvalidStartHeight)
);
assert_eq!(
validate_hashes(3, &ids),
Err(FastSyncError::InvalidStartHeight)
);
use proptest::proptest;

assert_eq!(validate_hashes(20, &[]), Err(FastSyncError::OutOfRange));
|
||||
assert_eq!(validate_hashes(20, &ids), Err(FastSyncError::OutOfRange));
|
||||
use cuprate_p2p::block_downloader::ChainEntry;
|
||||
use cuprate_p2p_core::{client::InternalPeerID, handles::HandleBuilder, ClearNet};
|
||||
|
||||
assert_eq!(validate_hashes(4, &[]), Err(FastSyncError::NothingToDo));
|
||||
assert_eq!(
|
||||
validate_hashes(4, &ids[..3]),
|
||||
Err(FastSyncError::NothingToDo)
|
||||
);
|
||||
}
|
||||
use crate::{
|
||||
fast_sync_stop_height, set_fast_sync_hashes, validate_entries, FAST_SYNC_BATCH_LEN,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_validate_hashes_success() {
|
||||
let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]];
|
||||
let validated_hashes = valid_block_ids(&ids[0..4]);
|
||||
let unknown_hashes = ids[4..].to_vec();
|
||||
assert_eq!(
|
||||
validate_hashes(0, &ids),
|
||||
Ok(FastSyncResponse::ValidateHashes {
|
||||
validated_hashes,
|
||||
unknown_hashes
|
||||
static HASHES: LazyLock<&[[u8; 32]]> = LazyLock::new(|| {
|
||||
let hashes = (0..FAST_SYNC_BATCH_LEN * 2000)
|
||||
.map(|i| {
|
||||
let mut ret = [0; 32];
|
||||
ret[..8].copy_from_slice(&i.to_le_bytes());
|
||||
ret
|
||||
})
|
||||
);
|
||||
}
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
#[test]
|
||||
fn test_validate_hashes_mismatch() {
|
||||
let ids = [
|
||||
[1_u8; 32], [2_u8; 32], [3_u8; 32], [5_u8; 32], [1_u8; 32], [2_u8; 32], [3_u8; 32],
|
||||
[4_u8; 32],
|
||||
];
|
||||
assert_eq!(validate_hashes(0, &ids), Err(FastSyncError::Mismatch));
|
||||
assert_eq!(validate_hashes(4, &ids), Err(FastSyncError::Mismatch));
|
||||
let hashes = hashes.leak();
|
||||
|
||||
let fast_sync_hashes = hashes
|
||||
.chunks(FAST_SYNC_BATCH_LEN)
|
||||
.map(|chunk| {
|
||||
let len = chunk.len() * 32;
|
||||
let bytes = chunk.as_ptr().cast::<u8>();
|
||||
|
||||
// SAFETY:
|
||||
// We are casting a valid [[u8; 32]] to a [u8], no alignment requirements and we are using it
|
||||
// within the [[u8; 32]]'s lifetime.
|
||||
unsafe { blake3::hash(slice::from_raw_parts(bytes, len)).into() }
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
set_fast_sync_hashes(fast_sync_hashes.leak());
|
||||
|
||||
hashes
|
||||
});
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn valid_entry(len in 0_usize..1_500_000) {
|
||||
let mut ids = HASHES.to_vec();
|
||||
ids.resize(len, [0_u8; 32]);
|
||||
|
||||
let handle = HandleBuilder::new().build();
|
||||
|
||||
let entry = ChainEntry {
|
||||
ids,
|
||||
peer: InternalPeerID::Unknown(1),
|
||||
handle: handle.1
|
||||
};
|
||||
|
||||
let data_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
tokio_test::block_on(async move {
|
||||
let blockchain_config = cuprate_blockchain::config::ConfigBuilder::new()
|
||||
.data_directory(data_dir.path().to_path_buf())
|
||||
.build();
|
||||
|
||||
let (mut blockchain_read_handle, _, _) =
|
||||
cuprate_blockchain::service::init(blockchain_config).unwrap();
|
||||
|
||||
|
||||
let ret = validate_entries::<ClearNet>(VecDeque::from([entry]), 0, &mut blockchain_read_handle).await.unwrap();
|
||||
|
||||
let len_left = ret.0.iter().map(|e| e.ids.len()).sum::<usize>();
|
||||
let len_right = ret.1.iter().map(|e| e.ids.len()).sum::<usize>();
|
||||
|
||||
assert_eq!(len_left + len_right, len);
|
||||
assert!(len_left <= fast_sync_stop_height());
|
||||
assert!(len_right < FAST_SYNC_BATCH_LEN || len > fast_sync_stop_height());
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_hash_entries(len in 0_usize..1_500_000) {
|
||||
let handle = HandleBuilder::new().build();
|
||||
let entries = (0..len).map(|i| {
|
||||
ChainEntry {
|
||||
ids: vec![HASHES.get(i).copied().unwrap_or_default()],
|
||||
peer: InternalPeerID::Unknown(1),
|
||||
handle: handle.1.clone()
|
||||
}
|
||||
}).collect();
|
||||
|
||||
let data_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
tokio_test::block_on(async move {
|
||||
let blockchain_config = cuprate_blockchain::config::ConfigBuilder::new()
|
||||
.data_directory(data_dir.path().to_path_buf())
|
||||
.build();
|
||||
|
||||
let (mut blockchain_read_handle, _, _) =
|
||||
cuprate_blockchain::service::init(blockchain_config).unwrap();
|
||||
|
||||
|
||||
let ret = validate_entries::<ClearNet>(entries, 0, &mut blockchain_read_handle).await.unwrap();
|
||||
|
||||
let len_left = ret.0.iter().map(|e| e.ids.len()).sum::<usize>();
|
||||
let len_right = ret.1.iter().map(|e| e.ids.len()).sum::<usize>();
|
||||
|
||||
assert_eq!(len_left + len_right, len);
|
||||
assert!(len_left <= fast_sync_stop_height());
|
||||
assert!(len_right < FAST_SYNC_BATCH_LEN || len > fast_sync_stop_height());
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn not_enough_hashes(len in 0_usize..FAST_SYNC_BATCH_LEN) {
|
||||
let hashes_start_height = FAST_SYNC_BATCH_LEN * 1234;
|
||||
|
||||
let handle = HandleBuilder::new().build();
|
||||
let entry = ChainEntry {
|
||||
ids: HASHES[hashes_start_height..(hashes_start_height + len)].to_vec(),
|
||||
peer: InternalPeerID::Unknown(1),
|
||||
handle: handle.1
|
||||
};
|
||||
|
||||
let data_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
tokio_test::block_on(async move {
|
||||
let blockchain_config = cuprate_blockchain::config::ConfigBuilder::new()
|
||||
.data_directory(data_dir.path().to_path_buf())
|
||||
.build();
|
||||
|
||||
let (mut blockchain_read_handle, _, _) =
|
||||
cuprate_blockchain::service::init(blockchain_config).unwrap();
|
||||
|
||||
|
||||
let ret = validate_entries::<ClearNet>(VecDeque::from([entry]), 0, &mut blockchain_read_handle).await.unwrap();
|
||||
|
||||
let len_left = ret.0.iter().map(|e| e.ids.len()).sum::<usize>();
|
||||
let len_right = ret.1.iter().map(|e| e.ids.len()).sum::<usize>();
|
||||
|
||||
assert_eq!(len_right, len);
|
||||
assert_eq!(len_left, 0);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
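The validation above checks each full `FAST_SYNC_BATCH_LEN`-sized run of block IDs by BLAKE3-hashing it and comparing against an embedded hash, mirroring the test setup. A minimal standalone sketch of that idea (the `batches_valid` helper and its parameters are hypothetical stand-ins, not the crate's API):

```rust
/// A sketch of the batch-hash check, assuming `known` holds one BLAKE3 hash
/// per full batch of `batch_len` block IDs (hypothetical helper).
fn batches_valid(ids: &[[u8; 32]], known: &[[u8; 32]], batch_len: usize) -> bool {
    ids.chunks(batch_len)
        // Only full batches are checkable; a trailing partial batch stays "unknown".
        .take_while(|chunk| chunk.len() == batch_len)
        .zip(known)
        .all(|(chunk, expected)| {
            // Hash the concatenated 32-byte IDs and compare to the known hash.
            let bytes: Vec<u8> = chunk.iter().flatten().copied().collect();
            blake3::hash(&bytes).as_bytes() == expected
        })
}
```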
@@ -4,7 +4,9 @@ use cuprate_blockchain as _;
use hex as _;
use tokio as _;

pub mod fast_sync;
pub mod util;
mod fast_sync;

pub use util::{hash_of_hashes, BlockId, HashOfHashes};
pub use fast_sync::{
    block_to_verified_block_information, fast_sync_stop_height, set_fast_sync_hashes,
    validate_entries, FAST_SYNC_BATCH_LEN,
};
@ -1,8 +0,0 @@
|
|||
use sha3::{Digest, Keccak256};
|
||||
|
||||
pub type BlockId = [u8; 32];
|
||||
pub type HashOfHashes = [u8; 32];
|
||||
|
||||
pub fn hash_of_hashes(hashes: &[BlockId]) -> HashOfHashes {
|
||||
Keccak256::digest(hashes.concat().as_slice()).into()
|
||||
}
|
|
@@ -21,6 +21,7 @@ curve25519-dalek = { workspace = true, features = ["alloc", "zeroize", "precompu

rand = { workspace = true, features = ["std", "std_rng"] }

indexmap = { workspace = true, features = ["std"] }
hex = { workspace = true, features = ["std"] }
hex-literal = { workspace = true }
crypto-bigint = { workspace = true }
@@ -44,19 +44,19 @@ pub enum BlockError {
    MinerTxError(#[from] MinerTxError),
}

/// A trait to represent the `RandomX` VM.
/// A trait to represent the RandomX VM.
pub trait RandomX {
    type Error;

    fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error>;
}

/// Returns if this height is a `RandomX` seed height.
/// Returns if this height is a RandomX seed height.
pub const fn is_randomx_seed_height(height: usize) -> bool {
    height % RX_SEEDHASH_EPOCH_BLOCKS == 0
}

/// Returns the `RandomX` seed height for this block.
/// Returns the RandomX seed height for this block.
///
/// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#randomx-seed>
pub const fn randomx_seed_height(height: usize) -> usize {
@@ -1,17 +1,20 @@
use std::cmp::Ordering;

use curve25519_dalek::EdwardsPoint;
use monero_serai::{
    io::decompress_point,
    ringct::RctType,
    transaction::{Input, Output, Timelock, Transaction},
};

pub use cuprate_types::TxVersion;

use crate::{
    batch_verifier::BatchVerifier, blocks::penalty_free_zone, check_point_canonically_encoded,
    is_decomposed_amount, HardFork,
};

// re-export.
pub use cuprate_types::TxVersion;

mod contextual_data;
mod ring_ct;
mod ring_signatures;

@@ -327,7 +330,10 @@ fn check_key_images(input: &Input) -> Result<(), TransactionError> {
    match input {
        Input::ToKey { key_image, .. } => {
            // this happens in monero-serai but we may as well duplicate the check.
            if !key_image.is_torsion_free() {
            if !decompress_point(*key_image)
                .as_ref()
                .is_some_and(EdwardsPoint::is_torsion_free)
            {
                return Err(TransactionError::KeyImageIsNotInPrimeSubGroup);
            }
        }
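The new check decompresses the key image before testing torsion-freeness, since key images now travel in compressed form (`decompress_point` above is monero-serai's helper). A rough equivalent using curve25519-dalek's own API directly, as a sketch rather than the crate's code:

```rust
use curve25519_dalek::edwards::CompressedEdwardsY;

/// Returns `true` if the compressed point decodes to a point in the
/// prime-order subgroup (i.e. it decompresses and is torsion-free).
fn key_image_in_prime_subgroup(key_image: CompressedEdwardsY) -> bool {
    key_image
        .decompress()
        .is_some_and(|point| point.is_torsion_free())
}
```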
@@ -388,7 +394,7 @@ fn check_ring_members_unique(input: &Input, hf: HardFork) -> Result<(), Transact
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#sorted-inputs>
fn check_inputs_sorted(inputs: &[Input], hf: HardFork) -> Result<(), TransactionError> {
    let get_ki = |inp: &Input| match inp {
        Input::ToKey { key_image, .. } => Ok(key_image.compress().to_bytes()),
        Input::ToKey { key_image, .. } => Ok(key_image.to_bytes()),
        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
    };
@@ -1,9 +1,7 @@
use std::{
    cmp::{max, min},
    collections::{HashMap, HashSet},
};
use std::cmp::{max, min};

use curve25519_dalek::EdwardsPoint;
use curve25519_dalek::edwards::CompressedEdwardsY;
use indexmap::{IndexMap, IndexSet};
use monero_serai::transaction::{Input, Timelock};

use crate::{transactions::TransactionError, HardFork};

@@ -33,7 +31,7 @@ pub fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, Transa
///
pub fn insert_ring_member_ids(
    inputs: &[Input],
    output_ids: &mut HashMap<u64, HashSet<u64>>,
    output_ids: &mut IndexMap<u64, IndexSet<u64>>,
) -> Result<(), TransactionError> {
    if inputs.is_empty() {
        return Err(TransactionError::NoInputs);

@@ -59,9 +57,9 @@ pub fn insert_ring_member_ids(
#[derive(Debug)]
pub enum Rings {
    /// Legacy, pre-ringCT, rings.
    Legacy(Vec<Vec<EdwardsPoint>>),
    Legacy(Vec<Vec<CompressedEdwardsY>>),
    /// `RingCT` rings, (outkey, amount commitment).
    RingCT(Vec<Vec<[EdwardsPoint; 2]>>),
    RingCT(Vec<Vec<[CompressedEdwardsY; 2]>>),
}

/// Information on the outputs the transaction is referencing for inputs (ring members).
@@ -2,6 +2,7 @@ use curve25519_dalek::{EdwardsPoint, Scalar};
use hex_literal::hex;
use monero_serai::{
    generators::H,
    io::decompress_point,
    ringct::{
        clsag::ClsagError,
        mlsag::{AggregateRingMatrixBuilder, MlsagError, RingMatrix},

@@ -74,9 +75,21 @@ fn simple_type_balances(rct_sig: &RctProofs) -> Result<(), RingCTError> {
        }
    };

    let sum_inputs = pseudo_outs.iter().sum::<EdwardsPoint>();
    let sum_outputs =
        rct_sig.base.commitments.iter().sum::<EdwardsPoint>() + Scalar::from(rct_sig.base.fee) * *H;
    let sum_inputs = pseudo_outs
        .iter()
        .copied()
        .map(decompress_point)
        .sum::<Option<EdwardsPoint>>()
        .ok_or(RingCTError::SimpleAmountDoNotBalance)?;
    let sum_outputs = rct_sig
        .base
        .commitments
        .iter()
        .copied()
        .map(decompress_point)
        .sum::<Option<EdwardsPoint>>()
        .ok_or(RingCTError::SimpleAmountDoNotBalance)?
        + Scalar::from(rct_sig.base.fee) * *H;

    if sum_inputs == sum_outputs {
        Ok(())

@@ -178,7 +191,7 @@ pub(crate) fn check_input_signatures(
        .collect::<Vec<_>>();

    let mut matrix =
        AggregateRingMatrixBuilder::new(&proofs.base.commitments, proofs.base.fee);
        AggregateRingMatrixBuilder::new(&proofs.base.commitments, proofs.base.fee)?;

    rings.iter().try_for_each(|ring| matrix.push_ring(ring))?;

@@ -210,7 +223,7 @@ pub(crate) fn check_input_signatures(
            panic!("How did we build a ring with no decoys?");
        };

        Ok(clsags.verify(ring, key_image, pseudo_out, msg)?)
        Ok(clsags.verify(ring.clone(), key_image, pseudo_out, msg)?)
    }),
    }
}
@@ -1,7 +1,7 @@
use std::ops::Range;

use curve25519_dalek::{
    constants::{ED25519_BASEPOINT_POINT, EIGHT_TORSION},
    constants::{ED25519_BASEPOINT_COMPRESSED, EIGHT_TORSION},
    edwards::CompressedEdwardsY,
    EdwardsPoint,
};

@@ -92,7 +92,7 @@ fn test_decoy_info() {
fn test_torsion_ki() {
    for &key_image in &EIGHT_TORSION[1..] {
        assert!(check_key_images(&Input::ToKey {
            key_image,
            key_image: key_image.compress(),
            amount: None,
            key_offsets: vec![],
        })

@@ -262,13 +262,13 @@ proptest! {
    #[test]
    fn test_check_input_has_decoys(key_offsets in vec(any::<u64>(), 1..10_000)) {
        assert!(check_input_has_decoys(&Input::ToKey {
            key_image: ED25519_BASEPOINT_POINT,
            key_image: ED25519_BASEPOINT_COMPRESSED,
            amount: None,
            key_offsets,
        }).is_ok());

        assert!(check_input_has_decoys(&Input::ToKey {
            key_image: ED25519_BASEPOINT_POINT,
            key_image: ED25519_BASEPOINT_COMPRESSED,
            amount: None,
            key_offsets: vec![],
        }).is_err());
@@ -36,7 +36,7 @@ mod batch_prepare;
mod free;

pub use alt_block::sanity_check_alt_block;
pub use batch_prepare::batch_prepare_main_chain_blocks;
pub use batch_prepare::{batch_prepare_main_chain_blocks, BatchPrepareCache};
use free::pull_ordered_transactions;

/// A pre-prepared block with all data needed to verify it, except the block's proof of work.

@@ -154,7 +154,7 @@ impl PreparedBlock {
    ///
    /// # Panics
    /// This function will panic if `randomx_vm` is
    /// [`None`] even though `RandomX` is needed.
    /// [`None`] even though RandomX is needed.
    fn new_prepped<R: RandomX>(
        block: PreparedBlockExPow,
        randomx_vm: Option<&R>,

@@ -243,7 +243,7 @@ where
    // Check that the txs included are what we need and that there are not any extra.
    let ordered_txs = pull_ordered_transactions(&prepped_block.block, txs)?;

    verify_prepped_main_chain_block(prepped_block, ordered_txs, context_svc, database).await
    verify_prepped_main_chain_block(prepped_block, ordered_txs, context_svc, database, None).await
}

/// Fully verify a block that has already been prepared using [`batch_prepare_main_chain_blocks`].

@@ -252,6 +252,7 @@ pub async fn verify_prepped_main_chain_block<D>(
    mut txs: Vec<TransactionVerificationData>,
    context_svc: &mut BlockchainContextService,
    database: D,
    batch_prep_cache: Option<&mut BatchPrepareCache>,
) -> Result<VerifiedBlockInformation, ExtendedConsensusError>
where
    D: Database + Clone + Send + 'static,

@@ -283,6 +284,7 @@ where
        context.current_adjusted_timestamp_for_time_lock(),
        context.current_hf,
        database,
        batch_prep_cache.as_deref(),
    )
    .verify()
    .await?;

@@ -304,7 +306,7 @@ where
    )
    .map_err(ConsensusError::Block)?;

    Ok(VerifiedBlockInformation {
    let block = VerifiedBlockInformation {
        block_hash: prepped_block.block_hash,
        block: prepped_block.block,
        block_blob: prepped_block.block_blob,

@@ -324,5 +326,11 @@ where
        height: context.chain_height,
        long_term_weight: context.next_block_long_term_weight(block_weight),
        cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
    })
    };

    if let Some(batch_prep_cache) = batch_prep_cache {
        batch_prep_cache.output_cache.add_block_to_cache(&block);
    }

    Ok(block)
}
@@ -173,12 +173,12 @@ where
        block_info.weight,
        block_info.long_term_weight,
        block_info.block.header.timestamp,
        cumulative_difficulty,
    );

    // Add this alt cache back to the context service.
    context_svc
        .oneshot(BlockChainContextRequest::AddAltChainContextCache {
            prev_id: block_info.block.header.previous,
            cache: alt_context_cache,
            _token: AltChainRequestToken,
        })
@@ -13,21 +13,41 @@ use cuprate_consensus_rules::{
    ConsensusError, HardFork,
};
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_types::TransactionVerificationData;
use cuprate_types::{output_cache::OutputCache, TransactionVerificationData};

use crate::{
    batch_verifier::MultiThreadedBatchVerifier,
    block::{free::order_transactions, PreparedBlock, PreparedBlockExPow},
    transactions::start_tx_verification,
    transactions::{check_kis_unique, contextual_data::get_output_cache, start_tx_verification},
    BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError,
    __private::Database,
};

/// Cached state created when batch preparing a group of blocks.
///
/// This cache is only valid for the set of blocks it was created with, it should not be used for
/// other blocks.
pub struct BatchPrepareCache {
    pub(crate) output_cache: OutputCache,
    /// [`true`] if all the key images in the batch have been checked for double spends in the batch and
    /// the whole chain.
    pub(crate) key_images_spent_checked: bool,
}

/// Batch prepares a list of blocks for verification.
#[instrument(level = "debug", name = "batch_prep_blocks", skip_all, fields(amt = blocks.len()))]
pub async fn batch_prepare_main_chain_blocks(
#[expect(clippy::type_complexity)]
pub async fn batch_prepare_main_chain_blocks<D: Database>(
    blocks: Vec<(Block, Vec<Transaction>)>,
    context_svc: &mut BlockchainContextService,
) -> Result<Vec<(PreparedBlock, Vec<TransactionVerificationData>)>, ExtendedConsensusError> {
    mut database: D,
) -> Result<
    (
        Vec<(PreparedBlock, Vec<TransactionVerificationData>)>,
        BatchPrepareCache,
    ),
    ExtendedConsensusError,
> {
    let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();

    tracing::debug!("Calculating block hashes.");

@@ -189,5 +209,16 @@ pub async fn batch_prepare_main_chain_blocks(
    })
    .await?;

    Ok(blocks)
    check_kis_unique(blocks.iter().flat_map(|(_, txs)| txs.iter()), &mut database).await?;

    let output_cache =
        get_output_cache(blocks.iter().flat_map(|(_, txs)| txs.iter()), database).await?;

    Ok((
        blocks,
        BatchPrepareCache {
            output_cache,
            key_images_spent_checked: true,
        },
    ))
}
@@ -17,8 +17,12 @@ const TEST_LAG: usize = 2;

const TEST_TOTAL_ACCOUNTED_BLOCKS: usize = TEST_WINDOW + TEST_LAG;

pub(crate) const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
    DifficultyCacheConfig::new(TEST_WINDOW, TEST_CUT, TEST_LAG);
pub(crate) const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig = DifficultyCacheConfig {
    window: TEST_WINDOW,
    cut: TEST_CUT,
    lag: TEST_LAG,
    fixed_difficulty: None,
};

#[tokio::test]
async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> {
@@ -41,11 +41,13 @@ use cuprate_consensus_rules::{
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    output_cache::OutputCache,
    CachedVerificationState, TransactionVerificationData, TxVersion,
};

use crate::{
    batch_verifier::MultiThreadedBatchVerifier,
    block::BatchPrepareCache,
    transactions::contextual_data::{batch_get_decoy_info, batch_get_ring_member_info},
    Database, ExtendedConsensusError,
};

@@ -155,6 +157,7 @@ impl VerificationWanted {
        time_for_time_lock: u64,
        hf: HardFork,
        database: D,
        batch_prep_cache: Option<&BatchPrepareCache>,
    ) -> FullVerification<D> {
        FullVerification {
            prepped_txs: self.prepped_txs,

@@ -163,6 +166,7 @@ impl VerificationWanted {
            time_for_time_lock,
            hf,
            database,
            batch_prep_cache,
        }
    }
}

@@ -208,7 +212,7 @@ impl SemanticVerification {
/// Full transaction verification.
///
/// [`VerificationWanted::full`]
pub struct FullVerification<D> {
pub struct FullVerification<'a, D> {
    prepped_txs: Vec<TransactionVerificationData>,

    current_chain_height: usize,

@@ -216,14 +220,20 @@ pub struct FullVerification<D> {
    time_for_time_lock: u64,
    hf: HardFork,
    database: D,
    batch_prep_cache: Option<&'a BatchPrepareCache>,
}

impl<D: Database + Clone> FullVerification<D> {
impl<D: Database + Clone> FullVerification<'_, D> {
    /// Fully verify each transaction.
    pub async fn verify(
        mut self,
    ) -> Result<Vec<TransactionVerificationData>, ExtendedConsensusError> {
        check_kis_unique(&self.prepped_txs, &mut self.database).await?;
        if self
            .batch_prep_cache
            .is_none_or(|c| !c.key_images_spent_checked)
        {
            check_kis_unique(self.prepped_txs.iter(), &mut self.database).await?;
        }

        let hashes_in_main_chain =
            hashes_referenced_in_main_chain(&self.prepped_txs, &mut self.database).await?;

@@ -250,6 +260,7 @@ impl<D: Database + Clone> FullVerification<D> {
            }),
            self.hf,
            self.database.clone(),
            self.batch_prep_cache.map(|c| &c.output_cache),
        )
        .await?;
        }

@@ -262,22 +273,23 @@ impl<D: Database + Clone> FullVerification<D> {
            self.time_for_time_lock,
            self.hf,
            self.database,
            self.batch_prep_cache.map(|c| &c.output_cache),
        )
        .await
    }
}

/// Check that each key image used in each transaction is unique in the whole chain.
async fn check_kis_unique<D: Database>(
    txs: &[TransactionVerificationData],
pub(crate) async fn check_kis_unique<D: Database>(
    mut txs: impl Iterator<Item = &TransactionVerificationData>,
    database: &mut D,
) -> Result<(), ExtendedConsensusError> {
    let mut spent_kis = HashSet::with_capacity(txs.len());
    let mut spent_kis = HashSet::with_capacity(txs.size_hint().1.unwrap_or(0) * 2);

    txs.iter().try_for_each(|tx| {
    txs.try_for_each(|tx| {
        tx.tx.prefix().inputs.iter().try_for_each(|input| {
            if let Input::ToKey { key_image, .. } = input {
                if !spent_kis.insert(key_image.compress().0) {
                if !spent_kis.insert(key_image.0) {
                    tracing::debug!("Duplicate key image found in batch.");
                    return Err(ConsensusError::Transaction(TransactionError::KeyImageSpent));
                }

@@ -432,13 +444,14 @@ async fn verify_transactions_decoy_info<D: Database>(
    txs: impl Iterator<Item = &TransactionVerificationData> + Clone,
    hf: HardFork,
    database: D,
    output_cache: Option<&OutputCache>,
) -> Result<(), ExtendedConsensusError> {
    // Decoy info is not validated for V1 txs.
    if hf == HardFork::V1 {
        return Ok(());
    }

    batch_get_decoy_info(txs, hf, database)
    batch_get_decoy_info(txs, hf, database, output_cache)
        .await?
        .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, hf)?)))?;

@@ -450,6 +463,7 @@ async fn verify_transactions_decoy_info<D: Database>(
/// The inputs to this function are the txs wanted to be verified and a list of [`VerificationNeeded`],
/// if any other [`VerificationNeeded`] is specified other than [`VerificationNeeded::Contextual`] or
/// [`VerificationNeeded::SemanticAndContextual`], nothing will be verified for that tx.
#[expect(clippy::too_many_arguments)]
async fn verify_transactions<D>(
    mut txs: Vec<TransactionVerificationData>,
    verification_needed: Vec<VerificationNeeded>,

@@ -458,6 +472,7 @@ async fn verify_transactions<D>(
    current_time_lock_timestamp: u64,
    hf: HardFork,
    database: D,
    output_cache: Option<&OutputCache>,
) -> Result<Vec<TransactionVerificationData>, ExtendedConsensusError>
where
    D: Database,

@@ -478,6 +493,7 @@ where
            .map(|(tx, _)| tx),
        hf,
        database,
        output_cache,
    )
    .await?;
@@ -10,8 +10,10 @@
//!
//! Because this data is unique for *every* transaction and the context service is just for blockchain state data.
//!
use std::collections::{HashMap, HashSet};

use std::{borrow::Cow, collections::HashSet};

use indexmap::IndexMap;
use monero_serai::transaction::{Input, Timelock};
use tower::ServiceExt;
use tracing::instrument;

@@ -23,8 +25,10 @@ use cuprate_consensus_rules::{
    },
    ConsensusError, HardFork, TxVersion,
};

use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    output_cache::OutputCache,
    OutputOnChain,
};

@@ -92,27 +96,19 @@ pub fn new_ring_member_info(
                .collect::<Vec<_>>()
        })
        .collect(),
        rings: new_rings(used_outs, tx_version)?,
        rings: new_rings(used_outs, tx_version),
        decoy_info,
    })
}

/// Builds the [`Rings`] for the transaction inputs, from the given outputs.
fn new_rings(
    outputs: Vec<Vec<OutputOnChain>>,
    tx_version: TxVersion,
) -> Result<Rings, TransactionError> {
    Ok(match tx_version {
fn new_rings(outputs: Vec<Vec<OutputOnChain>>, tx_version: TxVersion) -> Rings {
    match tx_version {
        TxVersion::RingSignatures => Rings::Legacy(
            outputs
                .into_iter()
                .map(|inp_outs| {
                    inp_outs
                        .into_iter()
                        .map(|out| out.key.ok_or(TransactionError::RingMemberNotFoundOrInvalid))
                        .collect::<Result<Vec<_>, TransactionError>>()
                })
                .collect::<Result<Vec<_>, TransactionError>>()?,
                .map(|inp_outs| inp_outs.into_iter().map(|out| out.key).collect::<Vec<_>>())
                .collect::<Vec<_>>(),
        ),
        TxVersion::RingCT => Rings::RingCT(
            outputs

@@ -120,32 +116,24 @@ fn new_rings(
                .map(|inp_outs| {
                    inp_outs
                        .into_iter()
                        .map(|out| {
                            Ok([
                                out.key
                                    .ok_or(TransactionError::RingMemberNotFoundOrInvalid)?,
                                out.commitment,
                            ])
                        })
                        .collect::<Result<_, TransactionError>>()
                        .map(|out| [out.key, out.commitment])
                        .collect::<_>()
                })
                .collect::<Result<_, _>>()?,
                .collect::<_>(),
        ),
    })
}
}

/// Retrieves the [`TxRingMembersInfo`] for the inputted [`TransactionVerificationData`].
/// Retrieves an [`OutputCache`] for the list of transactions.
///
/// This function batch gets all the ring members for the inputted transactions and fills in data about
/// them.
pub async fn batch_get_ring_member_info<D: Database>(
    txs_verification_data: impl Iterator<Item = &TransactionVerificationData> + Clone,
    hf: HardFork,
/// The [`OutputCache`] will only contain the outputs currently in the blockchain.
pub async fn get_output_cache<D: Database>(
    txs_verification_data: impl Iterator<Item = &TransactionVerificationData>,
    mut database: D,
) -> Result<Vec<TxRingMembersInfo>, ExtendedConsensusError> {
    let mut output_ids = HashMap::new();
) -> Result<OutputCache, ExtendedConsensusError> {
    let mut output_ids = IndexMap::new();

    for tx_v_data in txs_verification_data.clone() {
    for tx_v_data in txs_verification_data {
        insert_ring_member_ids(&tx_v_data.tx.prefix().inputs, &mut output_ids)
            .map_err(ConsensusError::Transaction)?;
    }

@@ -156,26 +144,50 @@ pub async fn batch_get_ring_member_info<D: Database>(
        .call(BlockchainReadRequest::Outputs(output_ids))
        .await?
    else {
        panic!("Database sent incorrect response!")
        unreachable!();
    };

    let BlockchainResponse::NumberOutputsWithAmount(outputs_with_amount) = database
        .ready()
        .await?
        .call(BlockchainReadRequest::NumberOutputsWithAmount(
            outputs.keys().copied().collect(),
        ))
        .await?
    else {
        panic!("Database sent incorrect response!")
    Ok(outputs)
}

/// Retrieves the [`TxRingMembersInfo`] for the inputted [`TransactionVerificationData`].
///
/// This function batch gets all the ring members for the inputted transactions and fills in data about
/// them.
pub async fn batch_get_ring_member_info<D: Database>(
    txs_verification_data: impl Iterator<Item = &TransactionVerificationData> + Clone,
    hf: HardFork,
    mut database: D,
    cache: Option<&OutputCache>,
) -> Result<Vec<TxRingMembersInfo>, ExtendedConsensusError> {
    let mut output_ids = IndexMap::new();

    for tx_v_data in txs_verification_data.clone() {
        insert_ring_member_ids(&tx_v_data.tx.prefix().inputs, &mut output_ids)
            .map_err(ConsensusError::Transaction)?;
    }

    let outputs = if let Some(cache) = cache {
        Cow::Borrowed(cache)
    } else {
        let BlockchainResponse::Outputs(outputs) = database
            .ready()
            .await?
            .call(BlockchainReadRequest::Outputs(output_ids))
            .await?
        else {
            unreachable!();
        };

        Cow::Owned(outputs)
    };

    Ok(txs_verification_data
        .map(move |tx_v_data| {
            let numb_outputs = |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0);
            let numb_outputs = |amt| outputs.number_outs_with_amount(amt);

            let ring_members_for_tx = get_ring_members_for_inputs(
                |amt, idx| outputs.get(&amt)?.get(&idx).copied(),
                |amt, idx| outputs.get_output(amt, idx).copied(),
                &tx_v_data.tx.prefix().inputs,
            )
            .map_err(ConsensusError::Transaction)?;

@@ -202,12 +214,13 @@ pub async fn batch_get_ring_member_info<D: Database>(
/// This function panics if `hf == HardFork::V1` as decoy info
/// should not be needed for V1.
#[instrument(level = "debug", skip_all)]
pub async fn batch_get_decoy_info<'a, D: Database>(
pub async fn batch_get_decoy_info<'a, 'b, D: Database>(
    txs_verification_data: impl Iterator<Item = &'a TransactionVerificationData> + Clone,
    hf: HardFork,
    mut database: D,
    cache: Option<&'b OutputCache>,
) -> Result<
    impl Iterator<Item = Result<DecoyInfo, ConsensusError>> + sealed::Captures<&'a ()>,
    impl Iterator<Item = Result<DecoyInfo, ConsensusError>> + sealed::Captures<(&'a (), &'b ())>,
    ExtendedConsensusError,
> {
    // decoy info is not needed for V1.

@@ -229,15 +242,24 @@ pub async fn batch_get_decoy_info<'a, D: Database>(
        unique_input_amounts.len()
    );

    let BlockchainResponse::NumberOutputsWithAmount(outputs_with_amount) = database
        .ready()
        .await?
        .call(BlockchainReadRequest::NumberOutputsWithAmount(
            unique_input_amounts.into_iter().collect(),
        ))
        .await?
    else {
        panic!("Database sent incorrect response!")
    let outputs_with_amount = if let Some(cache) = cache {
        unique_input_amounts
            .into_iter()
            .map(|amount| (amount, cache.number_outs_with_amount(amount)))
            .collect()
    } else {
        let BlockchainResponse::NumberOutputsWithAmount(outputs_with_amount) = database
            .ready()
            .await?
            .call(BlockchainReadRequest::NumberOutputsWithAmount(
                unique_input_amounts.into_iter().collect(),
            ))
            .await?
        else {
            unreachable!();
        };

        outputs_with_amount
    };

    Ok(txs_verification_data.map(move |tx_v_data| {
@@ -7,13 +7,15 @@ use std::{
    sync::Arc,
};

use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY};
use curve25519_dalek::{constants::ED25519_BASEPOINT_COMPRESSED, edwards::CompressedEdwardsY};
use indexmap::IndexMap;
use monero_serai::transaction::{Timelock, Transaction};
use tower::service_fn;

use cuprate_consensus::{__private::Database, transactions::start_tx_verification};
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    output_cache::OutputCache,
    OutputOnChain,
};

@@ -32,15 +34,17 @@ fn dummy_database(outputs: BTreeMap<u64, OutputOnChain>) -> impl Database + Clon
        BlockchainReadRequest::Outputs(outs) => {
            let idxs = &outs[&0];

            let mut ret = HashMap::new();
            let mut ret = IndexMap::new();

            ret.insert(
                0_u64,
                idxs.iter()
                    .map(|idx| (*idx, *outputs.get(idx).unwrap()))
                    .collect::<HashMap<_, _>>(),
                    .collect::<IndexMap<_, _>>(),
            );

            let ret = OutputCache::new(ret, IndexMap::new(), IndexMap::new());

            BlockchainResponse::Outputs(ret)
        }
        BlockchainReadRequest::KeyImagesSpent(_) => BlockchainResponse::KeyImagesSpent(false),

@@ -67,13 +71,8 @@ macro_rules! test_verify_valid_v2_tx {
            OutputOnChain {
                height: 0,
                time_lock: Timelock::None,
                commitment: CompressedEdwardsY::from_slice(&hex_literal::hex!($commitment))
                    .unwrap()
                    .decompress()
                    .unwrap(),
                key: CompressedEdwardsY::from_slice(&hex_literal::hex!($ring_member))
                    .unwrap()
                    .decompress(),
                commitment: CompressedEdwardsY(hex_literal::hex!($commitment)),
                key: CompressedEdwardsY(hex_literal::hex!($ring_member)),
                txid: [0; 32],
            }),)+)+
        ];

@@ -88,7 +87,7 @@ macro_rules! test_verify_valid_v2_tx {
        )
        .prepare()
        .unwrap()
        .full(10, [0; 32], u64::MAX, HardFork::$hf, database.clone())
        .full(10, [0; 32], u64::MAX, HardFork::$hf, database.clone(), None)
        .verify()
        .await.is_ok()
    );

@@ -100,10 +99,8 @@ macro_rules! test_verify_valid_v2_tx {
            OutputOnChain {
                height: 0,
                time_lock: Timelock::None,
                commitment: ED25519_BASEPOINT_POINT,
                key: CompressedEdwardsY::from_slice(&hex_literal::hex!($ring_member))
                    .unwrap()
                    .decompress(),
                commitment: ED25519_BASEPOINT_COMPRESSED,
                key: CompressedEdwardsY(hex_literal::hex!($ring_member)),
                txid: [0; 32],
            }),)+)+
        ];

@@ -118,7 +115,7 @@ macro_rules! test_verify_valid_v2_tx {
        )
        .prepare()
        .unwrap()
        .full(10, [0; 32], u64::MAX, HardFork::$hf, database.clone())
        .full(10, [0; 32], u64::MAX, HardFork::$hf, database.clone(), None)
        .verify()
        .await.is_err()
    );
@@ -224,8 +224,10 @@ pub(crate) fn key_extend(key_bytes: &[u8; CN_AES_KEY_SIZE]) -> [u128; NUM_AES_RO
        let w2 = w1 ^ ((pprev_key >> 64) as u32);
        let w3 = w2 ^ ((pprev_key >> 96) as u32);

        expanded_key[i] =
            u128::from(w0) | u128::from(w1) << 32 | u128::from(w2) << 64 | u128::from(w3) << 96;
        expanded_key[i] = u128::from(w0)
            | (u128::from(w1) << 32)
            | (u128::from(w2) << 64)
            | (u128::from(w3) << 96);

        w0_prev = w3;
    }

@@ -256,7 +258,7 @@ pub(crate) fn round_fwd(state: u128, key: u128) -> u128 {
    r4 ^= CRYPTONIGHT_SBOX[768 + usize::from((state >> 88) as u8)];

    let mut new_state =
        u128::from(r4) << 96 | u128::from(r3) << 64 | u128::from(r2) << 32 | u128::from(r1);
        (u128::from(r4) << 96) | (u128::from(r3) << 64) | (u128::from(r2) << 32) | u128::from(r1);
    new_state ^= key;
    new_state
}
@@ -35,18 +35,18 @@ pub(crate) fn variant2_shuffle_add(
    let chunk1 = &mut long_state[chunk1_start];
    let sum1 = chunk3_old.wrapping_add(b1) & U64_MASK;
    let sum2 = (chunk3_old >> 64).wrapping_add(b1 >> 64) & U64_MASK;
    *chunk1 = sum2 << 64 | sum1; // TODO remove some shifting above
    *chunk1 = (sum2 << 64) | sum1; // TODO remove some shifting above

    let chunk3 = &mut long_state[chunk3_start];
    let sum1 = chunk2_old.wrapping_add(a) & U64_MASK;
    let sum2 = (chunk2_old >> 64).wrapping_add(a >> 64) & U64_MASK;
    *chunk3 = sum2 << 64 | sum1;
    *chunk3 = (sum2 << 64) | sum1;

    let b0 = b[0];
    let chunk2 = &mut long_state[chunk2_start];
    let sum1 = chunk1_old.wrapping_add(b0) & U64_MASK;
    let sum2 = (chunk1_old >> 64).wrapping_add(b0 >> 64) & U64_MASK;
    *chunk2 = sum2 << 64 | sum1;
    *chunk2 = (sum2 << 64) | sum1;

    if variant == Variant::R {
        *c1 ^= chunk1_old ^ chunk2_old ^ chunk3_old;
@@ -401,8 +401,10 @@ pub(crate) fn variant4_random_math(

    v4_random_math(code, r);

    *a1 ^=
        u128::from(r[2]) | u128::from(r[3]) << 32 | u128::from(r[0]) << 64 | u128::from(r[1]) << 96;
    *a1 ^= u128::from(r[2])
        | (u128::from(r[3]) << 32)
        | (u128::from(r[0]) << 64)
        | (u128::from(r[1]) << 96);
}

#[cfg(test)]
@@ -138,7 +138,7 @@ fn mul(a: u64, b: u64) -> u128 {
    let lo = product as u64;

    // swap hi and low, so this isn't just a multiply
    u128::from(lo) << 64 | u128::from(hi)
    (u128::from(lo) << 64) | u128::from(hi)
}

/// Original C code:

@@ -153,7 +153,7 @@ fn sum_half_blocks(a: u128, b: u128) -> u128 {
    let b_high = (b >> 64) as u64;
    let sum_high = a_high.wrapping_add(b_high);

    u128::from(sum_high) << 64 | u128::from(sum_low)
    (u128::from(sum_high) << 64) | u128::from(sum_low)
}

/// Original C code:
@@ -83,7 +83,7 @@ ignore = [
    #{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" },

    # TODO: check this is sorted before a beta release.
    { id = "RUSTSEC-2024-0370", reason = "unmaintained crate, not necessarily vulnerable yet." }
    { id = "RUSTSEC-2024-0436", reason = "`paste` unmaintained, not necessarily vulnerable yet." }
]
# If this is true, then cargo deny will use the git executable to fetch advisory database.
# If this is false, then it uses a built-in git library.
@@ -4,8 +4,11 @@
use std::sync::LazyLock;

use curve25519_dalek::{
    constants::ED25519_BASEPOINT_POINT, edwards::VartimeEdwardsPrecomputation,
    traits::VartimePrecomputedMultiscalarMul, EdwardsPoint, Scalar,
    constants::{ED25519_BASEPOINT_COMPRESSED, ED25519_BASEPOINT_POINT},
    edwards::CompressedEdwardsY,
    edwards::VartimeEdwardsPrecomputation,
    traits::VartimePrecomputedMultiscalarMul,
    Scalar,
};
use monero_serai::generators::H;

@@ -49,15 +52,16 @@ static H_PRECOMP: LazyLock<VartimeEdwardsPrecomputation> =
/// # Invariant
/// This function assumes that the [`ZERO_COMMITMENT_DECOMPOSED_AMOUNT`]
/// table is sorted.
pub static ZERO_COMMITMENT_LOOKUP_TABLE: LazyLock<[EdwardsPoint; 172]> = LazyLock::new(|| {
    let mut lookup_table: [EdwardsPoint; 172] = [ED25519_BASEPOINT_POINT; 172];
pub static ZERO_COMMITMENT_LOOKUP_TABLE: LazyLock<[CompressedEdwardsY; 172]> =
    LazyLock::new(|| {
        let mut lookup_table: [CompressedEdwardsY; 172] = [ED25519_BASEPOINT_COMPRESSED; 172];

    for (i, amount) in ZERO_COMMITMENT_DECOMPOSED_AMOUNT.into_iter().enumerate() {
        lookup_table[i] = ED25519_BASEPOINT_POINT + *H * Scalar::from(amount);
    }
        for (i, amount) in ZERO_COMMITMENT_DECOMPOSED_AMOUNT.into_iter().enumerate() {
            lookup_table[i] = (ED25519_BASEPOINT_POINT + *H * Scalar::from(amount)).compress();
        }

    lookup_table
});
        lookup_table
    });

//---------------------------------------------------------------------------------------------------- Free functions

@@ -66,7 +70,7 @@ pub static ZERO_COMMITMENT_LOOKUP_TABLE: LazyLock<[EdwardsPoint; 172]> = LazyLoc
/// It will first attempt to lookup into the table of known Pre-RCT value.
/// Compute it otherwise.
#[expect(clippy::cast_possible_truncation)]
pub fn compute_zero_commitment(amount: u64) -> EdwardsPoint {
pub fn compute_zero_commitment(amount: u64) -> CompressedEdwardsY {
    // OPTIMIZATION: Unlike monerod, which executes a linear search across its lookup
    // table (O(n)), Cuprate is making use of an arithmetic based constant time
    // version (O(1)). It has been benchmarked in both hit and miss scenarios against

@@ -78,7 +82,7 @@ pub fn compute_zero_commitment(amount: u64) -> EdwardsPoint {
    // the amount without its most significant digit.
    let Some(log) = amount.checked_ilog10() else {
        // amount = 0 so H component is 0.
        return ED25519_BASEPOINT_POINT;
        return ED25519_BASEPOINT_COMPRESSED;
    };
    let div = 10_u64.pow(log);

@@ -89,7 +93,9 @@ pub fn compute_zero_commitment(amount: u64) -> EdwardsPoint {
    // there aren't only trailing zeroes behind the most significant digit.
    // The amount is not part of the table and can be calculated apart.
    if most_significant_digit * div != amount {
        return H_PRECOMP.vartime_multiscalar_mul([Scalar::from(amount), Scalar::ONE]);
        return H_PRECOMP
            .vartime_multiscalar_mul([Scalar::from(amount), Scalar::ONE])
            .compress();
    }

    // Calculating the index back by progressing within the powers of 10.

@@ -116,7 +122,10 @@ mod test {
    fn compare_lookup_with_computation() {
        for amount in ZERO_COMMITMENT_DECOMPOSED_AMOUNT {
            let commitment = H_PRECOMP.vartime_multiscalar_mul([Scalar::from(amount), Scalar::ONE]);
            assert!(commitment == compute_zero_commitment(amount));
            assert_eq!(
                commitment,
                compute_zero_commitment(amount).decompress().unwrap()
            );
        }
    }
}
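The index arithmetic hinted at in the comments above can be made concrete. Assuming the table stores a commitment for 0 plus every single-significant-digit amount `d * 10^k` (`d` in `1..=9`, `k` in `0..=18`, which matches the 172 entries), the lookup index is computable in O(1). The exact layout of Cuprate's table may differ, so treat this as a sketch:

```rust
/// Sketch: index of `amount` in a table laid out as [0, 1..=9, 10..=90, ...].
fn table_index(amount: u64) -> Option<usize> {
    if amount == 0 {
        return Some(0);
    }
    let log = amount.ilog10(); // position of the most significant digit
    let div = 10_u64.pow(log);
    let digit = amount / div;
    // Only amounts with a single significant digit are in the table.
    (digit * div == amount).then(|| 1 + (log as usize) * 9 + (digit as usize - 1))
}
```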
@@ -35,7 +35,7 @@ pub fn tx_fee(tx: &Transaction) -> u64 {

#[cfg(test)]
mod test {
    use curve25519_dalek::{edwards::CompressedEdwardsY, EdwardsPoint};
    use curve25519_dalek::edwards::CompressedEdwardsY;
    use monero_serai::transaction::{NotPruned, Output, Timelock, TransactionPrefix};

    use super::*;

@@ -46,7 +46,7 @@ mod test {
        let input = Input::ToKey {
            amount: Some(u64::MAX),
            key_offsets: vec![],
            key_image: EdwardsPoint::default(),
            key_image: CompressedEdwardsY::default(),
        };

        let output = Output {
misc/FAST_SYNC_HASHES.md (new file)

@@ -0,0 +1,21 @@
# Fast sync hashes
Cuprate has a binary that generates `fast-sync` hashes and puts them into a binary blob file.

The code that does so is located at [`consensus/fast-sync`](https://github.com/Cuprate/cuprate/blob/main/consensus/fast-sync).

To create the hashes, you need a fully synced database generated from `cuprated`.

After that, build the binary that generates `fast-sync` hashes:
```bash
cargo build --release --package cuprate-fast-sync
```

Run the binary:
```bash
./target/release/create-fs-file --height $HEIGHT
```
where `$HEIGHT` is the top blockchain height.

The generated `fast_sync_hashes.bin` file should be in the current directory.

This should be moved to `binaries/cuprated/src/blockchain/fast_sync/fast_sync_hashes.bin`.
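Assuming the blob is a flat concatenation of 32-byte hashes, consuming it can be as simple as the following sketch (hypothetical; how `cuprated` actually loads the file may differ):

```rust
// Embed the blob at compile time and view it as 32-byte hashes.
static FAST_SYNC_HASHES_BLOB: &[u8] = include_bytes!("fast_sync_hashes.bin");

fn fast_sync_hashes() -> impl Iterator<Item = [u8; 32]> {
    FAST_SYNC_HASHES_BLOB
        .chunks_exact(32)
        .map(|chunk| chunk.try_into().expect("chunks_exact yields 32-byte slices"))
}
```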
misc/README.md (new file)

@@ -0,0 +1,2 @@
## Misc
Any miscellaneous files, such as documents, GPG keys, assets, etc.
misc/RELEASE_CHECKLIST.md (new file)

@@ -0,0 +1,34 @@
# Cuprate release check-list
This is a template checklist used to track releases.

The scheme for release file names is `$BINARY-$VERSION-$OS-$ARCH.$EXTENSION`, for example, `cuprated-0.0.1-linux-x64.tar.gz`.

---

- Changelog
  - [ ] Relevant changes added to `misc/changelogs/cuprated/$VERSION.md`
- Fast sync
  - [ ] Update hashes, see `misc/FAST_SYNC_HASHES.md`
- User Book
  - [ ] Update necessary documentation
  - [ ] Book title reflects `cuprated`'s version
- `cuprated`
  - [ ] Killswitch timestamp updated
- Repository
  - [ ] Decide specific commit
  - [ ] Create draft release
  - [ ] Create version tag
  - [ ] Build CI binaries
- `cuprated` testing
  - Full-sync from scratch
    - [ ] x64 Windows
    - [ ] x64 Linux
    - [ ] ARM64 macOS
    - [ ] ARM64 Linux
- Release
  - [ ] Add binaries to release
  - [ ] Publish `Cuprate/user-book`
  - [ ] Release
- Release announcements
  - [ ] Reddit
  - [ ] Matrix
misc/changelogs/README.md (new file)

@@ -0,0 +1,2 @@
# Changelogs
This directory holds changelog files for binaries/libraries.
misc/changelogs/cuprated/0.0.1.md (new file)

@@ -0,0 +1,32 @@
# cuprated 0.0.1 Molybdenite (2025-03-12)
Cuprate is an alternative and independent Monero node implementation that is focused on being fast, user-friendly, and backwards compatible with [`monerod`](https://github.com/monero-project/monero).

This is the first release of the Cuprate node, `cuprated`.

To get started, see: <https://user.cuprate.org>.

For an FAQ on Cuprate, see: <https://user.cuprate.org/#faq>.

## Downloads
For convenience, the following binaries are produced using GitHub CI in a non-reproducible way; it is highly recommended to build `cuprated` from source instead, see <https://user.cuprate.org/getting-started/source>.

| OS      | Architecture | Download |
|---------|--------------|----------|
| Linux   | x64          | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-linux-x64.tar.gz> |
| Linux   | ARM64        | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-linux-arm64.tar.gz> |
| macOS   | x64          | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-macos-x64.tar.gz> |
| macOS   | ARM64        | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-macos-arm64.tar.gz> |
| Windows | x64          | <https://github.com/Cuprate/cuprate/releases/download/cuprated-0.0.1/cuprated-0.0.1-windows-x64.zip> |

## Contributors
Thank you to everyone who directly contributed to this release:

- @Asurar0
- @Boog900
- @dimalinux
- @hinto-janai
- @jomuel
- @kayabaNerve
- @SyntheticBird45

There are other contributors that are not listed here, thank you to them as well.
@@ -243,7 +243,7 @@ pub trait LevinBody: Sized {
    /// Decodes the message from the data in the header
    fn decode_message<B: Buf>(
        body: &mut B,
        typ: MessageType,
        ty: MessageType,
        command: Self::Command,
    ) -> Result<Self, BucketError>;
@@ -77,7 +77,7 @@ pub fn make_fragmented_messages<T: LevinBody>(
    message: T,
) -> Result<Vec<Bucket<T::Command>>, BucketError> {
    assert!(
        fragment_size * 2 >= HEADER_SIZE,
        fragment_size >= 2 * HEADER_SIZE,
        "Fragment size: {fragment_size}, is too small, must be at least {}",
        2 * HEADER_SIZE
    );
@@ -395,10 +395,10 @@ impl LevinBody for Message {

    fn decode_message<B: Buf>(
        body: &mut B,
        typ: MessageType,
        ty: MessageType,
        command: LevinCommand,
    ) -> Result<Self, BucketError> {
        Ok(match typ {
        Ok(match ty {
            MessageType::Request => Self::Request(AdminRequestMessage::decode(body, command)?),
            MessageType::Response => Self::Response(AdminResponseMessage::decode(body, command)?),
            MessageType::Notification => Self::Protocol(ProtocolMessage::decode(body, command)?),
@@ -129,7 +129,7 @@ impl ChainResponse {
    #[inline]
    pub const fn cumulative_difficulty(&self) -> u128 {
        let cumulative_difficulty = self.cumulative_difficulty_top64 as u128;
        cumulative_difficulty << 64 | self.cumulative_difficulty_low64 as u128
        (cumulative_difficulty << 64) | self.cumulative_difficulty_low64 as u128
    }
}
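This parenthesization, repeated across the cryptonight and wire hunks above, is cosmetic: `<<` already binds tighter than `|` in Rust, so behavior is unchanged; the parentheses only make the 64/64-bit packing explicit. A tiny sanity check of the recombination (a sketch, not the crate's tests):

```rust
#[test]
fn recombine_u128_halves() {
    let (top, low) = (0xDEAD_u64, 0xBEEF_u64);
    // Pack two u64 halves into one u128, then split them back out.
    let joined = ((top as u128) << 64) | (low as u128);
    assert_eq!((joined >> 64) as u64, top);
    assert_eq!(joined as u64, low);
}
```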
@@ -91,7 +91,7 @@ impl DandelionConfig {
    }

    /// Returns the expected length of a stem.
    pub fn expected_stem_length(&self) -> f64 {
    pub const fn expected_stem_length(&self) -> f64 {
        self.fluff_probability.recip()
    }
}
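The `recip` here encodes a small fact worth spelling out: if each hop independently fluffs with probability `p = fluff_probability`, the stem length follows a geometric distribution, so its expectation is

```latex
E[L] = \sum_{k \ge 1} k \, p \, (1 - p)^{k-1} = \frac{1}{p}
```

which is exactly `self.fluff_probability.recip()`.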
@@ -9,7 +9,7 @@ use tokio::{
    sync::{mpsc, OwnedSemaphorePermit, Semaphore},
    task::JoinHandle,
};
use tokio_util::sync::PollSemaphore;
use tokio_util::sync::{PollSemaphore, PollSender};
use tower::{Service, ServiceExt};
use tracing::Instrument;
@ -31,7 +31,7 @@ mod weak;
|
|||
|
||||
pub use connector::{ConnectRequest, Connector};
|
||||
pub use handshaker::{DoHandshakeRequest, HandshakeError, HandshakerBuilder};
|
||||
pub use weak::WeakClient;
|
||||
pub use weak::{WeakBroadcastClient, WeakClient};
|
||||
|
||||
/// An internal identifier for a given peer, will be their address if known
|
||||
/// or a random u128 if not.
|
||||
|
@ -85,7 +85,7 @@ pub struct Client<Z: NetworkZone> {
|
|||
pub info: PeerInformation<Z::Addr>,
|
||||
|
||||
/// The channel to the [`Connection`](connection::Connection) task.
|
||||
connection_tx: mpsc::Sender<connection::ConnectionTaskRequest>,
|
||||
connection_tx: PollSender<connection::ConnectionTaskRequest>,
|
||||
/// The [`JoinHandle`] of the spawned connection task.
|
||||
connection_handle: JoinHandle<()>,
|
||||
/// The [`JoinHandle`] of the spawned timeout monitor task.
|
||||
|
@ -100,6 +100,12 @@ pub struct Client<Z: NetworkZone> {
|
|||
error: SharedError<PeerError>,
|
||||
}
|
||||
|
||||
impl<Z: NetworkZone> Drop for Client<Z> {
|
||||
fn drop(&mut self) {
|
||||
self.info.handle.send_close_signal();
|
||||
}
|
||||
}
|
||||
|
||||
impl<Z: NetworkZone> Client<Z> {
|
||||
/// Creates a new [`Client`].
|
||||
pub(crate) fn new(
|
||||
|
@ -112,7 +118,7 @@ impl<Z: NetworkZone> Client<Z> {
|
|||
) -> Self {
|
||||
Self {
|
||||
info,
|
||||
connection_tx,
|
||||
connection_tx: PollSender::new(connection_tx),
|
||||
timeout_handle,
|
||||
semaphore: PollSemaphore::new(semaphore),
|
||||
permit: None,
|
||||
|
@ -135,7 +141,7 @@ impl<Z: NetworkZone> Client<Z> {
|
|||
pub fn downgrade(&self) -> WeakClient<Z> {
|
||||
WeakClient {
|
||||
info: self.info.clone(),
|
||||
connection_tx: self.connection_tx.downgrade(),
|
||||
connection_tx: self.connection_tx.clone(),
|
||||
semaphore: self.semaphore.clone(),
|
||||
permit: None,
|
||||
error: self.error.clone(),
|
||||
|
@@ -158,14 +164,17 @@ impl<Z: NetworkZone> Service<PeerRequest> for Client<Z> {
             return Poll::Ready(Err(err));
         }

-        if self.permit.is_some() {
-            return Poll::Ready(Ok(()));
+        if self.permit.is_none() {
+            let permit = ready!(self.semaphore.poll_acquire(cx))
+                .expect("Client semaphore should not be closed!");
+
+            self.permit = Some(permit);
         }

-        let permit = ready!(self.semaphore.poll_acquire(cx))
-            .expect("Client semaphore should not be closed!");
-
-        self.permit = Some(permit);
+        if ready!(self.connection_tx.poll_reserve(cx)).is_err() {
+            let err = self.set_err(PeerError::ClientChannelClosed);
+            return Poll::Ready(Err(err));
+        }

         Poll::Ready(Ok(()))
     }
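Readiness now means two things at once: a semaphore permit (bounding in-flight requests) and a reserved channel slot (guaranteeing `call` cannot hit a full queue). A reduced sketch of that two-phase `poll_ready`, where `Req`, the struct shape, and the error strings are assumptions for illustration:

```rust
use std::task::{ready, Context, Poll};

use tokio::sync::OwnedSemaphorePermit;
use tokio_util::sync::{PollSemaphore, PollSender};

// Hypothetical request type standing in for `ConnectionTaskRequest`.
struct Req;

struct Client {
    semaphore: PollSemaphore,
    permit: Option<OwnedSemaphorePermit>,
    connection_tx: PollSender<Req>,
}

impl Client {
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), &'static str>> {
        // Step 1: hold a semaphore permit. Checked first so that
        // repeated polls never acquire a second permit.
        if self.permit.is_none() {
            let permit = ready!(self.semaphore.poll_acquire(cx))
                .expect("semaphore should not be closed");
            self.permit = Some(permit);
        }

        // Step 2: reserve a slot in the connection task's channel.
        // `poll_reserve` is idempotent, so re-polling is harmless.
        if ready!(self.connection_tx.poll_reserve(cx)).is_err() {
            return Poll::Ready(Err("connection task has exited"));
        }

        Poll::Ready(Ok(()))
    }
}
```
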
@@ -183,19 +192,13 @@ impl<Z: NetworkZone> Service<PeerRequest> for Client<Z> {
             permit: Some(permit),
         };

-        if let Err(e) = self.connection_tx.try_send(req) {
+        if let Err(req) = self.connection_tx.send_item(req) {
             // The connection task could have closed between a call to `poll_ready` and the call to
             // `call`, which means if we don't handle the error here the receiver would panic.
-            use mpsc::error::TrySendError;
+            self.set_err(PeerError::ClientChannelClosed);

-            match e {
-                TrySendError::Closed(req) | TrySendError::Full(req) => {
-                    self.set_err(PeerError::ClientChannelClosed);
-
-                    let resp = Err(PeerError::ClientChannelClosed.into());
-                    drop(req.response_channel.send(resp));
-                }
-            }
+            let resp = Err(PeerError::ClientChannelClosed.into());
+            drop(req.into_inner().unwrap().response_channel.send(resp));
         }

         rx.into()
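The error handling shrinks because `PollSender::send_item` has a single failure mode: it hands the unsent value back inside `PollSendError`, recoverable via `into_inner`, instead of `try_send`'s `Closed`/`Full` pair. A minimal sketch of those semantics, assuming tokio-util's documented behavior that a failed `poll_reserve` moves the sender into a closed state:

```rust
use std::future::poll_fn;

use tokio_util::sync::PollSender;

#[tokio::main]
async fn main() {
    let (tx, rx) = tokio::sync::mpsc::channel::<u32>(1);
    let mut tx = PollSender::new(tx);

    drop(rx); // Simulate the connection task having exited.

    // The reservation fails because the receiver is gone...
    assert!(poll_fn(|cx| tx.poll_reserve(cx)).await.is_err());

    // ...and `send_item` hands the unsent value back inside the error,
    // so a caller can still answer the request's response channel.
    let err = tx.send_item(42).unwrap_err();
    assert_eq!(err.into_inner(), Some(42));
}
```
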
@@ -17,9 +17,9 @@ use tokio_stream::wrappers::ReceiverStream;

 use cuprate_wire::{LevinCommand, Message, ProtocolMessage};

-use crate::client::request_handler::PeerRequestHandler;
 use crate::{
-    constants::{REQUEST_TIMEOUT, SENDING_TIMEOUT},
+    client::request_handler::PeerRequestHandler,
+    constants::{REQUEST_HANDLER_TIMEOUT, REQUEST_TIMEOUT, SENDING_TIMEOUT},
     handles::ConnectionGuard,
     AddressBook, BroadcastMessage, CoreSyncSvc, MessageID, NetworkZone, PeerError, PeerRequest,
     PeerResponse, ProtocolRequestHandler, ProtocolResponse, SharedError,

@@ -46,7 +46,7 @@ pub(crate) enum State {
         /// The channel to send the response down.
         tx: oneshot::Sender<Result<PeerResponse, tower::BoxError>>,
         /// A permit for this request.
-        _req_permit: Option<OwnedSemaphorePermit>,
+        _req_permit: OwnedSemaphorePermit,
     },
 }
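Dropping the `Option` makes holding a permit mandatory while a response is outstanding; the underscore-prefixed field exists purely so the permit's `Drop` releases the semaphore slot when the state is replaced. A minimal demonstration of that RAII behavior with tokio's own types:

```rust
use std::sync::Arc;

use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    let sem = Arc::new(Semaphore::new(1));

    // An owned permit occupies its slot for as long as the value lives,
    // e.g. while stored inside a `WaitingForResponse`-style state.
    let permit = sem.clone().acquire_owned().await.unwrap();
    assert_eq!(sem.available_permits(), 0);

    drop(permit); // Dropping the state releases the slot.
    assert_eq!(sem.available_permits(), 1);
}
```
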
@@ -141,7 +141,7 @@ where
                 self.send_message_to_peer(Message::Protocol(ProtocolMessage::NewFluffyBlock(block)))
                     .await
             }
-            BroadcastMessage::NewTransaction(txs) => {
+            BroadcastMessage::NewTransactions(txs) => {
                 self.send_message_to_peer(Message::Protocol(ProtocolMessage::NewTransactions(txs)))
                     .await
             }

@@ -153,10 +153,17 @@ where
         tracing::debug!("handling client request, id: {:?}", req.request.id());

         if req.request.needs_response() {
+            assert!(
+                !matches!(self.state, State::WaitingForResponse { .. }),
+                "cannot handle more than 1 request at the same time"
+            );
+
             self.state = State::WaitingForResponse {
                 request_id: req.request.id(),
                 tx: req.response_channel,
-                _req_permit: req.permit,
+                _req_permit: req
+                    .permit
+                    .expect("Client request should have a permit if a response is needed"),
             };

             self.send_message_to_peer(req.request.into()).await?;

@@ -165,7 +172,7 @@ where
             return Ok(());
         }

-        // INVARIANT: This function cannot exit early without sending a response back down the
+        // INVARIANT: From now this function cannot exit early without sending a response back down the
         // response channel.
         let res = self.send_message_to_peer(req.request.into()).await;

@@ -188,7 +195,15 @@ where
     async fn handle_peer_request(&mut self, req: PeerRequest) -> Result<(), PeerError> {
         tracing::debug!("Received peer request: {:?}", req.id());

-        let res = self.peer_request_handler.handle_peer_request(req).await?;
+        let res = timeout(
+            REQUEST_HANDLER_TIMEOUT,
+            self.peer_request_handler.handle_peer_request(req),
+        )
+        .await
+        .map_err(|_| {
+            tracing::warn!("Timed-out handling peer request, closing connection.");
+            PeerError::TimedOut
+        })??;

         // This will be an error if a response does not need to be sent
         if let Ok(res) = res.try_into() {
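The double `?` is doing two separate unwraps: `tokio::time::timeout` wraps the handler's result in an outer `Result<_, Elapsed>`, so the first `?` surfaces the mapped deadline error and the second the handler's own error. A standalone sketch of the pattern; the `handle` function and this `PeerError` enum are stand-ins, not Cuprate's types:

```rust
use std::time::Duration;

use tokio::time::timeout;

#[derive(Debug)]
enum PeerError {
    TimedOut,
    Other,
}

// Hypothetical stand-in for `handle_peer_request`.
async fn handle(req: u32) -> Result<u32, PeerError> {
    if req == 0 { Err(PeerError::Other) } else { Ok(req * 2) }
}

async fn handle_with_deadline(req: u32) -> Result<u32, PeerError> {
    // First `?`: the timeout elapsed. Second `?`: the handler failed.
    let res = timeout(Duration::from_secs(5), handle(req))
        .await
        .map_err(|_| PeerError::TimedOut)??;
    Ok(res)
}

#[tokio::main]
async fn main() {
    assert!(matches!(handle_with_deadline(2).await, Ok(4)));
    assert!(matches!(handle_with_deadline(0).await, Err(PeerError::Other)));
}
```
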
@@ -249,6 +264,10 @@ where

         tokio::select! {
             biased;
+            () = self.connection_guard.should_shutdown() => {
+                tracing::debug!("connection guard has shutdown, shutting down connection.");
+                Err(PeerError::ConnectionClosed)
+            }
             broadcast_req = self.broadcast_stream.next() => {
                 if let Some(broadcast_req) = broadcast_req {
                     self.handle_client_broadcast(broadcast_req).await
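Because the select is `biased`, branches are polled in source order rather than randomly, so placing the shutdown arm first guarantees a pending shutdown always wins over pending work. A small self-contained illustration of that ordering guarantee (channels here are arbitrary stand-ins):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (shutdown_tx, mut shutdown_rx) = watch::channel(());
    let (work_tx, mut work_rx) = tokio::sync::mpsc::channel::<u32>(8);

    // Both branches are ready at the same time...
    work_tx.send(1).await.unwrap();
    shutdown_tx.send(()).unwrap();

    // ...but `biased;` polls top-to-bottom, so shutdown is seen first.
    tokio::select! {
        biased;
        _ = shutdown_rx.changed() => println!("shutting down"),
        Some(_) = work_rx.recv() => println!("handled work"),
    }
}
```
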
@@ -282,6 +301,10 @@ where

         tokio::select! {
             biased;
+            () = self.connection_guard.should_shutdown() => {
+                tracing::debug!("connection guard has shutdown, shutting down connection.");
+                Err(PeerError::ConnectionClosed)
+            }
             () = self.request_timeout.as_mut().expect("Request timeout was not set!") => {
                 Err(PeerError::ClientChannelClosed)
             }

@@ -292,11 +315,19 @@ where
                     Err(PeerError::ClientChannelClosed)
                 }
             }
-            // We don't wait for client requests as we are already handling one.
+            client_req = self.client_rx.next() => {
+                // Although we can only handle 1 request from the client at a time, this channel is also used
+                // for specific broadcasts to this peer so we need to handle those here as well.
+                if let Some(client_req) = client_req {
+                    self.handle_client_request(client_req).await
+                } else {
+                    Err(PeerError::ClientChannelClosed)
+                }
+            },
             peer_message = stream.next() => {
                 if let Some(peer_message) = peer_message {
                     self.handle_potential_response(peer_message?).await
-                }else {
+                } else {
                     Err(PeerError::ClientChannelClosed)
                 }
             },

@@ -331,11 +362,6 @@ where
         }

         loop {
-            if self.connection_guard.should_shutdown() {
-                tracing::debug!("connection guard has shutdown, shutting down connection.");
-                return self.shutdown(PeerError::ConnectionClosed);
-            }
-
             let res = match self.state {
                 State::WaitingForRequest => self.state_waiting_for_request(&mut stream).await,
                 State::WaitingForResponse { .. } => {

@@ -36,8 +36,8 @@ use crate::{
         timeout_monitor::connection_timeout_monitor_task, Client, InternalPeerID, PeerInformation,
     },
     constants::{
-        HANDSHAKE_TIMEOUT, MAX_EAGER_PROTOCOL_MESSAGES, MAX_PEERS_IN_PEER_LIST_MESSAGE,
-        PING_TIMEOUT,
+        CLIENT_QUEUE_SIZE, HANDSHAKE_TIMEOUT, MAX_EAGER_PROTOCOL_MESSAGES,
+        MAX_PEERS_IN_PEER_LIST_MESSAGE, PING_TIMEOUT,
     },
     handles::HandleBuilder,
     AddressBook, AddressBookRequest, AddressBookResponse, BroadcastMessage, ConnectionDirection,

@@ -448,7 +448,7 @@ where

     // Set up the connection data.
     let error_slot = SharedError::new();
-    let (connection_tx, client_rx) = mpsc::channel(1);
+    let (connection_tx, client_rx) = mpsc::channel(CLIENT_QUEUE_SIZE);

     let info = PeerInformation {
         id: addr,

@@ -54,7 +54,13 @@ where
     interval.tick().await;

     loop {
-        interval.tick().await;
+        tokio::select! {
+            () = peer_information.handle.closed() => {
+                tracing::debug!("Closing timeout monitor, connection disconnected.");
+                return Ok(());
+            }
+            _ = interval.tick() => ()
+        }

         tracing::trace!("timeout monitor tick.");
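Racing the close signal against the next tick lets the monitor exit the moment the peer disconnects instead of sleeping out a full interval. A reduced sketch of the same loop, using a `oneshot` channel as a stand-in for the real connection handle's `closed()` future:

```rust
use std::time::Duration;

use tokio::{sync::oneshot, time::interval};

// `oneshot` is an assumption for illustration; the real code awaits
// the peer handle's `closed()` instead.
async fn timeout_monitor(mut closed: oneshot::Receiver<()>) {
    let mut interval = interval(Duration::from_secs(30));
    interval.tick().await; // The first tick completes immediately.

    loop {
        tokio::select! {
            _ = &mut closed => {
                println!("closing timeout monitor, connection disconnected");
                return;
            }
            _ = interval.tick() => (),
        }

        println!("timeout monitor tick");
    }
}

#[tokio::main]
async fn main() {
    let (close_tx, close_rx) = oneshot::channel();
    let monitor = tokio::spawn(timeout_monitor(close_rx));

    close_tx.send(()).unwrap(); // Peer disconnects: monitor exits at once.
    monitor.await.unwrap();
}
```
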
@@ -1,15 +1,15 @@
 use std::task::{ready, Context, Poll};

 use futures::channel::oneshot;
-use tokio::sync::{mpsc, OwnedSemaphorePermit};
-use tokio_util::sync::PollSemaphore;
+use tokio::sync::OwnedSemaphorePermit;
+use tokio_util::sync::{PollSemaphore, PollSender};
 use tower::Service;

 use cuprate_helper::asynch::InfallibleOneshotReceiver;

 use crate::{
     client::{connection, PeerInformation},
-    NetworkZone, PeerError, PeerRequest, PeerResponse, SharedError,
+    BroadcastMessage, NetworkZone, PeerError, PeerRequest, PeerResponse, SharedError,
 };

 /// A weak handle to a [`Client`](super::Client).

@@ -20,7 +20,7 @@ pub struct WeakClient<N: NetworkZone> {
     pub info: PeerInformation<N::Addr>,

     /// The channel to the [`Connection`](connection::Connection) task.
-    pub(super) connection_tx: mpsc::WeakSender<connection::ConnectionTaskRequest>,
+    pub(super) connection_tx: PollSender<connection::ConnectionTaskRequest>,

     /// The semaphore that limits the requests sent to the peer.
     pub(super) semaphore: PollSemaphore,

@@ -41,6 +41,13 @@ impl<N: NetworkZone> WeakClient<N> {
         }
         .into()
     }
+
+    /// Create a [`WeakBroadcastClient`] from this [`WeakClient`].
+    ///
+    /// See the docs for [`WeakBroadcastClient`] for what this type can do.
+    pub fn broadcast_client(&mut self) -> WeakBroadcastClient<'_, N> {
+        WeakBroadcastClient(self)
+    }
 }

 impl<Z: NetworkZone> Service<PeerRequest> for WeakClient<Z> {

@@ -53,24 +60,21 @@ impl<Z: NetworkZone> Service<PeerRequest> for WeakClient<Z> {
             return Poll::Ready(Err(err.to_string().into()));
         }

-        if self.connection_tx.strong_count() == 0 {
+        if self.permit.is_none() {
+            let permit = ready!(self.semaphore.poll_acquire(cx))
+                .expect("Client semaphore should not be closed!");
+
+            self.permit = Some(permit);
+        }
+
+        if ready!(self.connection_tx.poll_reserve(cx)).is_err() {
             let err = self.set_err(PeerError::ClientChannelClosed);
             return Poll::Ready(Err(err));
         }

-        if self.permit.is_some() {
-            return Poll::Ready(Ok(()));
-        }
-
-        let permit = ready!(self.semaphore.poll_acquire(cx))
-            .expect("Client semaphore should not be closed!");
-
-        self.permit = Some(permit);
-
         Poll::Ready(Ok(()))
     }

-    #[expect(clippy::significant_drop_tightening)]
     fn call(&mut self, request: PeerRequest) -> Self::Future {
         let permit = self
             .permit

@@ -84,29 +88,76 @@ impl<Z: NetworkZone> Service<PeerRequest> for WeakClient<Z> {
             permit: Some(permit),
         };

-        match self.connection_tx.upgrade() {
-            None => {
-                self.set_err(PeerError::ClientChannelClosed);
+        if let Err(req) = self.connection_tx.send_item(req) {
+            // The connection task could have closed between a call to `poll_ready` and the call to
+            // `call`, which means if we don't handle the error here the receiver would panic.
+            self.set_err(PeerError::ClientChannelClosed);

-                let resp = Err(PeerError::ClientChannelClosed.into());
-                drop(req.response_channel.send(resp));
-            }
-            Some(sender) => {
-                if let Err(e) = sender.try_send(req) {
-                    // The connection task could have closed between a call to `poll_ready` and the call to
-                    // `call`, which means if we don't handle the error here the receiver would panic.
-                    use mpsc::error::TrySendError;
-
-                    match e {
-                        TrySendError::Closed(req) | TrySendError::Full(req) => {
-                            self.set_err(PeerError::ClientChannelClosed);
-
-                            let resp = Err(PeerError::ClientChannelClosed.into());
-                            drop(req.response_channel.send(resp));
-                        }
-                    }
-                }
-            }
-        }
+            let resp = Err(PeerError::ClientChannelClosed.into());
+            drop(req.into_inner().unwrap().response_channel.send(resp));
+        }

         rx.into()
     }
 }
+
+/// A client used to send [`BroadcastMessage`]s directly to a single peer, although these messages
+/// can be sent using a [`WeakClient`] or [`Client`](super::Client), using this client type allows
+/// bypassing the single request being handled at a time.
+///
+/// This means that if another [`WeakClient`] has a request in progress [`WeakBroadcastClient`] can
+/// still send messages and does not need to wait for the other [`WeakClient`] to finish.
+///
+/// A thing to note is that a call to [`WeakBroadcastClient::poll_ready`] will still reserve a slot
+/// in the queue, this should be kept in mind as many [`WeakBroadcastClient`]s calling [`WeakBroadcastClient::poll_ready`]
+/// without [`WeakBroadcastClient::call`] will stop other [`WeakBroadcastClient`]s & the other types
+/// of clients.
+///
+/// This type is kept in state with the [`WeakClient`] it was produced from, allowing you to do this:
+///
+/// ```rust, ignore
+/// weak_client.broadcast_client().poll_ready(cx)
+///
+/// weak_client.broadcast_client().call(req)
+/// ```
+pub struct WeakBroadcastClient<'a, N: NetworkZone>(&'a mut WeakClient<N>);
+
+impl<N: NetworkZone> Service<BroadcastMessage> for WeakBroadcastClient<'_, N> {
+    type Response = PeerResponse;
+    type Error = tower::BoxError;
+    type Future = InfallibleOneshotReceiver<Result<Self::Response, Self::Error>>;
+
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        self.0.permit.take();
+
+        if let Some(err) = self.0.error.try_get_err() {
+            return Poll::Ready(Err(err.to_string().into()));
+        }
+
+        if ready!(self.0.connection_tx.poll_reserve(cx)).is_err() {
+            let err = self.0.set_err(PeerError::ClientChannelClosed);
+            return Poll::Ready(Err(err));
+        }
+
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, request: BroadcastMessage) -> Self::Future {
+        let (tx, rx) = oneshot::channel();
+        let req = connection::ConnectionTaskRequest {
+            response_channel: tx,
+            request: request.into(),
+            // We don't need a permit as we only accept `BroadcastMessage`, which does not require a response.
+            permit: None,
+        };
+
+        if let Err(req) = self.0.connection_tx.send_item(req) {
+            // The connection task could have closed between a call to `poll_ready` and the call to
+            // `call`, which means if we don't handle the error here the receiver would panic.
+            self.0.set_err(PeerError::ClientChannelClosed);
+
+            let resp = Err(PeerError::ClientChannelClosed.into());
+            drop(req.into_inner().unwrap().response_channel.send(resp));
+        }
+
+        rx.into()
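Since `WeakBroadcastClient` is just another `tower::Service`, the ready-then-call sequence from its doc comment is the standard tower driving pattern. A minimal, crate-agnostic sketch of that pattern (this generic helper is illustrative, not part of Cuprate):

```rust
use std::future::poll_fn;

use tower::Service;

/// Drive any `tower::Service` by hand: wait for readiness, then call.
/// For `WeakBroadcastClient`, readiness means a reserved queue slot,
/// which is why `poll_ready` should always be followed by `call`.
async fn ready_then_call<S, R>(svc: &mut S, req: R) -> Result<S::Response, S::Error>
where
    S: Service<R>,
{
    poll_fn(|cx| svc.poll_ready(cx)).await?;
    svc.call(req).await
}
```

`tower::ServiceExt::ready` wraps the same `poll_fn` dance; the point is simply that a reserved slot must be consumed by a `call`, or it blocks the other clients sharing the queue.
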
Some files were not shown because too many files have changed in this diff.