update monero-serai (#201)

* update monero-serai
* update monero-serai + change height to `usize`
* fix merge
* fix merge
* fix doc
* fix clippy take 2
* misc changes
* move RPC imports to dev deps
* handle miner txs when calculating fee
* Update consensus/rules/src/blocks.rs (Co-authored-by: hinto-janai <hinto.janai@protonmail.com>)
* Update consensus/rules/src/transactions.rs (Co-authored-by: hinto-janai <hinto.janai@protonmail.com>)
* Update storage/blockchain/src/ops/tx.rs (Co-authored-by: hinto-janai <hinto.janai@protonmail.com>)
* Update test-utils/src/data/free.rs (Co-authored-by: hinto-janai <hinto.janai@protonmail.com>)
* fixes
* fix clippy

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>

parent 27767690ca
commit 8227c28604
63 changed files with 808 additions and 784 deletions
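Most of the diff below adapts Cuprate to the new monero-serai transaction API: struct field accesses become methods, and the RingCT data moves into an `Option` on `Transaction::V2`. A minimal sketch of that pattern, assuming only the accessors visible in the hunks below (`miner_tx_wants_rct_null` is a hypothetical helper, not code from this commit):

    use monero_serai::transaction::Transaction;

    // Returns whether a miner transaction satisfies the "RCT type must be Null" rule
    // in the updated API, where V2 transactions carry `Option<RctProofs>`.
    fn miner_tx_wants_rct_null(tx: &Transaction) -> bool {
        match tx {
            // v1 transactions have no RingCT data at all.
            Transaction::V1 { .. } => true,
            // From HF 12 onward the miner tx must carry no proofs (i.e. RctType::Null).
            Transaction::V2 { proofs, .. } => proofs.is_none(),
        }
    }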
Cargo.lock (generated): 292 changes

@@ -50,17 +50,6 @@ version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b"
 
-[[package]]
-name = "async-lock"
-version = "3.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18"
-dependencies = [
- "event-listener",
- "event-listener-strategy",
- "pin-project-lite",
-]
-
 [[package]]
 name = "async-stream"
 version = "0.3.5"
@@ -176,28 +165,12 @@ dependencies = [
  "rustc-demangle",
 ]
 
-[[package]]
-name = "base58-monero"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "978e81a45367d2409ecd33369a45dda2e9a3ca516153ec194de1fbda4b9fb79d"
-dependencies = [
- "thiserror",
- "tiny-keccak",
-]
-
 [[package]]
 name = "base64"
 version = "0.22.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
 
-[[package]]
-name = "base64ct"
-version = "1.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
-
 [[package]]
 name = "bincode"
 version = "1.3.3"
@@ -401,15 +374,6 @@ version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70"
 
-[[package]]
-name = "concurrent-queue"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"
-dependencies = [
- "crossbeam-utils",
-]
-
 [[package]]
 name = "core-foundation"
 version = "0.9.4"
@@ -500,12 +464,6 @@ version = "0.8.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
 
-[[package]]
-name = "crunchy"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
-
 [[package]]
 name = "crypto-bigint"
 version = "0.5.5"
@@ -513,6 +471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76"
 dependencies = [
  "subtle",
+ "zeroize",
 ]
 
 [[package]]
@@ -589,12 +548,10 @@ dependencies = [
  "cuprate-test-utils",
  "cuprate-types",
  "curve25519-dalek",
- "dalek-ff-group",
  "futures",
  "hex",
  "hex-literal",
  "monero-serai",
- "multiexp",
  "proptest",
  "proptest-derive",
  "rand",
@@ -617,11 +574,9 @@ dependencies = [
  "cuprate-cryptonight",
  "cuprate-helper",
  "curve25519-dalek",
- "dalek-ff-group",
  "hex",
  "hex-literal",
  "monero-serai",
- "multiexp",
  "proptest",
  "proptest-derive",
  "rand",
@@ -879,7 +834,9 @@ dependencies = [
  "futures",
  "hex",
  "hex-literal",
+ "monero-rpc",
  "monero-serai",
+ "monero-simple-request-rpc",
  "paste",
  "pretty_assertions",
  "serde",
@@ -897,6 +854,7 @@ version = "0.0.0"
 name = "cuprate-types"
 version = "0.0.0"
 dependencies = [
+ "borsh",
  "bytes",
  "cuprate-epee-encoding",
  "cuprate-fixed-bytes",
@@ -951,7 +909,7 @@ dependencies = [
 [[package]]
 name = "dalek-ff-group"
 version = "0.4.1"
-source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
 dependencies = [
  "crypto-bigint",
  "curve25519-dalek",
@@ -1070,27 +1028,6 @@ dependencies = [
  "windows-sys 0.52.0",
 ]
 
-[[package]]
-name = "event-listener"
-version = "5.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba"
-dependencies = [
- "concurrent-queue",
- "parking",
- "pin-project-lite",
-]
-
-[[package]]
-name = "event-listener-strategy"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1"
-dependencies = [
- "event-listener",
- "pin-project-lite",
-]
-
 [[package]]
 name = "fastrand"
 version = "2.1.0"
@@ -1127,7 +1064,7 @@ dependencies = [
 [[package]]
 name = "flexible-transcript"
 version = "0.3.2"
-source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
 dependencies = [
  "blake2",
  "digest",
@@ -1285,7 +1222,7 @@ dependencies = [
  "futures-core",
  "futures-sink",
  "http",
- "indexmap 2.2.6",
+ "indexmap",
  "slab",
  "tokio",
  "tokio-util",
@@ -1363,15 +1300,6 @@ version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46"
 
-[[package]]
-name = "hmac"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
-dependencies = [
- "digest",
-]
-
 [[package]]
 name = "http"
 version = "1.1.0"
@@ -1791,63 +1719,163 @@ dependencies = [
  "windows-sys 0.48.0",
 ]
 
+[[package]]
+name = "monero-address"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
+dependencies = [
+ "curve25519-dalek",
+ "monero-io",
+ "monero-primitives",
+ "std-shims",
+ "thiserror",
+ "zeroize",
+]
+
+[[package]]
+name = "monero-borromean"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
+dependencies = [
+ "curve25519-dalek",
+ "monero-generators",
+ "monero-io",
+ "monero-primitives",
+ "std-shims",
+ "zeroize",
+]
+
+[[package]]
+name = "monero-bulletproofs"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
+dependencies = [
+ "curve25519-dalek",
+ "monero-generators",
+ "monero-io",
+ "monero-primitives",
+ "rand_core",
+ "std-shims",
+ "thiserror",
+ "zeroize",
+]
+
+[[package]]
+name = "monero-clsag"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
+dependencies = [
+ "curve25519-dalek",
+ "dalek-ff-group",
+ "flexible-transcript",
+ "group",
+ "monero-generators",
+ "monero-io",
+ "monero-primitives",
+ "rand_chacha",
+ "rand_core",
+ "std-shims",
+ "subtle",
+ "thiserror",
+ "zeroize",
+]
+
 [[package]]
 name = "monero-generators"
 version = "0.4.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
 dependencies = [
  "curve25519-dalek",
  "dalek-ff-group",
  "group",
+ "monero-io",
  "sha3",
  "std-shims",
  "subtle",
 ]
 
+[[package]]
+name = "monero-io"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
+dependencies = [
+ "curve25519-dalek",
+ "std-shims",
+]
+
+[[package]]
+name = "monero-mlsag"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
+dependencies = [
+ "curve25519-dalek",
+ "monero-generators",
+ "monero-io",
+ "monero-primitives",
+ "std-shims",
+ "thiserror",
+ "zeroize",
+]
+
+[[package]]
+name = "monero-primitives"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
+dependencies = [
+ "curve25519-dalek",
+ "monero-generators",
+ "monero-io",
+ "sha3",
+ "std-shims",
+ "zeroize",
+]
+
+[[package]]
+name = "monero-rpc"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
+dependencies = [
+ "async-trait",
+ "curve25519-dalek",
+ "hex",
+ "monero-address",
+ "monero-serai",
+ "serde",
+ "serde_json",
+ "std-shims",
+ "thiserror",
+ "zeroize",
+]
+
 [[package]]
 name = "monero-serai"
 version = "0.1.4-alpha"
-source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
 dependencies = [
- "async-lock",
- "async-trait",
- "base58-monero",
  "curve25519-dalek",
- "dalek-ff-group",
- "digest_auth",
- "flexible-transcript",
- "group",
- "hex",
  "hex-literal",
+ "monero-borromean",
+ "monero-bulletproofs",
+ "monero-clsag",
  "monero-generators",
- "multiexp",
- "pbkdf2",
- "rand",
- "rand_chacha",
- "rand_core",
- "rand_distr",
- "serde",
- "serde_json",
- "sha3",
- "simple-request",
+ "monero-io",
+ "monero-mlsag",
+ "monero-primitives",
  "std-shims",
- "subtle",
- "thiserror",
- "tokio",
  "zeroize",
 ]
 
 [[package]]
-name = "multiexp"
-version = "0.4.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
+name = "monero-simple-request-rpc"
+version = "0.1.0"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
 dependencies = [
- "ff",
- "group",
- "rand_core",
- "rustversion",
- "std-shims",
- "zeroize",
+ "async-trait",
+ "digest_auth",
+ "hex",
+ "monero-rpc",
+ "simple-request",
+ "tokio",
 ]
 
 [[package]]
@@ -1907,12 +1935,6 @@ dependencies = [
  "winapi",
 ]
 
-[[package]]
-name = "parking"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae"
-
 [[package]]
 name = "parking_lot"
 version = "0.12.3"
@@ -1936,35 +1958,12 @@ dependencies = [
  "windows-targets 0.52.5",
 ]
 
-[[package]]
-name = "password-hash"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166"
-dependencies = [
- "base64ct",
- "rand_core",
- "subtle",
-]
-
 [[package]]
 name = "paste"
 version = "1.0.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
 
-[[package]]
-name = "pbkdf2"
-version = "0.12.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2"
-dependencies = [
- "digest",
- "hmac",
- "password-hash",
- "sha2",
-]
-
 [[package]]
 name = "percent-encoding"
 version = "2.3.1"
@@ -2540,7 +2539,7 @@ dependencies = [
 [[package]]
 name = "simple-request"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
 dependencies = [
  "http-body-util",
  "hyper",
@@ -2596,7 +2595,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 [[package]]
 name = "std-shims"
 version = "0.1.1"
-source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
+source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
 dependencies = [
  "hashbrown",
  "spin",
@@ -2722,15 +2721,6 @@ dependencies = [
  "once_cell",
 ]
 
-[[package]]
-name = "tiny-keccak"
-version = "2.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
-dependencies = [
- "crunchy",
-]
-
 [[package]]
 name = "tinystr"
 version = "0.7.6"

Cargo.toml: 16 changes

@@ -58,15 +58,13 @@ chrono = { version = "0.4.31", default-features = false }
 crypto-bigint = { version = "0.5.5", default-features = false }
 crossbeam = { version = "0.8.4", default-features = false }
 curve25519-dalek = { version = "4.1.3", default-features = false }
-dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
 dashmap = { version = "5.5.3", default-features = false }
 dirs = { version = "5.0.1", default-features = false }
 futures = { version = "0.3.29", default-features = false }
 hex = { version = "0.4.3", default-features = false }
 hex-literal = { version = "0.4", default-features = false }
 indexmap = { version = "2.2.5", default-features = false }
-monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
-multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
+monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce", default-features = false }
 paste = { version = "1.0.14", default-features = false }
 pin-project = { version = "1.1.3", default-features = false }
 randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
@@ -86,11 +84,13 @@ tracing-subscriber = { version = "0.3.17", default-features = false }
 tracing = { version = "0.1.40", default-features = false }
 
 ## workspace.dev-dependencies
+monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
+monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" }
 tempfile = { version = "3" }
 pretty_assertions = { version = "1.4.0" }
 proptest = { version = "1" }
 proptest-derive = { version = "0.4.0" }
 tokio-test = { version = "0.4.4" }
 
 ## TODO:
 ## Potential dependencies.

@@ -19,8 +19,6 @@ futures = { workspace = true, features = ["std", "async-await"] }
 
 randomx-rs = { workspace = true }
 monero-serai = { workspace = true, features = ["std"] }
-multiexp = { workspace = true }
-dalek-ff-group = { workspace = true }
 curve25519-dalek = { workspace = true }
 
 rayon = { workspace = true }

@@ -13,13 +13,13 @@ use cuprate_types::{
 
 use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes};
 
-const BATCH_SIZE: u64 = 512;
+const BATCH_SIZE: usize = 512;
 
 async fn read_batch(
     handle: &mut BlockchainReadHandle,
-    height_from: u64,
+    height_from: usize,
 ) -> Result<Vec<BlockId>, RuntimeError> {
-    let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE as usize);
+    let mut block_ids = Vec::<BlockId>::with_capacity(BATCH_SIZE);
 
     for height in height_from..(height_from + BATCH_SIZE) {
         let request = BlockchainReadRequest::BlockHash(height, Chain::Main);
@@ -53,7 +53,7 @@ fn generate_hex(hashes: &[HashOfHashes]) -> String {
 #[command(version, about, long_about = None)]
 struct Args {
     #[arg(short, long)]
-    height: u64,
+    height: usize,
 }
 
 #[tokio::main]
@@ -67,7 +67,7 @@ async fn main() {
 
     let mut hashes_of_hashes = Vec::new();
 
-    let mut height = 0u64;
+    let mut height = 0_usize;
 
     while height < height_target {
         match read_batch(&mut read_handle, height).await {
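The `u64` → `usize` switch above also removes the `as usize` cast when sizing the batch vector. A reduced, self-contained sketch of that loop shape (plain Rust, with the Cuprate-specific request types left out):

    const BATCH_SIZE: usize = 512;

    // Collect the heights of one batch starting at `height_from`.
    fn batch_heights(height_from: usize) -> Vec<usize> {
        // `Vec::with_capacity` takes a `usize`, so no cast is needed any more.
        let mut heights = Vec::with_capacity(BATCH_SIZE);
        for height in height_from..(height_from + BATCH_SIZE) {
            heights.push(height);
        }
        heights
    }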

@@ -244,7 +244,7 @@ where
 
         let block_blob = block.serialize();
 
-        let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
+        let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else {
             return Err(FastSyncError::MinerTx(MinerTxError::InputNotOfTypeGen));
         };
         if *height != block_chain_ctx.chain_height {
@@ -252,7 +252,7 @@ where
         }
 
         let mut verified_txs = Vec::with_capacity(txs.len());
-        for tx in &block.txs {
+        for tx in &block.transactions {
             let tx = txs
                 .remove(tx)
                 .ok_or(FastSyncError::TxsIncludedWithBlockIncorrect)?;
@@ -269,8 +269,8 @@ where
 
         let total_fees = verified_txs.iter().map(|tx| tx.fee).sum::<u64>();
         let total_outputs = block
-            .miner_tx
-            .prefix
+            .miner_transaction
+            .prefix()
             .outputs
             .iter()
             .map(|output| output.amount.unwrap_or(0))
@@ -278,8 +278,8 @@ where
 
         let generated_coins = total_outputs - total_fees;
 
-        let weight =
-            block.miner_tx.weight() + verified_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
+        let weight = block.miner_transaction.weight()
+            + verified_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
 
         Ok(FastSyncResponse::ValidateBlock(VerifiedBlockInformation {
             block_blob,

@@ -15,8 +15,6 @@ cuprate-helper = { path = "../../helper", default-features = false, features = [
 cuprate-cryptonight = {path = "../../cryptonight"}
 
 monero-serai = { workspace = true, features = ["std"] }
-multiexp = { workspace = true, features = ["std", "batch"] }
-dalek-ff-group = { workspace = true, features = ["std"] }
 curve25519-dalek = { workspace = true, features = ["alloc", "zeroize", "precomputed-tables"] }
 
 rand = { workspace = true, features = ["std", "std_rng"] }

@@ -1,4 +1,4 @@
-use multiexp::BatchVerifier as InternalBatchVerifier;
+use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier;
 
 /// This trait represents a batch verifier.
 ///
@@ -12,18 +12,12 @@ pub trait BatchVerifier {
     /// # Panics
     /// This function may panic if `stmt` contains calls to `rayon`'s parallel iterators, e.g. `par_iter()`.
     // TODO: remove the panics by adding a generic API upstream.
-    fn queue_statement<R>(
-        &mut self,
-        stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
-    ) -> R;
+    fn queue_statement<R>(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R;
 }
 
 // impl this for a single threaded batch verifier.
-impl BatchVerifier for &'_ mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint> {
-    fn queue_statement<R>(
-        &mut self,
-        stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
-    ) -> R {
+impl BatchVerifier for &'_ mut InternalBatchVerifier {
+    fn queue_statement<R>(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R {
         stmt(self)
     }
 }
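A usage sketch of the simplified trait above (`queue_noop` is a hypothetical caller; `BatchVerifier` and `InternalBatchVerifier` are the items defined and imported in the hunk):

    // The closure now receives `&mut InternalBatchVerifier` directly; callers no longer
    // spell out the old `<(), dalek_ff_group::EdwardsPoint>` generics.
    fn queue_noop<B: BatchVerifier>(verifier: &mut B) {
        verifier.queue_statement(|_inner| {
            // queue a bulletproof statement on `_inner` here
        });
    }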

@@ -21,8 +21,8 @@ pub const PENALTY_FREE_ZONE_1: usize = 20000;
 pub const PENALTY_FREE_ZONE_2: usize = 60000;
 pub const PENALTY_FREE_ZONE_5: usize = 300000;
 
-pub const RX_SEEDHASH_EPOCH_BLOCKS: u64 = 2048;
-pub const RX_SEEDHASH_EPOCH_LAG: u64 = 64;
+pub const RX_SEEDHASH_EPOCH_BLOCKS: usize = 2048;
+pub const RX_SEEDHASH_EPOCH_LAG: usize = 64;
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
 pub enum BlockError {
@@ -52,14 +52,14 @@ pub trait RandomX {
 }
 
 /// Returns if this height is a RandomX seed height.
-pub fn is_randomx_seed_height(height: u64) -> bool {
+pub fn is_randomx_seed_height(height: usize) -> bool {
     height % RX_SEEDHASH_EPOCH_BLOCKS == 0
 }
 
 /// Returns the RandomX seed height for this block.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#randomx-seed>
-pub fn randomx_seed_height(height: u64) -> u64 {
+pub fn randomx_seed_height(height: usize) -> usize {
     if height <= RX_SEEDHASH_EPOCH_BLOCKS + RX_SEEDHASH_EPOCH_LAG {
         0
     } else {
@@ -75,7 +75,7 @@ pub fn randomx_seed_height(height: u64) -> u64 {
 pub fn calculate_pow_hash<R: RandomX>(
     randomx_vm: Option<&R>,
     buf: &[u8],
-    height: u64,
+    height: usize,
     hf: &HardFork,
 ) -> Result<[u8; 32], BlockError> {
     if height == 202612 {
@@ -89,7 +89,8 @@ pub fn calculate_pow_hash<R: RandomX>(
     } else if hf < &HardFork::V10 {
         cryptonight_hash_v2(buf)
     } else if hf < &HardFork::V12 {
-        cryptonight_hash_r(buf, height)
+        // FIXME: https://github.com/Cuprate/cuprate/issues/167.
+        cryptonight_hash_r(buf, height as u64)
     } else {
         randomx_vm
             .expect("RandomX VM needed from hf 12")
@@ -220,7 +221,7 @@ pub struct ContextToVerifyBlock {
     /// Contains the median timestamp over the last 60 blocks, if there is less than 60 blocks this should be [`None`]
     pub median_block_timestamp: Option<u64>,
     /// The current chain height.
-    pub chain_height: u64,
+    pub chain_height: usize,
     /// The current hard-fork.
     pub current_hf: HardFork,
     /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/difficulty.html#calculating-difficulty>
@@ -263,11 +264,11 @@ pub fn check_block(
     check_block_weight(block_weight, block_chain_ctx.median_weight_for_block_reward)?;
     block_size_sanity_check(block_blob_len, block_chain_ctx.effective_median_weight)?;
 
-    check_amount_txs(block.txs.len())?;
-    check_txs_unique(&block.txs)?;
+    check_amount_txs(block.transactions.len())?;
+    check_txs_unique(&block.transactions)?;
 
     let generated_coins = check_miner_tx(
-        &block.miner_tx,
+        &block.miner_transaction,
         total_fees,
         block_chain_ctx.chain_height,
         block_weight,

@@ -29,14 +29,14 @@ fn genesis_miner_tx(network: &Network) -> Transaction {
 pub fn generate_genesis_block(network: &Network) -> Block {
     Block {
         header: BlockHeader {
-            major_version: 1,
-            minor_version: 0,
+            hardfork_version: 1,
+            hardfork_signal: 0,
             timestamp: 0,
             previous: [0; 32],
             nonce: genesis_nonce(network),
         },
-        miner_tx: genesis_miner_tx(network),
-        txs: vec![],
+        miner_transaction: genesis_miner_tx(network),
+        transactions: vec![],
     }
 }
 

@@ -40,11 +40,11 @@ pub enum HardForkError {
 /// Information about a given hard-fork.
 #[derive(Debug, Clone, Copy, Eq, PartialEq)]
 pub struct HFInfo {
-    height: u64,
-    threshold: u64,
+    height: usize,
+    threshold: usize,
 }
 impl HFInfo {
-    pub const fn new(height: u64, threshold: u64) -> HFInfo {
+    pub const fn new(height: usize, threshold: usize) -> HFInfo {
         HFInfo { height, threshold }
     }
 }
@@ -202,8 +202,8 @@ impl HardFork {
     #[inline]
     pub fn from_block_header(header: &BlockHeader) -> Result<(HardFork, HardFork), HardForkError> {
         Ok((
-            HardFork::from_version(header.major_version)?,
-            HardFork::from_vote(header.minor_version),
+            HardFork::from_version(header.hardfork_version)?,
+            HardFork::from_vote(header.hardfork_signal),
         ))
     }
 
@@ -245,7 +245,7 @@ impl HardFork {
 /// A struct holding the current voting state of the blockchain.
 #[derive(Debug, Clone, Eq, PartialEq)]
 pub struct HFVotes {
-    votes: [u64; NUMB_OF_HARD_FORKS],
+    votes: [usize; NUMB_OF_HARD_FORKS],
     vote_list: VecDeque<HardFork>,
     window_size: usize,
 }
@@ -318,13 +318,13 @@ impl HFVotes {
     /// Returns the total votes for a hard-fork.
     ///
     /// ref: <https://monero-book.cuprate.org/consensus_rules/hardforks.html#accepting-a-fork>
-    pub fn votes_for_hf(&self, hf: &HardFork) -> u64 {
+    pub fn votes_for_hf(&self, hf: &HardFork) -> usize {
         self.votes[*hf as usize - 1..].iter().sum()
     }
 
     /// Returns the total amount of votes being tracked
-    pub fn total_votes(&self) -> u64 {
-        self.votes.iter().sum()
+    pub fn total_votes(&self) -> usize {
+        self.vote_list.len()
     }
 
     /// Checks if a future hard fork should be activated, returning the next hard-fork that should be
@@ -334,8 +334,8 @@ impl HFVotes {
     pub fn current_fork(
         &self,
         current_hf: &HardFork,
-        current_height: u64,
-        window: u64,
+        current_height: usize,
+        window: usize,
         hfs_info: &HFsInfo,
     ) -> HardFork {
         let mut current_hf = *current_hf;
@@ -361,6 +361,6 @@ impl HFVotes {
 /// Returns the votes needed for a hard-fork.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/hardforks.html#accepting-a-fork>
-pub fn votes_needed(threshold: u64, window: u64) -> u64 {
+pub fn votes_needed(threshold: usize, window: usize) -> usize {
     (threshold * window).div_ceil(100)
 }
 
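Because `votes_needed` now stays entirely in `usize`, the accept-threshold arithmetic needs no conversions. A small worked example (the 80% / 10080-block numbers are illustrative, not taken from this diff):

    fn votes_needed(threshold: usize, window: usize) -> usize {
        (threshold * window).div_ceil(100)
    }

    fn main() {
        // 80% of a 10080-block window, rounded up: 806400 / 100 = 8064 votes.
        assert_eq!(votes_needed(80, 10080), 8064);
    }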

@@ -4,7 +4,7 @@ use proptest::{arbitrary::any, prop_assert_eq, prop_compose, proptest};
 
 use crate::hard_forks::{HFVotes, HardFork, NUMB_OF_HARD_FORKS};
 
-const TEST_WINDOW_SIZE: u64 = 25;
+const TEST_WINDOW_SIZE: usize = 25;
 
 #[test]
 fn target_block_time() {
@@ -35,9 +35,9 @@ prop_compose! {
     fn arb_full_hf_votes()
     (
         // we can't use HardFork as for some reason it overflows the stack, so we use u8.
-        votes in any::<[u8; TEST_WINDOW_SIZE as usize]>()
+        votes in any::<[u8; TEST_WINDOW_SIZE]>()
     ) -> HFVotes {
-        let mut vote_count = HFVotes::new(TEST_WINDOW_SIZE as usize);
+        let mut vote_count = HFVotes::new(TEST_WINDOW_SIZE);
         for vote in votes {
             vote_count.add_vote_for_hf(&HardFork::from_vote(vote % 17));
         }
@@ -48,9 +48,9 @@ prop_compose! {
 proptest! {
     #[test]
     fn hf_vote_counter_total_correct(hf_votes in arb_full_hf_votes()) {
-        prop_assert_eq!(hf_votes.total_votes(), u64::try_from(hf_votes.vote_list.len()).unwrap());
+        prop_assert_eq!(hf_votes.total_votes(), hf_votes.vote_list.len());
 
-        let mut votes = [0_u64; NUMB_OF_HARD_FORKS];
+        let mut votes = [0_usize; NUMB_OF_HARD_FORKS];
         for vote in hf_votes.vote_list.iter() {
             // manually go through the list of votes tallying
             votes[*vote as usize - 1] += 1;

@@ -1,7 +1,4 @@
-use monero_serai::{
-    ringct::RctType,
-    transaction::{Input, Output, Timelock, Transaction},
-};
+use monero_serai::transaction::{Input, Output, Timelock, Transaction};
 
 use crate::{is_decomposed_amount, transactions::check_output_types, HardFork, TxVersion};
 
@@ -35,7 +32,7 @@ const MONEY_SUPPLY: u64 = u64::MAX;
 /// The minimum block reward per minute, "tail-emission"
 const MINIMUM_REWARD_PER_MIN: u64 = 3 * 10_u64.pow(11);
 /// The value which `lock_time` should be for a coinbase output.
-const MINER_TX_TIME_LOCKED_BLOCKS: u64 = 60;
+const MINER_TX_TIME_LOCKED_BLOCKS: usize = 60;
 
 /// Calculates the base block reward without taking away the penalty for expanding
 /// the block.
@@ -88,7 +85,7 @@ fn check_miner_tx_version(tx_version: &TxVersion, hf: &HardFork) -> Result<(), M
 /// Checks the miner transactions inputs.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#input>
-fn check_inputs(inputs: &[Input], chain_height: u64) -> Result<(), MinerTxError> {
+fn check_inputs(inputs: &[Input], chain_height: usize) -> Result<(), MinerTxError> {
     if inputs.len() != 1 {
         return Err(MinerTxError::IncorrectNumbOfInputs);
     }
@@ -108,15 +105,15 @@ fn check_inputs(inputs: &[Input], chain_height: u64) -> Result<(), MinerTxError>
 /// Checks the miner transaction has a correct time lock.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#unlock-time>
-fn check_time_lock(time_lock: &Timelock, chain_height: u64) -> Result<(), MinerTxError> {
+fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> {
     match time_lock {
-        Timelock::Block(till_height) => {
+        &Timelock::Block(till_height) => {
             // Lock times above this amount are timestamps not blocks.
             // This is just for safety though and shouldn't actually be hit.
-            if till_height > &500_000_000 {
+            if till_height > 500_000_000 {
                 Err(MinerTxError::InvalidLockTime)?;
             }
-            if u64::try_from(*till_height).unwrap() != chain_height + MINER_TX_TIME_LOCKED_BLOCKS {
+            if till_height != chain_height + MINER_TX_TIME_LOCKED_BLOCKS {
                 Err(MinerTxError::InvalidLockTime)
             } else {
                 Ok(())
@@ -182,28 +179,33 @@ fn check_total_output_amt(
 pub fn check_miner_tx(
     tx: &Transaction,
     total_fees: u64,
-    chain_height: u64,
+    chain_height: usize,
     block_weight: usize,
     median_bw: usize,
     already_generated_coins: u64,
     hf: &HardFork,
 ) -> Result<u64, MinerTxError> {
-    let tx_version = TxVersion::from_raw(tx.prefix.version).ok_or(MinerTxError::VersionInvalid)?;
+    let tx_version = TxVersion::from_raw(tx.version()).ok_or(MinerTxError::VersionInvalid)?;
     check_miner_tx_version(&tx_version, hf)?;
 
     // ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#ringct-type>
-    if hf >= &HardFork::V12 && tx.rct_signatures.rct_type() != RctType::Null {
-        return Err(MinerTxError::RCTTypeNotNULL);
+    match tx {
+        Transaction::V1 { .. } => (),
+        Transaction::V2 { proofs, .. } => {
+            if hf >= &HardFork::V12 && proofs.is_some() {
+                return Err(MinerTxError::RCTTypeNotNULL);
+            }
+        }
     }
 
-    check_time_lock(&tx.prefix.timelock, chain_height)?;
+    check_time_lock(&tx.prefix().additional_timelock, chain_height)?;
 
-    check_inputs(&tx.prefix.inputs, chain_height)?;
+    check_inputs(&tx.prefix().inputs, chain_height)?;
 
-    check_output_types(&tx.prefix.outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?;
+    check_output_types(&tx.prefix().outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?;
 
     let reward = calculate_block_reward(block_weight, median_bw, already_generated_coins, hf);
-    let total_outs = sum_outputs(&tx.prefix.outputs, hf, &tx_version)?;
+    let total_outs = sum_outputs(&tx.prefix().outputs, hf, &tx_version)?;
 
     check_total_output_amt(total_outs, reward, total_fees, hf)
 }
@ -91,7 +91,7 @@ impl TxVersion {
|
||||||
///
|
///
|
||||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions.html#version>
|
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions.html#version>
|
||||||
/// && <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#version>
|
/// && <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#version>
|
||||||
pub fn from_raw(version: u64) -> Option<TxVersion> {
|
pub fn from_raw(version: u8) -> Option<TxVersion> {
|
||||||
Some(match version {
|
Some(match version {
|
||||||
1 => TxVersion::RingSignatures,
|
1 => TxVersion::RingSignatures,
|
||||||
2 => TxVersion::RingCT,
|
2 => TxVersion::RingCT,
|
||||||
|
@ -205,7 +205,7 @@ fn check_number_of_outputs(
|
||||||
outputs: usize,
|
outputs: usize,
|
||||||
hf: &HardFork,
|
hf: &HardFork,
|
||||||
tx_version: &TxVersion,
|
tx_version: &TxVersion,
|
||||||
rct_type: &RctType,
|
bp_or_bpp: bool,
|
||||||
) -> Result<(), TransactionError> {
|
) -> Result<(), TransactionError> {
|
||||||
if tx_version == &TxVersion::RingSignatures {
|
if tx_version == &TxVersion::RingSignatures {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
|
@ -215,18 +215,10 @@ fn check_number_of_outputs(
|
||||||
return Err(TransactionError::InvalidNumberOfOutputs);
|
return Err(TransactionError::InvalidNumberOfOutputs);
|
||||||
}
|
}
|
||||||
|
|
||||||
match rct_type {
|
if bp_or_bpp && outputs > MAX_BULLETPROOFS_OUTPUTS {
|
||||||
RctType::Bulletproofs
|
Err(TransactionError::InvalidNumberOfOutputs)
|
||||||
| RctType::BulletproofsCompactAmount
|
} else {
|
||||||
| RctType::Clsag
|
Ok(())
|
||||||
| RctType::BulletproofsPlus => {
|
|
||||||
if outputs <= MAX_BULLETPROOFS_OUTPUTS {
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(TransactionError::InvalidNumberOfOutputs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => Ok(()),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -239,11 +231,11 @@ fn check_outputs_semantics(
|
||||||
outputs: &[Output],
|
outputs: &[Output],
|
||||||
hf: &HardFork,
|
hf: &HardFork,
|
||||||
tx_version: &TxVersion,
|
tx_version: &TxVersion,
|
||||||
rct_type: &RctType,
|
bp_or_bpp: bool,
|
||||||
) -> Result<u64, TransactionError> {
|
) -> Result<u64, TransactionError> {
|
||||||
check_output_types(outputs, hf)?;
|
check_output_types(outputs, hf)?;
|
||||||
check_output_keys(outputs)?;
|
check_output_keys(outputs)?;
|
||||||
check_number_of_outputs(outputs.len(), hf, tx_version, rct_type)?;
|
check_number_of_outputs(outputs.len(), hf, tx_version, bp_or_bpp)?;
|
||||||
|
|
||||||
sum_outputs(outputs, hf, tx_version)
|
sum_outputs(outputs, hf, tx_version)
|
||||||
}
|
}
|
||||||
|
@ -255,14 +247,14 @@ fn check_outputs_semantics(
|
||||||
/// <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html>
|
/// <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html>
|
||||||
pub fn output_unlocked(
|
pub fn output_unlocked(
|
||||||
time_lock: &Timelock,
|
time_lock: &Timelock,
|
||||||
current_chain_height: u64,
|
current_chain_height: usize,
|
||||||
current_time_lock_timestamp: u64,
|
current_time_lock_timestamp: u64,
|
||||||
hf: &HardFork,
|
hf: &HardFork,
|
||||||
) -> bool {
|
) -> bool {
|
||||||
match *time_lock {
|
match *time_lock {
|
||||||
Timelock::None => true,
|
Timelock::None => true,
|
||||||
Timelock::Block(unlock_height) => {
|
Timelock::Block(unlock_height) => {
|
||||||
check_block_time_lock(unlock_height.try_into().unwrap(), current_chain_height)
|
check_block_time_lock(unlock_height, current_chain_height)
|
||||||
}
|
}
|
||||||
Timelock::Time(unlock_time) => {
|
Timelock::Time(unlock_time) => {
|
||||||
check_timestamp_time_lock(unlock_time, current_time_lock_timestamp, hf)
|
check_timestamp_time_lock(unlock_time, current_time_lock_timestamp, hf)
|
||||||
|
@ -273,7 +265,7 @@ pub fn output_unlocked(
|
||||||
/// Returns if a locked output, which uses a block height, can be spent.
|
/// Returns if a locked output, which uses a block height, can be spent.
|
||||||
///
|
///
|
||||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#block-height>
|
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#block-height>
|
||||||
fn check_block_time_lock(unlock_height: u64, current_chain_height: u64) -> bool {
|
fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool {
|
||||||
// current_chain_height = 1 + top height
|
// current_chain_height = 1 + top height
|
||||||
unlock_height <= current_chain_height
|
unlock_height <= current_chain_height
|
||||||
}
|
}
|
||||||
|
@ -297,7 +289,7 @@ fn check_timestamp_time_lock(
|
||||||
/// <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#the-output-must-not-be-locked>
|
/// <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#the-output-must-not-be-locked>
|
||||||
fn check_all_time_locks(
|
fn check_all_time_locks(
|
||||||
time_locks: &[Timelock],
|
time_locks: &[Timelock],
|
||||||
current_chain_height: u64,
|
current_chain_height: usize,
|
||||||
current_time_lock_timestamp: u64,
|
current_time_lock_timestamp: u64,
|
||||||
hf: &HardFork,
|
hf: &HardFork,
|
||||||
) -> Result<(), TransactionError> {
|
) -> Result<(), TransactionError> {
|
||||||
|
@ -442,8 +434,8 @@ fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), Transactio
|
||||||
///
|
///
|
||||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#10-block-lock>
|
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#10-block-lock>
|
||||||
fn check_10_block_lock(
|
fn check_10_block_lock(
|
||||||
youngest_used_out_height: u64,
|
youngest_used_out_height: usize,
|
||||||
current_chain_height: u64,
|
+current_chain_height: usize,
hf: &HardFork,
) -> Result<(), TransactionError> {
if hf >= &HardFork::V12 {
@@ -510,7 +502,7 @@ fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result<u64, Transa
fn check_inputs_contextual(
inputs: &[Input],
tx_ring_members_info: &TxRingMembersInfo,
-current_chain_height: u64,
+current_chain_height: usize,
hf: &HardFork,
) -> Result<(), TransactionError> {
// This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members.
@@ -615,28 +607,41 @@ pub fn check_transaction_semantic(
Err(TransactionError::TooBig)?;
}

-let tx_version = TxVersion::from_raw(tx.prefix.version)
-.ok_or(TransactionError::TransactionVersionInvalid)?;
+let tx_version =
+TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?;

-let outputs_sum = check_outputs_semantics(
-&tx.prefix.outputs,
-hf,
-&tx_version,
-&tx.rct_signatures.rct_type(),
-)?;
-let inputs_sum = check_inputs_semantics(&tx.prefix.inputs, hf)?;
+let bp_or_bpp = match tx {
+Transaction::V2 {
+proofs: Some(proofs),
+..
+} => match proofs.rct_type() {
+RctType::AggregateMlsagBorromean | RctType::MlsagBorromean => false,
+RctType::MlsagBulletproofs
+| RctType::MlsagBulletproofsCompactAmount
+| RctType::ClsagBulletproof
+| RctType::ClsagBulletproofPlus => true,
+},
+Transaction::V2 { proofs: None, .. } | Transaction::V1 { .. } => false,
+};

-let fee = match tx_version {
-TxVersion::RingSignatures => {
+let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, &tx_version, bp_or_bpp)?;
+let inputs_sum = check_inputs_semantics(&tx.prefix().inputs, hf)?;

+let fee = match tx {
+Transaction::V1 { .. } => {
if outputs_sum >= inputs_sum {
Err(TransactionError::OutputsTooHigh)?;
}
inputs_sum - outputs_sum
}
-TxVersion::RingCT => {
-ring_ct::ring_ct_semantic_checks(tx, tx_hash, verifier, hf)?;
+Transaction::V2 { proofs, .. } => {
+let proofs = proofs
+.as_ref()
+.ok_or(TransactionError::TransactionVersionInvalid)?;

-tx.rct_signatures.base.fee
+ring_ct::ring_ct_semantic_checks(proofs, tx_hash, verifier, hf)?;

+proofs.base.fee
}
};

@@ -654,15 +659,15 @@ pub fn check_transaction_semantic(
pub fn check_transaction_contextual(
tx: &Transaction,
tx_ring_members_info: &TxRingMembersInfo,
-current_chain_height: u64,
+current_chain_height: usize,
current_time_lock_timestamp: u64,
hf: &HardFork,
) -> Result<(), TransactionError> {
-let tx_version = TxVersion::from_raw(tx.prefix.version)
-.ok_or(TransactionError::TransactionVersionInvalid)?;
+let tx_version =
+TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?;

check_inputs_contextual(
-&tx.prefix.inputs,
+&tx.prefix().inputs,
tx_ring_members_info,
current_chain_height,
hf,
@@ -676,17 +681,22 @@ pub fn check_transaction_contextual(
hf,
)?;

-match tx_version {
-TxVersion::RingSignatures => ring_signatures::check_input_signatures(
-&tx.prefix.inputs,
-&tx.signatures,
+match &tx {
+Transaction::V1 { prefix, signatures } => ring_signatures::check_input_signatures(
+&prefix.inputs,
+signatures,
&tx_ring_members_info.rings,
-&tx.signature_hash(),
+// This will only return None on v2 miner txs.
+&tx.signature_hash()
+.ok_or(TransactionError::TransactionVersionInvalid)?,
),
-TxVersion::RingCT => Ok(ring_ct::check_input_signatures(
-&tx.signature_hash(),
-&tx.prefix.inputs,
-&tx.rct_signatures,
+Transaction::V2 { prefix, proofs } => Ok(ring_ct::check_input_signatures(
+&tx.signature_hash()
+.ok_or(TransactionError::TransactionVersionInvalid)?,
+&prefix.inputs,
+proofs
+.as_ref()
+.ok_or(TransactionError::TransactionVersionInvalid)?,
&tx_ring_members_info.rings,
)?),
}
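The hunks above change how the fee is derived: a v1 (pre-RingCT) transaction's fee is the clear-text surplus of inputs over outputs, while a v2 transaction reads the fee out of its RingCT proofs, which a miner transaction may not carry. A minimal sketch of that rule with stand-in types (illustrative only, not the actual cuprate/monero-serai API):

// Illustrative only: `TxKind` is a stand-in, not a real monero-serai type.
enum TxKind {
    /// Pre-RingCT: amounts are public, so the fee is inputs minus outputs.
    V1 { inputs_sum: u64, outputs_sum: u64 },
    /// RingCT: the fee is stored in the proof base; miner txs carry no proofs.
    V2 { proof_fee: Option<u64> },
}

fn fee(tx: TxKind) -> Result<u64, &'static str> {
    match tx {
        TxKind::V1 { inputs_sum, outputs_sum } => {
            // Outputs creating more coins than the inputs spend is invalid.
            if outputs_sum >= inputs_sum {
                return Err("outputs too high");
            }
            Ok(inputs_sum - outputs_sum)
        }
        TxKind::V2 { proof_fee } => proof_fee.ok_or("missing RingCT proofs"),
    }
}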

@@ -70,7 +70,7 @@ pub struct TxRingMembersInfo {
pub rings: Rings,
/// Information on the structure of the decoys, must be [`None`] for txs before [`HardFork::V1`]
pub decoy_info: Option<DecoyInfo>,
-pub youngest_used_out_height: u64,
+pub youngest_used_out_height: usize,
pub time_locked_outs: Vec<Timelock>,
}


@@ -1,13 +1,13 @@
use curve25519_dalek::{EdwardsPoint, Scalar};
use hex_literal::hex;
use monero_serai::{
+generators::H,
ringct::{
clsag::ClsagError,
mlsag::{AggregateRingMatrixBuilder, MlsagError, RingMatrix},
-RctPrunable, RctSignatures, RctType,
+RctProofs, RctPrunable, RctType,
},
-transaction::{Input, Transaction},
-H,
+transaction::Input,
};
use rand::thread_rng;
#[cfg(feature = "rayon")]
@@ -48,12 +48,12 @@ fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(),
use RctType as T;

match ty {
-T::MlsagAggregate | T::MlsagIndividual if hf >= F::V4 && hf < F::V9 => Ok(()),
-T::Bulletproofs if hf >= F::V8 && hf < F::V11 => Ok(()),
-T::BulletproofsCompactAmount if hf >= F::V10 && hf < F::V14 => Ok(()),
-T::BulletproofsCompactAmount if GRANDFATHERED_TRANSACTIONS.contains(tx_hash) => Ok(()),
-T::Clsag if hf >= F::V13 && hf < F::V16 => Ok(()),
-T::BulletproofsPlus if hf >= F::V15 => Ok(()),
+T::AggregateMlsagBorromean | T::MlsagBorromean if hf >= F::V4 && hf < F::V9 => Ok(()),
+T::MlsagBulletproofs if hf >= F::V8 && hf < F::V11 => Ok(()),
+T::MlsagBulletproofsCompactAmount if hf >= F::V10 && hf < F::V14 => Ok(()),
+T::MlsagBulletproofsCompactAmount if GRANDFATHERED_TRANSACTIONS.contains(tx_hash) => Ok(()),
+T::ClsagBulletproof if hf >= F::V13 && hf < F::V16 => Ok(()),
+T::ClsagBulletproofPlus if hf >= F::V15 => Ok(()),
_ => Err(RingCTError::TypeNotAllowed),
}
}
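The renamed `RctType` variants above keep the same hard-fork windows as before; only the names move to monero-serai's new scheme. A rough sketch of the same validity table with plain integers for the fork versions (illustrative only, not the crate's API; the grandfathered-transaction exception is omitted):

// Illustrative sketch of the RctType / hard-fork table from the hunk above.
#[derive(Clone, Copy)]
enum SketchRctType {
    AggregateMlsagBorromean,
    MlsagBorromean,
    MlsagBulletproofs,
    MlsagBulletproofsCompactAmount,
    ClsagBulletproof,
    ClsagBulletproofPlus,
}

// `hf` is the raw hard-fork version number.
fn rct_type_allowed(ty: SketchRctType, hf: u8) -> bool {
    use SketchRctType as T;
    match ty {
        T::AggregateMlsagBorromean | T::MlsagBorromean => (4..9).contains(&hf),
        T::MlsagBulletproofs => (8..11).contains(&hf),
        T::MlsagBulletproofsCompactAmount => (10..14).contains(&hf),
        T::ClsagBulletproof => (13..16).contains(&hf),
        T::ClsagBulletproofPlus => hf >= 15,
    }
}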

@@ -61,20 +61,22 @@ fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(),
/// Checks that the pseudo-outs sum to the same point as the output commitments.
///
/// <https://monero-book.cuprate.org/consensus_rules/ring_ct.html#pseudo-outs-outpks-balance>
-fn simple_type_balances(rct_sig: &RctSignatures) -> Result<(), RingCTError> {
-let pseudo_outs = if rct_sig.rct_type() == RctType::MlsagIndividual {
+fn simple_type_balances(rct_sig: &RctProofs) -> Result<(), RingCTError> {
+let pseudo_outs = if rct_sig.rct_type() == RctType::MlsagBorromean {
&rct_sig.base.pseudo_outs
} else {
match &rct_sig.prunable {
RctPrunable::Clsag { pseudo_outs, .. }
+| RctPrunable::MlsagBulletproofsCompactAmount { pseudo_outs, .. }
| RctPrunable::MlsagBulletproofs { pseudo_outs, .. } => pseudo_outs,
-_ => panic!("RingCT type is not simple!"),
+RctPrunable::MlsagBorromean { .. } => &rct_sig.base.pseudo_outs,
+RctPrunable::AggregateMlsagBorromean { .. } => panic!("RingCT type is not simple!"),
}
};

let sum_inputs = pseudo_outs.iter().sum::<EdwardsPoint>();
-let sum_outputs = rct_sig.base.commitments.iter().sum::<EdwardsPoint>()
-+ Scalar::from(rct_sig.base.fee) * H();
+let sum_outputs =
+rct_sig.base.commitments.iter().sum::<EdwardsPoint>() + Scalar::from(rct_sig.base.fee) * *H;

if sum_inputs == sum_outputs {
Ok(())
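The check above enforces that the pseudo-outs sum to the same point as the output commitments plus fee·H. A toy model of the same balance rule over plain integers, ignoring Pedersen blinding entirely:

// Toy model only: real RingCT balances Edwards points, not integers.
fn balances(pseudo_out_amounts: &[u64], output_amounts: &[u64], fee: u64) -> bool {
    // Inputs must equal outputs plus the declared fee.
    let sum_inputs: u64 = pseudo_out_amounts.iter().sum();
    let sum_outputs: u64 = output_amounts.iter().sum::<u64>() + fee;
    sum_inputs == sum_outputs
}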

@@ -89,13 +91,12 @@ fn simple_type_balances(rct_sig: &RctSignatures) -> Result<(), RingCTError> {
/// <https://monero-book.cuprate.org/consensus_rules/ring_ct/bulletproofs.html>
/// <https://monero-book.cuprate.org/consensus_rules/ring_ct/bulletproofs+.html>
fn check_output_range_proofs(
-rct_sig: &RctSignatures,
+proofs: &RctProofs,
mut verifier: impl BatchVerifier,
) -> Result<(), RingCTError> {
-let commitments = &rct_sig.base.commitments;
+let commitments = &proofs.base.commitments;

-match &rct_sig.prunable {
-RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?,
+match &proofs.prunable {
RctPrunable::MlsagBorromean { borromean, .. }
| RctPrunable::AggregateMlsagBorromean { borromean, .. } => try_par_iter(borromean)
.zip(commitments)
@@ -106,10 +107,11 @@ fn check_output_range_proofs(
Err(RingCTError::BorromeanRangeInvalid)
}
}),
-RctPrunable::MlsagBulletproofs { bulletproofs, .. }
-| RctPrunable::Clsag { bulletproofs, .. } => {
+RctPrunable::MlsagBulletproofs { bulletproof, .. }
+| RctPrunable::MlsagBulletproofsCompactAmount { bulletproof, .. }
+| RctPrunable::Clsag { bulletproof, .. } => {
if verifier.queue_statement(|verifier| {
-bulletproofs.batch_verify(&mut thread_rng(), verifier, (), commitments)
+bulletproof.batch_verify(&mut thread_rng(), verifier, commitments)
}) {
Ok(())
} else {
@@ -120,18 +122,18 @@ fn check_output_range_proofs(
}

pub(crate) fn ring_ct_semantic_checks(
-tx: &Transaction,
+proofs: &RctProofs,
tx_hash: &[u8; 32],
verifier: impl BatchVerifier,
hf: &HardFork,
) -> Result<(), RingCTError> {
-let rct_type = tx.rct_signatures.rct_type();
+let rct_type = proofs.rct_type();

check_rct_type(&rct_type, *hf, tx_hash)?;
-check_output_range_proofs(&tx.rct_signatures, verifier)?;
+check_output_range_proofs(proofs, verifier)?;

-if rct_type != RctType::MlsagAggregate {
-simple_type_balances(&tx.rct_signatures)?;
+if rct_type != RctType::AggregateMlsagBorromean {
+simple_type_balances(proofs)?;
}

Ok(())
@@ -144,7 +146,7 @@ pub(crate) fn ring_ct_semantic_checks(
pub(crate) fn check_input_signatures(
msg: &[u8; 32],
inputs: &[Input],
-rct_sig: &RctSignatures,
+proofs: &RctProofs,
rings: &Rings,
) -> Result<(), RingCTError> {
let Rings::RingCT(rings) = rings else {
@@ -155,15 +157,15 @@ pub(crate) fn check_input_signatures(
Err(RingCTError::RingInvalid)?;
}

-let pseudo_outs = match &rct_sig.prunable {
+let pseudo_outs = match &proofs.prunable {
RctPrunable::MlsagBulletproofs { pseudo_outs, .. }
+| RctPrunable::MlsagBulletproofsCompactAmount { pseudo_outs, .. }
| RctPrunable::Clsag { pseudo_outs, .. } => pseudo_outs.as_slice(),
-RctPrunable::MlsagBorromean { .. } => rct_sig.base.pseudo_outs.as_slice(),
-RctPrunable::AggregateMlsagBorromean { .. } | RctPrunable::Null => &[],
+RctPrunable::MlsagBorromean { .. } => proofs.base.pseudo_outs.as_slice(),
+RctPrunable::AggregateMlsagBorromean { .. } => &[],
};

-match &rct_sig.prunable {
-RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?,
+match &proofs.prunable {
RctPrunable::AggregateMlsagBorromean { mlsag, .. } => {
let key_images = inputs
.iter()
@@ -176,11 +178,14 @@ pub(crate) fn check_input_signatures(
.collect::<Vec<_>>();

let mut matrix =
-AggregateRingMatrixBuilder::new(&rct_sig.base.commitments, rct_sig.base.fee);
+AggregateRingMatrixBuilder::new(&proofs.base.commitments, proofs.base.fee);

rings.iter().try_for_each(|ring| matrix.push_ring(ring))?;

Ok(mlsag.verify(msg, &matrix.build()?, &key_images)?)
}
RctPrunable::MlsagBorromean { mlsags, .. }
+| RctPrunable::MlsagBulletproofsCompactAmount { mlsags, .. }
| RctPrunable::MlsagBulletproofs { mlsags, .. } => try_par_iter(mlsags)
.zip(pseudo_outs)
.zip(inputs)
@@ -216,18 +221,21 @@ mod tests {

#[test]
fn grandfathered_bulletproofs2() {
-assert!(
-check_rct_type(&RctType::BulletproofsCompactAmount, HardFork::V14, &[0; 32]).is_err()
-);
+assert!(check_rct_type(
+&RctType::MlsagBulletproofsCompactAmount,
+HardFork::V14,
+&[0; 32]
+)
+.is_err());

assert!(check_rct_type(
-&RctType::BulletproofsCompactAmount,
+&RctType::MlsagBulletproofsCompactAmount,
HardFork::V14,
&GRANDFATHERED_TRANSACTIONS[0]
)
.is_ok());
assert!(check_rct_type(
-&RctType::BulletproofsCompactAmount,
+&RctType::MlsagBulletproofsCompactAmount,
HardFork::V14,
&GRANDFATHERED_TRANSACTIONS[1]
)

@@ -97,31 +97,6 @@ fn test_torsion_ki() {
}
}

-/// Returns a strategy that resolves to a [`RctType`] that uses
-/// BPs(+).
-#[allow(unreachable_code)]
-#[allow(clippy::diverging_sub_expression)]
-fn bulletproof_rct_type() -> BoxedStrategy<RctType> {
-return prop_oneof![
-Just(RctType::Bulletproofs),
-Just(RctType::BulletproofsCompactAmount),
-Just(RctType::Clsag),
-Just(RctType::BulletproofsPlus),
-]
-.boxed();
-
-// Here to make sure this is updated when needed.
-match unreachable!() {
-RctType::Null => {}
-RctType::MlsagAggregate => {}
-RctType::MlsagIndividual => {}
-RctType::Bulletproofs => {}
-RctType::BulletproofsCompactAmount => {}
-RctType::Clsag => {}
-RctType::BulletproofsPlus => {}
-};
-}
-
prop_compose! {
/// Returns a valid prime-order point.
fn random_point()(bytes in any::<[u8; 32]>()) -> EdwardsPoint {
@@ -240,13 +215,13 @@ proptest! {
}

#[test]
-fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize, rct_type in bulletproof_rct_type()) {
-prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_ok());
+fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize) {
+prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_ok());
}

#[test]
-fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX, rct_type in bulletproof_rct_type()) {
-prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_err());
+fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX) {
+prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_err());
}

#[test]
@@ -256,7 +231,7 @@ proptest! {
}

#[test]
-fn test_block_unlock_time(height in 1..u64::MAX) {
+fn test_block_unlock_time(height in 1..usize::MAX) {
prop_assert!(check_block_time_lock(height, height));
prop_assert!(!check_block_time_lock(height, height - 1));
prop_assert!(check_block_time_lock(height, height+1));

@@ -1,12 +1,14 @@
use std::{cell::RefCell, ops::DerefMut};

-use multiexp::BatchVerifier as InternalBatchVerifier;
+use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier;
use rayon::prelude::*;
use thread_local::ThreadLocal;

+use cuprate_consensus_rules::batch_verifier::BatchVerifier;

/// A multithreaded batch verifier.
pub struct MultiThreadedBatchVerifier {
-internal: ThreadLocal<RefCell<InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>>>,
+internal: ThreadLocal<RefCell<InternalBatchVerifier>>,
}

impl MultiThreadedBatchVerifier {
@@ -22,19 +24,22 @@ impl MultiThreadedBatchVerifier {
.into_iter()
.map(RefCell::into_inner)
.par_bridge()
-.find_any(|batch_verifier| !batch_verifier.verify_vartime())
-.is_none()
+.try_for_each(|batch_verifier| {
+if batch_verifier.verify() {
+Ok(())
+} else {
+Err(())
+}
+})
+.is_ok()
}
}

-impl cuprate_consensus_rules::batch_verifier::BatchVerifier for &'_ MultiThreadedBatchVerifier {
-fn queue_statement<R>(
-&mut self,
-stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R,
-) -> R {
+impl BatchVerifier for &'_ MultiThreadedBatchVerifier {
+fn queue_statement<R>(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R {
let mut verifier = self
.internal
-.get_or(|| RefCell::new(InternalBatchVerifier::new(32)))
+.get_or(|| RefCell::new(InternalBatchVerifier::new()))
.borrow_mut();

stmt(verifier.deref_mut())
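The hunk above swaps multiexp's generic batch verifier for monero-serai's bulletproof batch verifier, but the threading pattern stays the same: one verifier per rayon worker held in thread-local storage, then drained and checked at the end. A self-contained sketch of that pattern with a stand-in `Verifier` type (not the real monero-serai verifier):

// Sketch of the thread-local batching pattern; `Verifier` is a stand-in.
use std::cell::RefCell;
use rayon::prelude::*;
use thread_local::ThreadLocal;

#[derive(Default)]
struct Verifier { statements: Vec<bool> }

impl Verifier {
    fn queue(&mut self, ok: bool) { self.statements.push(ok); }
    fn verify(&self) -> bool { self.statements.iter().all(|s| *s) }
}

struct BatchedVerifier { internal: ThreadLocal<RefCell<Verifier>> }

impl BatchedVerifier {
    fn new() -> Self { Self { internal: ThreadLocal::new() } }

    /// Queue work on this thread's verifier; rayon workers never contend.
    fn queue(&self, ok: bool) {
        self.internal
            .get_or(|| RefCell::new(Verifier::default()))
            .borrow_mut()
            .queue(ok);
    }

    /// Drain every per-thread verifier and check them in parallel.
    fn verify(self) -> bool {
        self.internal
            .into_iter()
            .map(RefCell::into_inner)
            .par_bridge()
            .try_for_each(|v| if v.verify() { Ok(()) } else { Err(()) })
            .is_ok()
    }
}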

@@ -57,7 +57,7 @@ pub struct PreparedBlockExPow {
/// The block's hash.
pub block_hash: [u8; 32],
/// The height of the block.
-pub height: u64,
+pub height: usize,

/// The weight of the block's miner transaction.
pub miner_tx_weight: usize,
@@ -74,7 +74,7 @@ impl PreparedBlockExPow {
let (hf_version, hf_vote) =
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;

-let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
+let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputNotOfTypeGen,
)))?
@@ -88,7 +88,7 @@ impl PreparedBlockExPow {
block_hash: block.hash(),
height: *height,

-miner_tx_weight: block.miner_tx.weight(),
+miner_tx_weight: block.miner_transaction.weight(),
block,
})
}
@@ -128,7 +128,7 @@ impl PreparedBlock {
let (hf_version, hf_vote) =
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;

-let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else {
+let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputNotOfTypeGen,
)))?
@@ -142,12 +142,12 @@ impl PreparedBlock {
block_hash: block.hash(),
pow_hash: calculate_pow_hash(
randomx_vm,
-&block.serialize_hashable(),
+&block.serialize_pow_hash(),
*height,
&hf_version,
)?,

-miner_tx_weight: block.miner_tx.weight(),
+miner_tx_weight: block.miner_transaction.weight(),
block,
})
}
@@ -172,12 +172,12 @@ impl PreparedBlock {
block_hash: block.block_hash,
pow_hash: calculate_pow_hash(
randomx_vm,
-&block.block.serialize_hashable(),
+&block.block.serialize_pow_hash(),
block.height,
&block.hf_version,
)?,

-miner_tx_weight: block.block.miner_tx.weight(),
+miner_tx_weight: block.block.miner_transaction.weight(),
block: block.block,
})
}
@@ -359,8 +359,8 @@ where

// Set up the block and just pass it to [`verify_prepped_main_chain_block`]

-// We just use the raw `major_version` here, no need to turn it into a `HardFork`.
-let rx_vms = if block.header.major_version < 12 {
+// We just use the raw `hardfork_version` here, no need to turn it into a `HardFork`.
+let rx_vms = if block.header.hardfork_version < 12 {
HashMap::new()
} else {
let BlockChainContextResponse::RxVms(rx_vms) = context_svc
@@ -443,12 +443,12 @@ where
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
.map_err(ConsensusError::Block)?;

-if prepped_block.block.txs.len() != txs.len() {
+if prepped_block.block.transactions.len() != txs.len() {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}

-if !prepped_block.block.txs.is_empty() {
-for (expected_tx_hash, tx) in prepped_block.block.txs.iter().zip(txs.iter()) {
+if !prepped_block.block.transactions.is_empty() {
+for (expected_tx_hash, tx) in prepped_block.block.transactions.iter().zip(txs.iter()) {
if expected_tx_hash != &tx.tx_hash {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}

@@ -63,7 +63,7 @@ where
};

// Check if the block's miner input is formed correctly.
-let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else {
+let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputNotOfTypeGen,
)))?
@@ -79,7 +79,7 @@ where
let prepped_block = {
let rx_vm = alt_rx_vm(
alt_context_cache.chain_height,
-block.header.major_version,
+block.header.hardfork_version,
alt_context_cache.parent_chain,
&mut alt_context_cache,
&mut context_svc,
@@ -188,7 +188,7 @@ where
///
/// If the `hf` is less than 12 (the height RX activates), then [`None`] is returned.
async fn alt_rx_vm<C>(
-block_height: u64,
+block_height: usize,
hf: u8,
parent_chain: Chain,
alt_chain_context: &mut AltChainContextCache,

@@ -12,14 +12,14 @@ pub(crate) fn pull_ordered_transactions(
block: &Block,
mut txs: HashMap<[u8; 32], TransactionVerificationData>,
) -> Result<Vec<TransactionVerificationData>, ExtendedConsensusError> {
-if block.txs.len() != txs.len() {
+if block.transactions.len() != txs.len() {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
}

let mut ordered_txs = Vec::with_capacity(txs.len());

-if !block.txs.is_empty() {
-for tx_hash in &block.txs {
+if !block.transactions.is_empty() {
+for tx_hash in &block.transactions {
let tx = txs
.remove(tx_hash)
.ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;

@@ -202,7 +202,7 @@ pub struct NewBlockData {
/// The blocks hash.
pub block_hash: [u8; 32],
/// The blocks height.
-pub height: u64,
+pub height: usize,
/// The blocks timestamp.
pub timestamp: u64,
/// The blocks weight.
@@ -246,7 +246,7 @@ pub enum BlockChainContextRequest {
/// # Panics
///
/// This will panic if the number of blocks will pop the genesis block.
-numb_blocks: u64,
+numb_blocks: usize,
},
/// Clear the alt chain context caches.
ClearAltCache,
@@ -289,7 +289,7 @@ pub enum BlockChainContextRequest {
/// handle getting the randomX VM of an alt chain.
AltChainRxVM {
/// The height the RandomX VM is needed for.
-height: u64,
+height: usize,
/// The chain to look in for the seed.
chain: Chain,
/// An internal token to prevent external crates calling this request.
@@ -313,7 +313,7 @@ pub enum BlockChainContextResponse {
/// Blockchain context response.
Context(BlockChainContext),
/// A map of seed height to RandomX VMs.
-RxVms(HashMap<u64, Arc<RandomXVM>>),
+RxVms(HashMap<usize, Arc<RandomXVM>>),
/// A list of difficulties.
BatchDifficulties(Vec<u128>),
/// An alt chain context cache.

@@ -32,10 +32,10 @@ pub struct AltChainContextCache {
pub difficulty_cache: Option<DifficultyCache>,

/// A cached RX VM.
-pub cached_rx_vm: Option<(u64, Arc<RandomXVM>)>,
+pub cached_rx_vm: Option<(usize, Arc<RandomXVM>)>,

/// The chain height of the alt chain.
-pub chain_height: u64,
+pub chain_height: usize,
/// The top hash of the alt chain.
pub top_hash: [u8; 32],
/// The [`ChainID`] of the alt chain.
@@ -48,7 +48,7 @@ impl AltChainContextCache {
/// Add a new block to the cache.
pub fn add_new_block(
&mut self,
-height: u64,
+height: usize,
block_hash: [u8; 32],
block_weight: usize,
long_term_block_weight: usize,

@@ -48,8 +48,8 @@ impl DifficultyCacheConfig {
}

/// Returns the total amount of blocks we need to track to calculate difficulty
-pub fn total_block_count(&self) -> u64 {
-(self.window + self.lag).try_into().unwrap()
+pub fn total_block_count(&self) -> usize {
+self.window + self.lag
}

/// The amount of blocks we account for after removing the outliers.
@@ -78,7 +78,7 @@ pub struct DifficultyCache {
/// The current cumulative difficulty of the chain.
pub(crate) cumulative_difficulties: VecDeque<u128>,
/// The last height we accounted for.
-pub(crate) last_accounted_height: u64,
+pub(crate) last_accounted_height: usize,
/// The config
pub(crate) config: DifficultyCacheConfig,
}
@@ -87,7 +87,7 @@ impl DifficultyCache {
/// Initialize the difficulty cache from the specified chain height.
#[instrument(name = "init_difficulty_cache", level = "info", skip(database, config))]
pub async fn init_from_chain_height<D: Database + Clone>(
-chain_height: u64,
+chain_height: usize,
config: DifficultyCacheConfig,
database: D,
chain: Chain,
@@ -104,7 +104,7 @@ impl DifficultyCache {
let (timestamps, cumulative_difficulties) =
get_blocks_in_pow_info(database.clone(), block_start..chain_height, chain).await?;

-debug_assert_eq!(timestamps.len() as u64, chain_height - block_start);
+debug_assert_eq!(timestamps.len(), chain_height - block_start);

tracing::info!(
"Current chain height: {}, accounting for {} blocks timestamps",
@@ -132,14 +132,10 @@ impl DifficultyCache {
#[instrument(name = "pop_blocks_diff_cache", skip_all, fields(numb_blocks = numb_blocks))]
pub async fn pop_blocks_main_chain<D: Database + Clone>(
&mut self,
-numb_blocks: u64,
+numb_blocks: usize,
database: D,
) -> Result<(), ExtendedConsensusError> {
-let Some(retained_blocks) = self
-.timestamps
-.len()
-.checked_sub(usize::try_from(numb_blocks).unwrap())
-else {
+let Some(retained_blocks) = self.timestamps.len().checked_sub(numb_blocks) else {
// More blocks to pop than we have in the cache, so just restart a new cache.
*self = Self::init_from_chain_height(
self.last_accounted_height - numb_blocks + 1,
@@ -167,7 +163,7 @@ impl DifficultyCache {
database,
new_start_height
// current_chain_height - self.timestamps.len() blocks are already in the cache.
-..(current_chain_height - u64::try_from(self.timestamps.len()).unwrap()),
+..(current_chain_height - self.timestamps.len()),
Chain::Main,
)
.await?;
@@ -187,7 +183,7 @@ impl DifficultyCache {
}

/// Add a new block to the difficulty cache.
-pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) {
+pub fn new_block(&mut self, height: usize, timestamp: u64, cumulative_difficulty: u128) {
assert_eq!(self.last_accounted_height + 1, height);
self.last_accounted_height += 1;

@@ -199,7 +195,7 @@ impl DifficultyCache {
self.cumulative_difficulties
.push_back(cumulative_difficulty);

-if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() {
+if self.timestamps.len() > self.config.total_block_count() {
self.timestamps.pop_front();
self.cumulative_difficulties.pop_front();
}
@@ -244,7 +240,7 @@ impl DifficultyCache {
let last_cum_diff = cumulative_difficulties.back().copied().unwrap_or(1);
cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap());

-if u64::try_from(timestamps.len()).unwrap() > self.config.total_block_count() {
+if timestamps.len() > self.config.total_block_count() {
diff_info_popped.push((
timestamps.pop_front().unwrap(),
cumulative_difficulties.pop_front().unwrap(),
@@ -266,22 +262,21 @@ impl DifficultyCache {
///
/// Will return [`None`] if there aren't enough blocks.
pub fn median_timestamp(&self, numb_blocks: usize) -> Option<u64> {
-let mut timestamps =
-if self.last_accounted_height + 1 == u64::try_from(numb_blocks).unwrap() {
-// if the chain height is equal to `numb_blocks` add the genesis block.
-// otherwise if the chain height is less than `numb_blocks` None is returned
-// and if it's more it would be excluded from calculations.
-let mut timestamps = self.timestamps.clone();
-// all genesis blocks have a timestamp of 0.
-// https://cuprate.github.io/monero-book/consensus_rules/genesis_block.html
-timestamps.push_front(0);
-timestamps.into()
-} else {
-self.timestamps
-.range(self.timestamps.len().checked_sub(numb_blocks)?..)
-.copied()
-.collect::<Vec<_>>()
-};
+let mut timestamps = if self.last_accounted_height + 1 == numb_blocks {
+// if the chain height is equal to `numb_blocks` add the genesis block.
+// otherwise if the chain height is less than `numb_blocks` None is returned
+// and if it's more it would be excluded from calculations.
+let mut timestamps = self.timestamps.clone();
+// all genesis blocks have a timestamp of 0.
+// https://cuprate.github.io/monero-book/consensus_rules/genesis_block.html
+timestamps.push_front(0);
+timestamps.into()
+} else {
+self.timestamps
+.range(self.timestamps.len().checked_sub(numb_blocks)?..)
+.copied()
+.collect::<Vec<_>>()
+};
timestamps.sort_unstable();
debug_assert_eq!(timestamps.len(), numb_blocks);

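The `median_timestamp` change above only drops the `u64`/`usize` conversions; the window logic is unchanged. A simplified stand-alone version of that window-and-median computation, assuming an odd-length window and a plain slice instead of the cache's `VecDeque`:

// Simplified sketch of the median-timestamp window above.
fn median_timestamp(chain_timestamps: &[u64], numb_blocks: usize) -> Option<u64> {
    let mut window: Vec<u64> = if chain_timestamps.len() + 1 == numb_blocks {
        // The genesis block (timestamp 0) completes the window.
        std::iter::once(0).chain(chain_timestamps.iter().copied()).collect()
    } else {
        // Take the last `numb_blocks` timestamps, or bail if there aren't enough.
        chain_timestamps
            .get(chain_timestamps.len().checked_sub(numb_blocks)?..)?
            .to_vec()
    };
    if window.is_empty() {
        return None;
    }
    window.sort_unstable();
    Some(window[window.len() / 2])
}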

@@ -368,7 +363,7 @@ fn get_window_start_and_end(
#[instrument(name = "get_blocks_timestamps", skip(database), level = "info")]
async fn get_blocks_in_pow_info<D: Database + Clone>(
database: D,
-block_heights: Range<u64>,
+block_heights: Range<usize>,
chain: Chain,
) -> Result<(VecDeque<u64>, VecDeque<u128>), ExtendedConsensusError> {
tracing::info!("Getting blocks timestamps");

@@ -14,7 +14,7 @@ use crate::{Database, ExtendedConsensusError};
/// The default amount of hard-fork votes to track to decide on activation of a hard-fork.
///
/// ref: <https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork>
-const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a week
+const DEFAULT_WINDOW_SIZE: usize = 10080; // supermajority window check length - a week

/// Configuration for hard-forks.
///
@@ -23,7 +23,7 @@ pub struct HardForkConfig {
/// The network we are on.
pub(crate) info: HFsInfo,
/// The amount of votes we are taking into account to decide on a fork activation.
-pub(crate) window: u64,
+pub(crate) window: usize,
}

impl HardForkConfig {
@@ -64,14 +64,14 @@ pub struct HardForkState {
pub(crate) votes: HFVotes,

/// The last block height accounted for.
-pub(crate) last_height: u64,
+pub(crate) last_height: usize,
}

impl HardForkState {
/// Initialize the [`HardForkState`] from the specified chain height.
#[instrument(name = "init_hardfork_state", skip(config, database), level = "info")]
pub async fn init_from_chain_height<D: Database + Clone>(
-chain_height: u64,
+chain_height: usize,
config: HardForkConfig,
mut database: D,
) -> Result<Self, ExtendedConsensusError> {
@@ -79,12 +79,8 @@ impl HardForkState {

let block_start = chain_height.saturating_sub(config.window);

-let votes = get_votes_in_range(
-database.clone(),
-block_start..chain_height,
-usize::try_from(config.window).unwrap(),
-)
-.await?;
+let votes =
+get_votes_in_range(database.clone(), block_start..chain_height, config.window).await?;

if chain_height > config.window {
debug_assert_eq!(votes.total_votes(), config.window)
@@ -129,7 +125,7 @@ impl HardForkState {
/// This _must_ only be used on a main-chain cache.
pub async fn pop_blocks_main_chain<D: Database + Clone>(
&mut self,
-numb_blocks: u64,
+numb_blocks: usize,
database: D,
) -> Result<(), ExtendedConsensusError> {
let Some(retained_blocks) = self.votes.total_votes().checked_sub(self.config.window) else {
@@ -153,19 +149,18 @@ impl HardForkState {
..current_chain_height
.saturating_sub(numb_blocks)
.saturating_sub(retained_blocks),
-usize::try_from(numb_blocks).unwrap(),
+numb_blocks,
)
.await?;

-self.votes
-.reverse_blocks(usize::try_from(numb_blocks).unwrap(), oldest_votes);
+self.votes.reverse_blocks(numb_blocks, oldest_votes);
self.last_height -= numb_blocks;

Ok(())
}

/// Add a new block to the cache.
-pub fn new_block(&mut self, vote: HardFork, height: u64) {
+pub fn new_block(&mut self, vote: HardFork, height: usize) {
// We don't _need_ to take in `height` but it's for safety, so we don't silently loose track
// of blocks.
assert_eq!(self.last_height + 1, height);
@@ -209,7 +204,7 @@ impl HardForkState {
#[instrument(name = "get_votes", skip(database))]
async fn get_votes_in_range<D: Database>(
database: D,
-block_heights: Range<u64>,
+block_heights: Range<usize>,
window_size: usize,
) -> Result<HFVotes, ExtendedConsensusError> {
let mut votes = HFVotes::new(window_size);
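`HardForkState` keeps the last `window` (by default 10080) block votes and activates a fork once enough of that window is voting for it. A rough sketch of such a sliding-window tally; the percentage threshold here is only a placeholder, not cuprate's actual constant:

// Illustrative sketch only: counts votes for `candidate` (or higher) in the
// most recent `window` blocks and compares against a placeholder threshold.
fn fork_accepted(block_votes: &[u8], candidate: u8, window: usize, threshold_percent: usize) -> bool {
    let start = block_votes.len().saturating_sub(window);
    let recent = &block_votes[start..];
    if recent.is_empty() {
        return false;
    }
    let in_favour = recent.iter().filter(|&&vote| vote >= candidate).count();
    in_favour * 100 >= recent.len() * threshold_percent
}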
|
|
|
@ -74,9 +74,9 @@ impl RandomX for RandomXVM {
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct RandomXVMCache {
|
pub struct RandomXVMCache {
|
||||||
/// The top [`RX_SEEDS_CACHED`] RX seeds.
|
/// The top [`RX_SEEDS_CACHED`] RX seeds.
|
||||||
pub(crate) seeds: VecDeque<(u64, [u8; 32])>,
|
pub(crate) seeds: VecDeque<(usize, [u8; 32])>,
|
||||||
/// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
|
/// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
|
||||||
pub(crate) vms: HashMap<u64, Arc<RandomXVM>>,
|
pub(crate) vms: HashMap<usize, Arc<RandomXVM>>,
|
||||||
|
|
||||||
/// A single cached VM that was given to us from a part of Cuprate.
|
/// A single cached VM that was given to us from a part of Cuprate.
|
||||||
pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVM>)>,
|
pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVM>)>,
|
||||||
|
@ -85,7 +85,7 @@ pub struct RandomXVMCache {
|
||||||
impl RandomXVMCache {
|
impl RandomXVMCache {
|
||||||
#[instrument(name = "init_rx_vm_cache", level = "info", skip(database))]
|
#[instrument(name = "init_rx_vm_cache", level = "info", skip(database))]
|
||||||
pub async fn init_from_chain_height<D: Database + Clone>(
|
pub async fn init_from_chain_height<D: Database + Clone>(
|
||||||
chain_height: u64,
|
chain_height: usize,
|
||||||
hf: &HardFork,
|
hf: &HardFork,
|
||||||
database: D,
|
database: D,
|
||||||
) -> Result<Self, ExtendedConsensusError> {
|
) -> Result<Self, ExtendedConsensusError> {
|
||||||
|
@ -94,7 +94,8 @@ impl RandomXVMCache {
|
||||||
|
|
||||||
tracing::debug!("last {RX_SEEDS_CACHED} randomX seed heights: {seed_heights:?}",);
|
tracing::debug!("last {RX_SEEDS_CACHED} randomX seed heights: {seed_heights:?}",);
|
||||||
|
|
||||||
let seeds: VecDeque<(u64, [u8; 32])> = seed_heights.into_iter().zip(seed_hashes).collect();
|
let seeds: VecDeque<(usize, [u8; 32])> =
|
||||||
|
seed_heights.into_iter().zip(seed_hashes).collect();
|
||||||
|
|
||||||
let vms = if hf >= &HardFork::V12 {
|
let vms = if hf >= &HardFork::V12 {
|
||||||
tracing::debug!("Creating RandomX VMs");
|
tracing::debug!("Creating RandomX VMs");
|
||||||
|
@ -132,7 +133,7 @@ impl RandomXVMCache {
|
||||||
/// of them first.
|
/// of them first.
|
||||||
pub async fn get_alt_vm<D: Database>(
|
pub async fn get_alt_vm<D: Database>(
|
||||||
&mut self,
|
&mut self,
|
||||||
height: u64,
|
height: usize,
|
||||||
chain: Chain,
|
chain: Chain,
|
||||||
database: D,
|
database: D,
|
||||||
) -> Result<Arc<RandomXVM>, ExtendedConsensusError> {
|
) -> Result<Arc<RandomXVM>, ExtendedConsensusError> {
|
||||||
|
@ -161,7 +162,7 @@ impl RandomXVMCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the main-chain RandomX VMs.
|
/// Get the main-chain RandomX VMs.
|
||||||
pub async fn get_vms(&mut self) -> HashMap<u64, Arc<RandomXVM>> {
|
pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVM>> {
|
||||||
match self.seeds.len().checked_sub(self.vms.len()) {
|
match self.seeds.len().checked_sub(self.vms.len()) {
|
||||||
// No difference in the amount of seeds to VMs.
|
// No difference in the amount of seeds to VMs.
|
||||||
Some(0) => (),
|
Some(0) => (),
|
||||||
|
@ -213,7 +214,7 @@ impl RandomXVMCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Removes all the RandomX VMs above the `new_height`.
|
/// Removes all the RandomX VMs above the `new_height`.
|
||||||
pub fn pop_blocks_main_chain(&mut self, new_height: u64) {
|
pub fn pop_blocks_main_chain(&mut self, new_height: usize) {
|
||||||
self.seeds.retain(|(height, _)| *height < new_height);
|
self.seeds.retain(|(height, _)| *height < new_height);
|
||||||
self.vms.retain(|height, _| *height < new_height);
|
self.vms.retain(|height, _| *height < new_height);
|
||||||
}
|
}
|
||||||
|
@ -221,7 +222,7 @@ impl RandomXVMCache {
|
||||||
/// Add a new block to the VM cache.
|
/// Add a new block to the VM cache.
|
||||||
///
|
///
|
||||||
/// hash is the block hash not the blocks PoW hash.
|
/// hash is the block hash not the blocks PoW hash.
|
||||||
pub fn new_block(&mut self, height: u64, hash: &[u8; 32]) {
|
pub fn new_block(&mut self, height: usize, hash: &[u8; 32]) {
|
||||||
if is_randomx_seed_height(height) {
|
if is_randomx_seed_height(height) {
|
||||||
tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",);
|
tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",);
|
||||||
|
|
||||||
|
@ -242,7 +243,7 @@ impl RandomXVMCache {
|
||||||
|
|
||||||
/// Get the last `amount` of RX seeds, the top height returned here will not necessarily be the RX VM for the top block
|
/// Get the last `amount` of RX seeds, the top height returned here will not necessarily be the RX VM for the top block
|
||||||
/// in the chain as VMs include some lag before a seed activates.
|
/// in the chain as VMs include some lag before a seed activates.
|
||||||
pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize) -> Vec<u64> {
|
pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize) -> Vec<usize> {
|
||||||
let mut seeds = Vec::with_capacity(amount);
|
let mut seeds = Vec::with_capacity(amount);
|
||||||
if is_randomx_seed_height(last_height) {
|
if is_randomx_seed_height(last_height) {
|
||||||
seeds.push(last_height);
|
seeds.push(last_height);
|
||||||
|
@ -265,7 +266,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize)
|
||||||
|
|
||||||
/// Gets the block hashes for the heights specified.
|
/// Gets the block hashes for the heights specified.
|
||||||
async fn get_block_hashes<D: Database + Clone>(
|
async fn get_block_hashes<D: Database + Clone>(
|
||||||
heights: Vec<u64>,
|
heights: Vec<usize>,
|
||||||
database: D,
|
database: D,
|
||||||
) -> Result<Vec<[u8; 32]>, ExtendedConsensusError> {
|
) -> Result<Vec<[u8; 32]>, ExtendedConsensusError> {
|
||||||
let mut fut = FuturesOrdered::new();
|
let mut fut = FuturesOrdered::new();
|
||||||
|
|
|
@ -52,7 +52,7 @@ pub struct ContextTask<D: Database> {
|
||||||
alt_chain_cache_map: AltChainMap,
|
alt_chain_cache_map: AltChainMap,
|
||||||
|
|
||||||
/// The current chain height.
|
/// The current chain height.
|
||||||
chain_height: u64,
|
chain_height: usize,
|
||||||
/// The top block hash.
|
/// The top block hash.
|
||||||
top_block_hash: [u8; 32],
|
top_block_hash: [u8; 32],
|
||||||
/// The total amount of coins generated.
|
/// The total amount of coins generated.
|
||||||
|
|
|
@ -24,21 +24,21 @@ use cuprate_types::{
|
||||||
use crate::{Database, ExtendedConsensusError, HardFork};
|
use crate::{Database, ExtendedConsensusError, HardFork};
|
||||||
|
|
||||||
/// The short term block weight window.
|
/// The short term block weight window.
|
||||||
const SHORT_TERM_WINDOW: u64 = 100;
|
const SHORT_TERM_WINDOW: usize = 100;
|
||||||
/// The long term block weight window.
|
/// The long term block weight window.
|
||||||
const LONG_TERM_WINDOW: u64 = 100000;
|
const LONG_TERM_WINDOW: usize = 100000;
|
||||||
|
|
||||||
/// Configuration for the block weight cache.
|
/// Configuration for the block weight cache.
|
||||||
///
|
///
|
||||||
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
|
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
|
||||||
pub struct BlockWeightsCacheConfig {
|
pub struct BlockWeightsCacheConfig {
|
||||||
short_term_window: u64,
|
short_term_window: usize,
|
||||||
long_term_window: u64,
|
long_term_window: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BlockWeightsCacheConfig {
|
impl BlockWeightsCacheConfig {
|
||||||
/// Creates a new [`BlockWeightsCacheConfig`]
|
/// Creates a new [`BlockWeightsCacheConfig`]
|
||||||
pub const fn new(short_term_window: u64, long_term_window: u64) -> BlockWeightsCacheConfig {
|
pub const fn new(short_term_window: usize, long_term_window: usize) -> BlockWeightsCacheConfig {
|
||||||
BlockWeightsCacheConfig {
|
BlockWeightsCacheConfig {
|
||||||
short_term_window,
|
short_term_window,
|
||||||
long_term_window,
|
long_term_window,
|
||||||
|
@ -67,7 +67,7 @@ pub struct BlockWeightsCache {
|
||||||
long_term_weights: RollingMedian<usize>,
|
long_term_weights: RollingMedian<usize>,
|
||||||
|
|
||||||
/// The height of the top block.
|
/// The height of the top block.
|
||||||
pub(crate) tip_height: u64,
|
pub(crate) tip_height: usize,
|
||||||
|
|
||||||
pub(crate) config: BlockWeightsCacheConfig,
|
pub(crate) config: BlockWeightsCacheConfig,
|
||||||
}
|
}
|
||||||
|
@ -76,7 +76,7 @@ impl BlockWeightsCache {
|
||||||
/// Initialize the [`BlockWeightsCache`] at the the given chain height.
|
/// Initialize the [`BlockWeightsCache`] at the the given chain height.
|
||||||
#[instrument(name = "init_weight_cache", level = "info", skip(database, config))]
|
#[instrument(name = "init_weight_cache", level = "info", skip(database, config))]
|
||||||
pub async fn init_from_chain_height<D: Database + Clone>(
|
pub async fn init_from_chain_height<D: Database + Clone>(
|
||||||
chain_height: u64,
|
chain_height: usize,
|
||||||
config: BlockWeightsCacheConfig,
|
 config: BlockWeightsCacheConfig,
 database: D,
 chain: Chain,
@@ -101,17 +101,11 @@ impl BlockWeightsCache {

 Ok(BlockWeightsCache {
 short_term_block_weights: rayon_spawn_async(move || {
-RollingMedian::from_vec(
-short_term_block_weights,
-usize::try_from(config.short_term_window).unwrap(),
-)
+RollingMedian::from_vec(short_term_block_weights, config.short_term_window)
 })
 .await,
 long_term_weights: rayon_spawn_async(move || {
-RollingMedian::from_vec(
-long_term_weights,
-usize::try_from(config.long_term_window).unwrap(),
-)
+RollingMedian::from_vec(long_term_weights, config.long_term_window)
 })
 .await,
 tip_height: chain_height - 1,
@@ -125,10 +119,10 @@ impl BlockWeightsCache {
 #[instrument(name = "pop_blocks_weight_cache", skip_all, fields(numb_blocks = numb_blocks))]
 pub async fn pop_blocks_main_chain<D: Database + Clone>(
 &mut self,
-numb_blocks: u64,
+numb_blocks: usize,
 database: D,
 ) -> Result<(), ExtendedConsensusError> {
-if self.long_term_weights.window_len() <= usize::try_from(numb_blocks).unwrap() {
+if self.long_term_weights.window_len() <= numb_blocks {
 // More blocks to pop than we have in the cache, so just restart a new cache.
 *self = Self::init_from_chain_height(
 self.tip_height - numb_blocks + 1,
@@ -150,7 +144,7 @@ impl BlockWeightsCache {
 let old_long_term_weights = get_long_term_weight_in_range(
 new_long_term_start_height
 // current_chain_height - self.long_term_weights.len() blocks are already in the cache.
-..(chain_height - u64::try_from(self.long_term_weights.window_len()).unwrap()),
+..(chain_height - self.long_term_weights.window_len()),
 database.clone(),
 Chain::Main,
 )
@@ -163,11 +157,11 @@ impl BlockWeightsCache {
 let old_short_term_weights = get_blocks_weight_in_range(
 new_short_term_start_height
 // current_chain_height - self.long_term_weights.len() blocks are already in the cache.
-..(chain_height - u64::try_from(self.short_term_block_weights.window_len()).unwrap()),
+..(chain_height - self.short_term_block_weights.window_len()),
 database,
-Chain::Main
+Chain::Main,
 )
 .await?;

 for _ in 0..numb_blocks {
 self.short_term_block_weights.pop_back();
@@ -186,7 +180,7 @@ impl BlockWeightsCache {
 ///
 /// The block_height **MUST** be one more than the last height the cache has
 /// seen.
-pub fn new_block(&mut self, block_height: u64, block_weight: usize, long_term_weight: usize) {
+pub fn new_block(&mut self, block_height: usize, block_weight: usize, long_term_weight: usize) {
 assert_eq!(self.tip_height + 1, block_height);
 self.tip_height += 1;
 tracing::debug!(
@@ -290,7 +284,7 @@ pub fn calculate_block_long_term_weight(
 /// Gets the block weights from the blocks with heights in the range provided.
 #[instrument(name = "get_block_weights", skip(database))]
 async fn get_blocks_weight_in_range<D: Database + Clone>(
-range: Range<u64>,
+range: Range<usize>,
 database: D,
 chain: Chain,
 ) -> Result<Vec<usize>, ExtendedConsensusError> {
@@ -314,7 +308,7 @@ async fn get_blocks_weight_in_range<D: Database + Clone>(
 /// Gets the block long term weights from the blocks with heights in the range provided.
 #[instrument(name = "get_long_term_weights", skip(database), level = "info")]
 async fn get_long_term_weight_in_range<D: Database + Clone>(
-range: Range<u64>,
+range: Range<usize>,
 database: D,
 chain: Chain,
 ) -> Result<Vec<usize>, ExtendedConsensusError> {

@@ -29,10 +29,10 @@ const TEST_CONTEXT_CONFIG: ContextConfig = ContextConfig {

 #[tokio::test]
 async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> {
-const BLOCKCHAIN_HEIGHT: u64 = 6000;
+const BLOCKCHAIN_HEIGHT: usize = 6000;

 let mut runner = TestRunner::default();
-let db = arb_dummy_database(BLOCKCHAIN_HEIGHT.try_into().unwrap())
+let db = arb_dummy_database(BLOCKCHAIN_HEIGHT)
 .new_tree(&mut runner)
 .unwrap()
 .current();
@@ -71,10 +71,10 @@ async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> {

 #[tokio::test]
 async fn context_height_correct() -> Result<(), tower::BoxError> {
-const BLOCKCHAIN_HEIGHT: u64 = 6000;
+const BLOCKCHAIN_HEIGHT: usize = 6000;

 let mut runner = TestRunner::default();
-let db = arb_dummy_database(BLOCKCHAIN_HEIGHT.try_into().unwrap())
+let db = arb_dummy_database(BLOCKCHAIN_HEIGHT)
 .new_tree(&mut runner)
 .unwrap()
 .current();

@@ -63,10 +63,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> {
 let cfg = DifficultyCacheConfig::main_net();

 let mut db_builder = DummyDatabaseBuilder::default();
-for (cum_dif, timestamp) in DIF_3000000_3002000
-.iter()
-.take(cfg.total_block_count() as usize)
-{
+for (cum_dif, timestamp) in DIF_3000000_3002000.iter().take(cfg.total_block_count()) {
 db_builder.add_block(
 DummyBlockExtendedHeader::default().with_difficulty_info(*timestamp, *cum_dif),
 )
@@ -82,14 +79,14 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> {

 for (i, diff_info) in DIF_3000000_3002000
 .windows(2)
-.skip(cfg.total_block_count() as usize - 1)
+.skip(cfg.total_block_count() - 1)
 .enumerate()
 {
 let diff = diff_info[1].0 - diff_info[0].0;

 assert_eq!(diff_cache.next_difficulty(&HardFork::V16), diff);

-diff_cache.new_block(3_000_720 + i as u64, diff_info[1].1, diff_info[1].0);
+diff_cache.new_block(3_000_720 + i, diff_info[1].1, diff_info[1].0);
 }

 Ok(())
@@ -104,7 +101,7 @@ prop_compose! {
 let (timestamps, mut cumulative_difficulties): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
 cumulative_difficulties.sort_unstable();
 DifficultyCache {
-last_accounted_height: timestamps.len().try_into().unwrap(),
+last_accounted_height: timestamps.len(),
 config: TEST_DIFFICULTY_CONFIG,
 timestamps: timestamps.into(),
 // we generate cumulative_difficulties in range 0..u64::MAX as if the generated values are close to u128::MAX
@@ -165,7 +162,7 @@ proptest! {
 let mut timestamps: VecDeque<u64> = timestamps.into();

 let diff_cache = DifficultyCache {
-last_accounted_height: (TEST_WINDOW -1).try_into().unwrap(),
+last_accounted_height: TEST_WINDOW -1,
 config: TEST_DIFFICULTY_CONFIG,
 timestamps: timestamps.clone(),
 // we dont need cumulative_difficulties
@@ -234,7 +231,7 @@ proptest! {
 new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
 }

-new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?;
+new_cache.pop_blocks_main_chain(blocks_to_pop, database).await?;

 prop_assert_eq!(new_cache, old_cache);

@@ -258,7 +255,7 @@ proptest! {
 new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
 }

-new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?;
+new_cache.pop_blocks_main_chain(blocks_to_pop, database).await?;

 prop_assert_eq!(new_cache, old_cache);

@@ -10,7 +10,7 @@ use crate::{
 },
 };

-const TEST_WINDOW_SIZE: u64 = 25;
+const TEST_WINDOW_SIZE: usize = 25;

 const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [
 HFInfo::new(0, 0),
@@ -79,7 +79,7 @@ async fn hf_v15_v16_correct() {

 for (i, (_, vote)) in HFS_2688888_2689608.into_iter().enumerate() {
 assert_eq!(state.current_hardfork, HardFork::V15);
-state.new_block(vote, (2688888 + i) as u64);
+state.new_block(vote, 2688888 + i);
 }

 assert_eq!(state.current_hardfork, HardFork::V16);
@@ -91,8 +91,8 @@ proptest! {
 extra_hfs in vec(any::<HardFork>(), 0..100)
 ) {
 tokio_test::block_on(async move {
-let numb_hfs = hfs.len() as u64;
-let numb_pop_blocks = extra_hfs.len() as u64;
+let numb_hfs = hfs.len();
+let numb_pop_blocks = extra_hfs.len();

 let mut db_builder = DummyDatabaseBuilder::default();

@@ -102,7 +102,7 @@ proptest! {
 );
 }

-let db = db_builder.finish(Some(numb_hfs as usize));
+let db = db_builder.finish(Some(numb_hfs));

 let mut state = HardForkState::init_from_chain_height(
 numb_hfs,
@@ -114,7 +114,7 @@ proptest! {
 let state_clone = state.clone();

 for (i, hf) in extra_hfs.into_iter().enumerate() {
-state.new_block(hf, state.last_height + u64::try_from(i).unwrap() + 1);
+state.new_block(hf, state.last_height + i + 1);
 }

 state.pop_blocks_main_chain(numb_pop_blocks, db).await?;

@@ -123,14 +123,14 @@ async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError> {
 .await?;

 for height in 1..=100 {
-weight_cache.new_block(height as u64, height, height);
+weight_cache.new_block(height, height, height);

 assert_eq!(weight_cache.median_short_term_weight(), height / 2);
 assert_eq!(weight_cache.median_long_term_weight(), height / 2);
 }

 for height in 101..=5000 {
-weight_cache.new_block(height as u64, height, height);
+weight_cache.new_block(height, height, height);

 assert_eq!(weight_cache.median_long_term_weight(), height / 2);
 }
@@ -162,7 +162,7 @@ async fn calc_bw_ltw_2850000_3050000() {
 weight_cache.median_long_term_weight(),
 );
 assert_eq!(calc_ltw, *ltw);
-weight_cache.new_block((2950000 + i) as u64, *weight, *ltw);
+weight_cache.new_block(2950000 + i, *weight, *ltw);
 }
 }

@@ -150,7 +150,7 @@ impl Service<BlockchainReadRequest> for DummyDatabase {
 async move {
 Ok(match req {
 BlockchainReadRequest::BlockExtendedHeader(id) => {
-let mut id = usize::try_from(id).unwrap();
+let mut id = id;
 if let Some(dummy_height) = dummy_height {
 let block_len = blocks.read().unwrap().len();

@@ -173,8 +173,8 @@ impl Service<BlockchainReadRequest> for DummyDatabase {
 BlockchainResponse::BlockHash(hash)
 }
 BlockchainReadRequest::BlockExtendedHeaderInRange(range, _) => {
-let mut end = usize::try_from(range.end).unwrap();
-let mut start = usize::try_from(range.start).unwrap();
+let mut end = range.end;
+let mut start = range.start;

 if let Some(dummy_height) = dummy_height {
 let block_len = blocks.read().unwrap().len();
@@ -196,10 +196,7 @@ impl Service<BlockchainReadRequest> for DummyDatabase {
 )
 }
 BlockchainReadRequest::ChainHeight => {
-let height: u64 = dummy_height
-.unwrap_or(blocks.read().unwrap().len())
-.try_into()
-.unwrap();
+let height = dummy_height.unwrap_or(blocks.read().unwrap().len());

 let mut top_hash = [0; 32];
 top_hash[0..8].copy_from_slice(&height.to_le_bytes());

@@ -12,10 +12,7 @@ use std::{
 };

 use futures::FutureExt;
-use monero_serai::{
-ringct::RctType,
-transaction::{Input, Timelock, Transaction},
-};
+use monero_serai::transaction::{Input, Timelock, Transaction};
 use rayon::prelude::*;
 use tower::{Service, ServiceExt};
 use tracing::instrument;
@@ -37,6 +34,7 @@ use crate::{
 };

 pub mod contextual_data;
+mod free;

 /// A struct representing the type of validation that needs to be completed for this transaction.
 #[derive(Debug, Copy, Clone, Eq, PartialEq)]
@@ -103,22 +101,17 @@ impl TransactionVerificationData {
 let tx_hash = tx.hash();
 let tx_blob = tx.serialize();

-// the tx weight is only different from the blobs length for bp(+) txs.
-let tx_weight = match tx.rct_signatures.rct_type() {
-RctType::Bulletproofs
-| RctType::BulletproofsCompactAmount
-| RctType::Clsag
-| RctType::BulletproofsPlus => tx.weight(),
-_ => tx_blob.len(),
-};
+let tx_weight = free::tx_weight(&tx, &tx_blob);
+
+let fee = free::tx_fee(&tx)?;

 Ok(TransactionVerificationData {
 tx_hash,
 tx_blob,
 tx_weight,
-fee: tx.rct_signatures.base.fee,
+fee,
 cached_verification_state: StdMutex::new(CachedVerificationState::NotVerified),
-version: TxVersion::from_raw(tx.prefix.version)
+version: TxVersion::from_raw(tx.version())
 .ok_or(TransactionError::TransactionVersionInvalid)?,
 tx,
 })
@@ -133,7 +126,7 @@ pub enum VerifyTxRequest {
 // TODO: Can we use references to remove the Vec? wont play nicely with Service though
 txs: Vec<Arc<TransactionVerificationData>>,
 /// The current chain height.
-current_chain_height: u64,
+current_chain_height: usize,
 /// The top block hash.
 top_hash: [u8; 32],
 /// The value for time to use to check time locked outputs.
@@ -147,7 +140,7 @@ pub enum VerifyTxRequest {
 /// The transactions to verify.
 txs: Vec<Transaction>,
 /// The current chain height.
-current_chain_height: u64,
+current_chain_height: usize,
 /// The top block hash.
 top_hash: [u8; 32],
 /// The value for time to use to check time locked outputs.
@@ -246,7 +239,7 @@ where
 async fn prep_and_verify_transactions<D>(
 database: D,
 txs: Vec<Transaction>,
-current_chain_height: u64,
+current_chain_height: usize,
 top_hash: [u8; 32],
 time_for_time_lock: u64,
 hf: HardFork,
@@ -281,7 +274,7 @@ where
 async fn verify_prepped_transactions<D>(
 mut database: D,
 txs: &[Arc<TransactionVerificationData>],
-current_chain_height: u64,
+current_chain_height: usize,
 top_hash: [u8; 32],
 time_for_time_lock: u64,
 hf: HardFork,
@@ -296,7 +289,7 @@ where
 let mut spent_kis = HashSet::with_capacity(txs.len());

 txs.iter().try_for_each(|tx| {
-tx.tx.prefix.inputs.iter().try_for_each(|input| {
+tx.tx.prefix().inputs.iter().try_for_each(|input| {
 if let Input::ToKey { key_image, .. } = input {
 if !spent_kis.insert(key_image.compress().0) {
 tracing::debug!("Duplicate key image found in batch.");
@@ -382,7 +375,7 @@ fn transactions_needing_verification(
 txs: &[Arc<TransactionVerificationData>],
 hashes_in_main_chain: HashSet<[u8; 32]>,
 current_hf: &HardFork,
-current_chain_height: u64,
+current_chain_height: usize,
 time_for_time_lock: u64,
 ) -> Result<
 (
@@ -473,7 +466,7 @@ where

 async fn verify_transactions<D>(
 txs: Vec<(Arc<TransactionVerificationData>, VerificationNeeded)>,
-current_chain_height: u64,
+current_chain_height: usize,
 top_hash: [u8; 32],
 current_time_lock_timestamp: u64,
 hf: HardFork,
@@ -501,7 +494,7 @@ where
 &hf,
 &batch_verifier,
 )?;
-// make sure monero-serai calculated the same fee.
+// make sure we calculated the right fee.
 assert_eq!(fee, tx.fee);
 }

@@ -149,7 +149,7 @@ pub async fn batch_get_ring_member_info<D: Database>(
 let mut output_ids = HashMap::new();

 for tx_v_data in txs_verification_data.clone() {
-insert_ring_member_ids(&tx_v_data.tx.prefix.inputs, &mut output_ids)
+insert_ring_member_ids(&tx_v_data.tx.prefix().inputs, &mut output_ids)
 .map_err(ConsensusError::Transaction)?;
 }

@@ -179,14 +179,14 @@ pub async fn batch_get_ring_member_info<D: Database>(

 let ring_members_for_tx = get_ring_members_for_inputs(
 |amt, idx| outputs.get(&amt)?.get(&idx).copied(),
-&tx_v_data.tx.prefix.inputs,
+&tx_v_data.tx.prefix().inputs,
 )
 .map_err(ConsensusError::Transaction)?;

 let decoy_info = if hf != &HardFork::V1 {
 // this data is only needed after hard-fork 1.
 Some(
-DecoyInfo::new(&tx_v_data.tx.prefix.inputs, numb_outputs, hf)
+DecoyInfo::new(&tx_v_data.tx.prefix().inputs, numb_outputs, hf)
 .map_err(ConsensusError::Transaction)?,
 )
 } else {
@@ -222,7 +222,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
 let unique_input_amounts = txs_verification_data
 .iter()
 .flat_map(|tx_info| {
-tx_info.tx.prefix.inputs.iter().map(|input| match input {
+tx_info.tx.prefix().inputs.iter().map(|input| match input {
 Input::ToKey { amount, .. } => amount.unwrap_or(0),
 _ => 0,
 })
@@ -247,7 +247,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(

 Ok(txs_verification_data.iter().map(move |tx_v_data| {
 DecoyInfo::new(
-&tx_v_data.tx.prefix.inputs,
+&tx_v_data.tx.prefix().inputs,
 |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0),
 &hf,
 )

consensus/src/transactions/free.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
+use monero_serai::{
+ringct::{bulletproofs::Bulletproof, RctType},
+transaction::{Input, Transaction},
+};
+
+use cuprate_consensus_rules::transactions::TransactionError;
+
+/// Calculates the weight of a [`Transaction`].
+///
+/// This is more efficient that [`Transaction::weight`] if you already have the transaction blob.
+pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize {
+// the tx weight is only different from the blobs length for bp(+) txs.
+
+match &tx {
+Transaction::V1 { .. } | Transaction::V2 { proofs: None, .. } => tx_blob.len(),
+Transaction::V2 {
+proofs: Some(proofs),
+..
+} => match proofs.rct_type() {
+RctType::AggregateMlsagBorromean | RctType::MlsagBorromean => tx_blob.len(),
+RctType::MlsagBulletproofs
+| RctType::MlsagBulletproofsCompactAmount
+| RctType::ClsagBulletproof => {
+tx_blob.len()
++ Bulletproof::calculate_bp_clawback(false, tx.prefix().outputs.len()).0
+}
+RctType::ClsagBulletproofPlus => {
+tx_blob.len()
++ Bulletproof::calculate_bp_clawback(true, tx.prefix().outputs.len()).0
+}
+},
+}
+}
+
+/// Calculates the fee of the [`Transaction`].
+pub fn tx_fee(tx: &Transaction) -> Result<u64, TransactionError> {
+let mut fee = 0_u64;
+
+match &tx {
+Transaction::V1 { prefix, .. } => {
+for input in &prefix.inputs {
+if let Input::ToKey { amount, .. } = input {
+fee = fee
+.checked_add(amount.unwrap_or(0))
+.ok_or(TransactionError::InputsOverflow)?;
+}
+}
+
+for output in &prefix.outputs {
+fee.checked_sub(output.amount.unwrap_or(0))
+.ok_or(TransactionError::OutputsTooHigh)?;
+}
+}
+Transaction::V2 { proofs, .. } => {
+fee = proofs
+.as_ref()
+.ok_or(TransactionError::TransactionVersionInvalid)?
+.base
+.fee;
+}
+};
+
+Ok(fee)
+}
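The two helpers above are consumed inside `impl TransactionVerificationData` in consensus/src/transactions/mod.rs (see the earlier hunk in this diff). Below is a minimal usage sketch, not part of the commit; the wrapper function and its placement (inside the `transactions` module, where the private `free` module is visible) are assumptions for illustration only.

use monero_serai::transaction::Transaction;

use cuprate_consensus_rules::transactions::TransactionError;

// Hypothetical helper living next to `free` in consensus/src/transactions/mod.rs:
// compute the consensus weight and fee of one deserialized transaction.
fn weight_and_fee(tx: &Transaction) -> Result<(usize, u64), TransactionError> {
    // Serialize once and reuse the blob; the weight only differs from the blob
    // length by the bulletproof(+) clawback.
    let tx_blob = tx.serialize();
    let weight = free::tx_weight(tx, &tx_blob);
    // v1: fee derived from input/output amounts, v2: read from the RCT base fee.
    let fee = free::tx_fee(tx)?;
    Ok((weight, fee))
}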
@@ -260,7 +260,7 @@ impl<Z: BorshNetworkZone> AddressBook<Z> {

 fn take_random_white_peer(
 &mut self,
-block_needed: Option<u64>,
+block_needed: Option<usize>,
 ) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
 tracing::debug!("Retrieving random white peer");
 self.white_list
@@ -269,7 +269,7 @@ impl<Z: BorshNetworkZone> AddressBook<Z> {

 fn take_random_gray_peer(
 &mut self,
-block_needed: Option<u64>,
+block_needed: Option<usize>,
 ) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
 tracing::debug!("Retrieving random gray peer");
 self.gray_list

@@ -88,7 +88,7 @@ impl<Z: NetworkZone> PeerList<Z> {
 pub fn take_random_peer<R: Rng>(
 &mut self,
 r: &mut R,
-block_needed: Option<u64>,
+block_needed: Option<usize>,
 must_keep_peers: &HashSet<Z::Addr>,
 ) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {
 // Take a random peer and see if it's in the list of must_keep_peers, if it is try again.

@@ -14,7 +14,7 @@ pub enum PeerSyncRequest<N: NetworkZone> {
 /// claim to have a higher cumulative difficulty.
 PeersToSyncFrom {
 current_cumulative_difficulty: u128,
-block_needed: Option<u64>,
+block_needed: Option<usize>,
 },
 /// Add/update a peer's core sync data.
 IncomingCoreSyncData(InternalPeerID<N::Addr>, ConnectionHandle, CoreSyncData),
@@ -115,18 +115,18 @@ pub enum AddressBookRequest<Z: NetworkZone> {
 /// Takes a random white peer from the peer list. If height is specified
 /// then the peer list should retrieve a peer that should have a full
 /// block at that height according to it's pruning seed
-TakeRandomWhitePeer { height: Option<u64> },
+TakeRandomWhitePeer { height: Option<usize> },
 /// Takes a random gray peer from the peer list. If height is specified
 /// then the peer list should retrieve a peer that should have a full
 /// block at that height according to it's pruning seed
-TakeRandomGrayPeer { height: Option<u64> },
+TakeRandomGrayPeer { height: Option<usize> },
 /// Takes a random peer from the peer list. If height is specified
 /// then the peer list should retrieve a peer that should have a full
 /// block at that height according to it's pruning seed.
 ///
 /// The address book will look in the white peer list first, then the gray
 /// one if no peer is found.
-TakeRandomPeer { height: Option<u64> },
+TakeRandomPeer { height: Option<usize> },
 /// Gets the specified number of white peers, or less if we don't have enough.
 GetWhitePeers(usize),
 /// Checks if the given peer is banned.

@@ -121,7 +121,7 @@ pub enum ChainSvcResponse {
 /// The response for [`ChainSvcRequest::FindFirstUnknown`].
 ///
 /// Contains the index of the first unknown block and its expected height.
-FindFirstUnknown(Option<(usize, u64)>),
+FindFirstUnknown(Option<(usize, usize)>),
 /// The response for [`ChainSvcRequest::CumulativeDifficulty`].
 ///
 /// The current cumulative difficulty of our chain.
@@ -207,7 +207,7 @@ struct BlockDownloader<N: NetworkZone, S, C> {
 /// The amount of blocks to request in the next batch.
 amount_of_blocks_to_request: usize,
 /// The height at which [`Self::amount_of_blocks_to_request`] was updated.
-amount_of_blocks_to_request_updated_at: u64,
+amount_of_blocks_to_request_updated_at: usize,

 /// The amount of consecutive empty chain entries we received.
 ///
@@ -225,12 +225,12 @@ struct BlockDownloader<N: NetworkZone, S, C> {
 /// The current inflight requests.
 ///
 /// This is a map of batch start heights to block IDs and related information of the batch.
-inflight_requests: BTreeMap<u64, BlocksToRetrieve<N>>,
+inflight_requests: BTreeMap<usize, BlocksToRetrieve<N>>,

 /// A queue of start heights from failed batches that should be retried.
 ///
 /// Wrapped in [`Reverse`] so we prioritize early batches.
-failed_batches: BinaryHeap<Reverse<u64>>,
+failed_batches: BinaryHeap<Reverse<usize>>,

 block_queue: BlockQueue,

@@ -524,7 +524,7 @@ where
 /// Handles a response to a request to get blocks from a peer.
 async fn handle_download_batch_res(
 &mut self,
-start_height: u64,
+start_height: usize,
 res: Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError>,
 chain_tracker: &mut ChainTracker<N>,
 pending_peers: &mut BTreeMap<PruningSeed, Vec<ClientPoolDropGuard<N>>>,
@@ -692,18 +692,19 @@ where
 /// The return value from the block download tasks.
 struct BlockDownloadTaskResponse<N: NetworkZone> {
 /// The start height of the batch.
-start_height: u64,
+start_height: usize,
 /// A result containing the batch or an error.
 result: Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError>,
 }

 /// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`].
-fn client_has_block_in_range(pruning_seed: &PruningSeed, start_height: u64, length: usize) -> bool {
+fn client_has_block_in_range(
+pruning_seed: &PruningSeed,
+start_height: usize,
+length: usize,
+) -> bool {
 pruning_seed.has_full_block(start_height, CRYPTONOTE_MAX_BLOCK_HEIGHT)
-&& pruning_seed.has_full_block(
-start_height + u64::try_from(length).unwrap(),
-CRYPTONOTE_MAX_BLOCK_HEIGHT,
-)
+&& pruning_seed.has_full_block(start_height + length, CRYPTONOTE_MAX_BLOCK_HEIGHT)
 }

 /// Calculates the next amount of blocks to request in a batch.

@@ -15,7 +15,7 @@ use super::{BlockBatch, BlockDownloadError};
 #[derive(Debug, Clone)]
 pub struct ReadyQueueBatch {
 /// The start height of the batch.
-pub start_height: u64,
+pub start_height: usize,
 /// The batch of blocks.
 pub block_batch: BlockBatch,
 }
@@ -64,7 +64,7 @@ impl BlockQueue {
 }

 /// Returns the oldest batch that has not been put in the [`async_buffer`] yet.
-pub fn oldest_ready_batch(&self) -> Option<u64> {
+pub fn oldest_ready_batch(&self) -> Option<usize> {
 self.ready_batches.peek().map(|batch| batch.start_height)
 }

@@ -80,13 +80,13 @@ impl BlockQueue {
 pub async fn add_incoming_batch(
 &mut self,
 new_batch: ReadyQueueBatch,
-oldest_in_flight_start_height: Option<u64>,
+oldest_in_flight_start_height: Option<usize>,
 ) -> Result<(), BlockDownloadError> {
 self.ready_batches_size += new_batch.block_batch.size;
 self.ready_batches.push(new_batch);

 // The height to stop pushing batches into the buffer.
-let height_to_stop_at = oldest_in_flight_start_height.unwrap_or(u64::MAX);
+let height_to_stop_at = oldest_in_flight_start_height.unwrap_or(usize::MAX);

 while self
 .ready_batches
@@ -124,14 +124,14 @@ mod tests {
 use super::*;

 prop_compose! {
-fn ready_batch_strategy()(start_height in 0_u64..500_000_000) -> ReadyQueueBatch {
+fn ready_batch_strategy()(start_height in 0_usize..500_000_000) -> ReadyQueueBatch {
 let (_, peer_handle) = HandleBuilder::new().build();

 ReadyQueueBatch {
 start_height,
 block_batch: BlockBatch {
 blocks: vec![],
-size: start_height as usize,
+size: start_height,
 peer_handle,
 },
 }

@@ -26,7 +26,7 @@ pub struct BlocksToRetrieve<N: NetworkZone> {
 /// The hash of the last block before this batch.
 pub prev_id: [u8; 32],
 /// The expected height of the first block in [`BlocksToRetrieve::ids`].
-pub start_height: u64,
+pub start_height: usize,
 /// The peer who told us about this batch.
 pub peer_who_told_us: InternalPeerID<N::Addr>,
 /// The peer who told us about this batch's handle.
@@ -54,7 +54,7 @@ pub struct ChainTracker<N: NetworkZone> {
 /// A list of [`ChainEntry`]s, in order.
 entries: VecDeque<ChainEntry<N>>,
 /// The height of the first block, in the first entry in [`Self::entries`].
-first_height: u64,
+first_height: usize,
 /// The hash of the last block in the last entry.
 top_seen_hash: [u8; 32],
 /// The hash of the block one below [`Self::first_height`].
@@ -67,7 +67,7 @@ impl<N: NetworkZone> ChainTracker<N> {
 /// Creates a new chain tracker.
 pub fn new(
 new_entry: ChainEntry<N>,
-first_height: u64,
+first_height: usize,
 our_genesis: [u8; 32],
 previous_hash: [u8; 32],
 ) -> Self {
@@ -96,14 +96,14 @@ impl<N: NetworkZone> ChainTracker<N> {
 }

 /// Returns the height of the highest block we are tracking.
-pub fn top_height(&self) -> u64 {
+pub fn top_height(&self) -> usize {
 let top_block_idx = self
 .entries
 .iter()
 .map(|entry| entry.ids.len())
 .sum::<usize>();

-self.first_height + u64::try_from(top_block_idx).unwrap()
+self.first_height + top_block_idx
 }

 /// Returns the total number of queued batches for a certain `batch_size`.
@@ -171,15 +171,12 @@ impl<N: NetworkZone> ChainTracker<N> {
 // - index of the next pruned block for this seed
 let end_idx = min(
 min(entry.ids.len(), max_blocks),
-usize::try_from(
 pruning_seed
 .get_next_pruned_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT)
 .expect("We use local values to calculate height which should be below the sanity limit")
 // Use a big value as a fallback if the seed does no pruning.
 .unwrap_or(CRYPTONOTE_MAX_BLOCK_HEIGHT)
 - self.first_height,
-)
-.unwrap(),
 );

 if end_idx == 0 {
@@ -198,7 +195,7 @@ impl<N: NetworkZone> ChainTracker<N> {
 failures: 0,
 };

-self.first_height += u64::try_from(end_idx).unwrap();
+self.first_height += end_idx;
 // TODO: improve ByteArrayVec API.
 self.previous_hash = blocks.ids[blocks.ids.len() - 1];

@@ -34,7 +34,7 @@ pub async fn download_batch_task<N: NetworkZone>(
 client: ClientPoolDropGuard<N>,
 ids: ByteArrayVec<32>,
 previous_id: [u8; 32],
-expected_start_height: u64,
+expected_start_height: usize,
 _attempt: usize,
 ) -> BlockDownloadTaskResponse<N> {
 BlockDownloadTaskResponse {
@@ -51,7 +51,7 @@ async fn request_batch_from_peer<N: NetworkZone>(
 mut client: ClientPoolDropGuard<N>,
 ids: ByteArrayVec<32>,
 previous_id: [u8; 32],
-expected_start_height: u64,
+expected_start_height: usize,
 ) -> Result<(ClientPoolDropGuard<N>, BlockBatch), BlockDownloadError> {
 let request = PeerRequest::Protocol(ProtocolRequest::GetObjects(GetObjectsRequest {
 blocks: ids.clone(),
@@ -105,7 +105,7 @@ async fn request_batch_from_peer<N: NetworkZone>(

 fn deserialize_batch(
 blocks_response: GetObjectsResponse,
-expected_start_height: u64,
+expected_start_height: usize,
 requested_ids: ByteArrayVec<32>,
 previous_id: [u8; 32],
 peer_handle: ConnectionHandle,
@@ -115,7 +115,7 @@ fn deserialize_batch(
 .into_par_iter()
 .enumerate()
 .map(|(i, block_entry)| {
-let expected_height = u64::try_from(i).unwrap() + expected_start_height;
+let expected_height = i + expected_start_height;

 let mut size = block_entry.block.len();

@@ -125,7 +125,7 @@ fn deserialize_batch(
 let block_hash = block.hash();

 // Check the block matches the one requested and the peer sent enough transactions.
-if requested_ids[i] != block_hash || block.txs.len() != block_entry.txs.len() {
+if requested_ids[i] != block_hash || block.transactions.len() != block_entry.txs.len() {
 return Err(BlockDownloadError::PeersResponseWasInvalid);
 }

@@ -177,7 +177,7 @@ fn deserialize_batch(
 .collect::<Result<Vec<_>, _>>()?;

 // Make sure the transactions in the block were the ones the peer sent.
-let mut expected_txs = block.txs.iter().collect::<HashSet<_>>();
+let mut expected_txs = block.transactions.iter().collect::<HashSet<_>>();

 for tx in &txs {
 if !expected_txs.remove(&tx.hash()) {

@@ -11,7 +11,6 @@ use futures::{FutureExt, StreamExt};
 use indexmap::IndexMap;
 use monero_serai::{
 block::{Block, BlockHeader},
-ringct::{RctBase, RctPrunable, RctSignatures},
 transaction::{Input, Timelock, Transaction, TransactionPrefix},
 };
 use proptest::{collection::vec, prelude::*};
@@ -90,30 +89,20 @@ proptest! {

 prop_compose! {
 /// Returns a strategy to generate a [`Transaction`] that is valid for the block downloader.
-fn dummy_transaction_stragtegy(height: u64)
+fn dummy_transaction_stragtegy(height: usize)
 (
 extra in vec(any::<u8>(), 0..1_000),
 timelock in 1_usize..50_000_000,
 )
 -> Transaction {
-Transaction {
+Transaction::V1 {
 prefix: TransactionPrefix {
-version: 1,
-timelock: Timelock::Block(timelock),
+additional_timelock: Timelock::Block(timelock),
 inputs: vec![Input::Gen(height)],
 outputs: vec![],
 extra,
 },
 signatures: vec![],
-rct_signatures: RctSignatures {
-base: RctBase {
-fee: 0,
-pseudo_outs: vec![],
-encrypted_amounts: vec![],
-commitments: vec![],
-},
-prunable: RctPrunable::Null
-},
 }
 }
 }
@@ -121,25 +110,25 @@ prop_compose! {
 prop_compose! {
 /// Returns a strategy to generate a [`Block`] that is valid for the block downloader.
 fn dummy_block_stragtegy(
-height: u64,
+height: usize,
 previous: [u8; 32],
 )
 (
-miner_tx in dummy_transaction_stragtegy(height),
+miner_transaction in dummy_transaction_stragtegy(height),
 txs in vec(dummy_transaction_stragtegy(height), 0..25)
 )
 -> (Block, Vec<Transaction>) {
 (
 Block {
 header: BlockHeader {
-major_version: 0,
-minor_version: 0,
+hardfork_version: 0,
+hardfork_signal: 0,
 timestamp: 0,
 previous,
 nonce: 0,
 },
-miner_tx,
-txs: txs.iter().map(Transaction::hash).collect(),
+miner_transaction,
+transactions: txs.iter().map(Transaction::hash).collect(),
 },
 txs
 )
@@ -167,7 +156,7 @@ prop_compose! {
 for (height, mut block) in blocks.into_iter().enumerate() {
 if let Some(last) = blockchain.last() {
 block.0.header.previous = *last.0;
-block.0.miner_tx.prefix.inputs = vec![Input::Gen(height as u64)]
+block.0.miner_transaction.prefix_mut().inputs = vec![Input::Gen(height)]
 }

 blockchain.insert(block.0.hash(), block);

@@ -38,7 +38,7 @@ enum OutboundConnectorError {
 /// set needs specific data that none of the currently connected peers have.
 pub struct MakeConnectionRequest {
 /// The block needed that no connected peers have due to pruning.
-block_needed: Option<u64>,
+block_needed: Option<usize>,
 }

 /// The outbound connection count keeper.

|
||||||
fn peers_to_sync_from(
|
fn peers_to_sync_from(
|
||||||
&self,
|
&self,
|
||||||
current_cum_diff: u128,
|
current_cum_diff: u128,
|
||||||
block_needed: Option<u64>,
|
block_needed: Option<usize>,
|
||||||
) -> Vec<InternalPeerID<N::Addr>> {
|
) -> Vec<InternalPeerID<N::Addr>> {
|
||||||
self.cumulative_difficulties
|
self.cumulative_difficulties
|
||||||
.range((current_cum_diff + 1)..)
|
.range((current_cum_diff + 1)..)
|
||||||
|
|
|
@@ -22,13 +22,13 @@ use std::cmp::Ordering;

 use thiserror::Error;

-pub const CRYPTONOTE_MAX_BLOCK_HEIGHT: u64 = 500000000;
+pub const CRYPTONOTE_MAX_BLOCK_HEIGHT: usize = 500000000;
 /// The default log stripes for Monero pruning.
 pub const CRYPTONOTE_PRUNING_LOG_STRIPES: u32 = 3;
 /// The amount of blocks that peers keep before another stripe starts storing blocks.
-pub const CRYPTONOTE_PRUNING_STRIPE_SIZE: u64 = 4096;
+pub const CRYPTONOTE_PRUNING_STRIPE_SIZE: usize = 4096;
 /// The amount of blocks from the top of the chain that should not be pruned.
-pub const CRYPTONOTE_PRUNING_TIP_BLOCKS: u64 = 5500;
+pub const CRYPTONOTE_PRUNING_TIP_BLOCKS: usize = 5500;

 const PRUNING_SEED_LOG_STRIPES_SHIFT: u32 = 7;
 const PRUNING_SEED_STRIPE_SHIFT: u32 = 0;
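A rough worked example of what these constants imply (a sketch, not part of the commit): with the default `CRYPTONOTE_PRUNING_LOG_STRIPES = 3` there are 8 stripes, each peer keeps `CRYPTONOTE_PRUNING_STRIPE_SIZE` consecutive blocks per cycle, and (outside the unpruned tip zone) a block's stripe follows the same arithmetic as `get_block_pruning_stripe` further down in this diff. The standalone function and its name here are for illustration only.

const CRYPTONOTE_PRUNING_STRIPE_SIZE: usize = 4096;
const CRYPTONOTE_PRUNING_LOG_STRIPES: u32 = 3;

// Stripe (1..=8) responsible for `block_height`, ignoring the unpruned tip zone.
fn stripe_for(block_height: usize) -> u32 {
    (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE)
        & ((1 << CRYPTONOTE_PRUNING_LOG_STRIPES) as usize - 1))
        + 1) as u32
}

fn main() {
    assert_eq!(stripe_for(0), 1); // heights 0..4096 fall in stripe 1
    assert_eq!(stripe_for(5000), 2); // heights 4096..8192 fall in stripe 2
    assert_eq!(stripe_for(8 * 4096), 1); // the pattern repeats every 8 * 4096 blocks
}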
@@ -127,7 +127,7 @@ impl PruningSeed {
 }

 /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block.
-pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool {
+pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool {
 match self {
 PruningSeed::NotPruned => true,
 PruningSeed::Pruned(seed) => seed.has_full_block(height, blockchain_height),
@@ -151,9 +151,9 @@ impl PruningSeed {
 /// This function will also error if `block_height` > `blockchain_height`
 pub fn get_next_pruned_block(
 &self,
-block_height: u64,
-blockchain_height: u64,
-) -> Result<Option<u64>, PruningError> {
+block_height: usize,
+blockchain_height: usize,
+) -> Result<Option<usize>, PruningError> {
 Ok(match self {
 PruningSeed::NotPruned => None,
 PruningSeed::Pruned(seed) => {
@@ -177,9 +177,9 @@ impl PruningSeed {
 ///
 pub fn get_next_unpruned_block(
 &self,
-block_height: u64,
-blockchain_height: u64,
-) -> Result<u64, PruningError> {
+block_height: usize,
+blockchain_height: usize,
+) -> Result<usize, PruningError> {
 Ok(match self {
 PruningSeed::NotPruned => block_height,
 PruningSeed::Pruned(seed) => {
@@ -312,7 +312,7 @@ impl DecompressedPruningSeed {
 }

 /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block.
-pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool {
+pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool {
 match get_block_pruning_stripe(height, blockchain_height, self.log_stripes) {
 Some(block_stripe) => self.stripe == block_stripe,
 None => true,
@@ -334,9 +334,9 @@ impl DecompressedPruningSeed {
 ///
 pub fn get_next_unpruned_block(
 &self,
-block_height: u64,
-blockchain_height: u64,
-) -> Result<u64, PruningError> {
+block_height: usize,
+blockchain_height: usize,
+) -> Result<usize, PruningError> {
 if block_height > CRYPTONOTE_MAX_BLOCK_HEIGHT || block_height > blockchain_height {
 return Err(PruningError::BlockHeightTooLarge);
 }
@@ -373,7 +373,7 @@ impl DecompressedPruningSeed {

 // amt_of_cycles * blocks in a cycle + how many blocks through a cycles until the seed starts storing blocks
 let calculated_height = cycles_start * (CRYPTONOTE_PRUNING_STRIPE_SIZE << self.log_stripes)
-+ (self.stripe as u64 - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE;
++ (self.stripe as usize - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE;

 if calculated_height + CRYPTONOTE_PRUNING_TIP_BLOCKS > blockchain_height {
 // if our calculated height is greater than the amount of tip blocks then the start of the tip blocks will be the next un-pruned
@@ -400,9 +400,9 @@ impl DecompressedPruningSeed {
 ///
 pub fn get_next_pruned_block(
 &self,
-block_height: u64,
-blockchain_height: u64,
-) -> Result<Option<u64>, PruningError> {
+block_height: usize,
+blockchain_height: usize,
+) -> Result<Option<usize>, PruningError> {
 if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
 // If we are within `CRYPTONOTE_PRUNING_TIP_BLOCKS` of the chain we should
 // not prune blocks.
@@ -434,16 +434,16 @@ impl DecompressedPruningSeed {
 }

 fn get_block_pruning_stripe(
-block_height: u64,
-blockchain_height: u64,
+block_height: usize,
+blockchain_height: usize,
 log_stripe: u32,
 ) -> Option<u32> {
 if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
 None
 } else {
 Some(
-(((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as u64 - 1)) + 1)
-as u32, // it's trivial to prove it's ok to us `as` here
+(((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as usize - 1))
++ 1) as u32, // it's trivial to prove it's ok to us `as` here
 )
 }
 }
@@ -503,7 +503,7 @@ mod tests {
 for i in 0_u32..8 {
 assert_eq!(
 get_block_pruning_stripe(
-(i * 4096) as u64,
+(i * 4096) as usize,
 blockchain_height,
 CRYPTONOTE_PRUNING_LOG_STRIPES
 )
@@ -515,7 +515,7 @@ mod tests {
 for i in 0_u32..8 {
 assert_eq!(
 get_block_pruning_stripe(
-32768 + (i * 4096) as u64,
+32768 + (i * 4096) as usize,
 blockchain_height,
 CRYPTONOTE_PRUNING_LOG_STRIPES
 )
@@ -527,7 +527,7 @@ mod tests {
 for i in 1_u32..8 {
 assert_eq!(
 get_block_pruning_stripe(
-32767 + (i * 4096) as u64,
+32767 + (i * 4096) as usize,
 blockchain_height,
 CRYPTONOTE_PRUNING_LOG_STRIPES
 )
@@ -553,23 +553,23 @@ mod tests {
 for (i, seed) in all_valid_seeds.iter().enumerate() {
 assert_eq!(
 seed.get_next_unpruned_block(0, blockchain_height).unwrap(),
-i as u64 * 4096
+i * 4096
 )
 }

 for (i, seed) in all_valid_seeds.iter().enumerate() {
 assert_eq!(
-seed.get_next_unpruned_block((i as u64 + 1) * 4096, blockchain_height)
+seed.get_next_unpruned_block((i + 1) * 4096, blockchain_height)
 .unwrap(),
-i as u64 * 4096 + 32768
+i * 4096 + 32768
 )
 }

 for (i, seed) in all_valid_seeds.iter().enumerate() {
 assert_eq!(
-seed.get_next_unpruned_block((i as u64 + 8) * 4096, blockchain_height)
+seed.get_next_unpruned_block((i + 8) * 4096, blockchain_height)
 .unwrap(),
-i as u64 * 4096 + 32768
+i * 4096 + 32768
 )
 }

@@ -610,19 +610,19 @@ mod tests {

 for (i, seed) in all_valid_seeds.iter().enumerate() {
 assert_eq!(
|
assert_eq!(
|
||||||
seed.get_next_pruned_block((i as u64 + 1) * 4096, blockchain_height)
|
seed.get_next_pruned_block((i + 1) * 4096, blockchain_height)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
(i as u64 + 1) * 4096
|
(i + 1) * 4096
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i, seed) in all_valid_seeds.iter().enumerate() {
|
for (i, seed) in all_valid_seeds.iter().enumerate() {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
seed.get_next_pruned_block((i as u64 + 8) * 4096, blockchain_height)
|
seed.get_next_pruned_block((i + 8) * 4096, blockchain_height)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
(i as u64 + 9) * 4096
|
(i + 9) * 4096
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
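The stripe arithmetic above is easier to read outside of the diff. A minimal, self-contained sketch of the same formula with `usize` heights, assuming the usual Monero pruning constants (stripe size 4096, tip window 5500, log-stripes 3) rather than the crate's real definitions:

// Illustrative constants; the real values live in the pruning crate.
const CRYPTONOTE_PRUNING_STRIPE_SIZE: usize = 4096;
const CRYPTONOTE_PRUNING_TIP_BLOCKS: usize = 5500;

fn get_block_pruning_stripe(
    block_height: usize,
    blockchain_height: usize,
    log_stripe: u32,
) -> Option<u32> {
    if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height {
        // Blocks near the chain tip are never pruned, so they belong to no stripe.
        None
    } else {
        // Heights rotate through stripes 1..=2^log_stripe in 4096-block batches.
        Some(
            (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as usize - 1))
                + 1) as u32,
        )
    }
}

fn main() {
    // With log_stripe = 3 there are 8 stripes: 0..4095 is stripe 1, 4096..8191 is
    // stripe 2, and the cycle wraps back to stripe 1 at height 32768.
    assert_eq!(get_block_pruning_stripe(0, 500_000, 3), Some(1));
    assert_eq!(get_block_pruning_stripe(4096, 500_000, 3), Some(2));
    assert_eq!(get_block_pruning_stripe(32768, 500_000, 3), Some(1));
    assert_eq!(get_block_pruning_stripe(499_000, 500_000, 3), None);
}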
@@ -65,17 +65,17 @@ pub fn add_block(
    #[cfg(debug_assertions)]
    {
        assert_eq!(block.block.serialize(), block.block_blob);
-        assert_eq!(block.block.txs.len(), block.txs.len());
+        assert_eq!(block.block.transactions.len(), block.txs.len());
        for (i, tx) in block.txs.iter().enumerate() {
            assert_eq!(tx.tx_blob, tx.tx.serialize());
-            assert_eq!(tx.tx_hash, block.block.txs[i]);
+            assert_eq!(tx.tx_hash, block.block.transactions[i]);
        }
    }

    //------------------------------------------------------ Transaction / Outputs / Key Images
    // Add the miner transaction first.
    {
-        let tx = &block.block.miner_tx;
+        let tx = &block.block.miner_transaction;
        add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)?;
    }

@@ -154,8 +154,8 @@ pub fn pop_block(
    let block = Block::read(&mut block_blob.as_slice())?;

    //------------------------------------------------------ Transaction / Outputs / Key Images
-    remove_tx(&block.miner_tx.hash(), tables)?;
-    for tx_hash in &block.txs {
+    remove_tx(&block.miner_transaction.hash(), tables)?;
+    for tx_hash in &block.transactions {
        remove_tx(tx_hash, tables)?;
    }

@@ -200,8 +200,8 @@ pub fn get_block_extended_header_from_height(
    #[allow(clippy::cast_possible_truncation)]
    Ok(ExtendedBlockHeader {
        cumulative_difficulty,
-        version: block.header.major_version,
-        vote: block.header.minor_version,
+        version: block.header.hardfork_version,
+        vote: block.header.hardfork_signal,
        timestamp: block.header.timestamp,
        block_weight: block_info.weight as usize,
        long_term_weight: block_info.long_term_weight as usize,

@@ -297,7 +297,7 @@ mod test {
        // HACK: `add_block()` asserts blocks with non-sequential heights
        // cannot be added, to get around this, manually edit the block height.
        for (height, block) in blocks.iter_mut().enumerate() {
-            block.height = height as u64;
+            block.height = height;
            assert_eq!(block.block.serialize(), block.block_blob);
        }
        let generated_coins_sum = blocks

@@ -369,8 +369,8 @@ mod test {
        let b1 = block_header_from_hash;
        let b2 = block;
        assert_eq!(b1, block_header_from_height);
-        assert_eq!(b1.version, b2.block.header.major_version);
-        assert_eq!(b1.vote, b2.block.header.minor_version);
+        assert_eq!(b1.version, b2.block.header.hardfork_version);
+        assert_eq!(b1.vote, b2.block.header.hardfork_signal);
        assert_eq!(b1.timestamp, b2.block.header.timestamp);
        assert_eq!(b1.cumulative_difficulty, b2.cumulative_difficulty);
        assert_eq!(b1.block_weight, b2.weight);

@@ -388,7 +388,7 @@ mod test {

        assert_eq!(tx.tx_blob, tx2.serialize());
        assert_eq!(tx.tx_weight, tx2.weight());
-        assert_eq!(tx.tx_hash, block.block.txs[i]);
+        assert_eq!(tx.tx_hash, block.block.transactions[i]);
        assert_eq!(tx.tx_hash, tx2.hash());
        }
    }

@@ -440,7 +440,7 @@ mod test {

        let mut block = block_v9_tx3().clone();

-        block.height = u64::from(u32::MAX) + 1;
+        block.height = usize::try_from(u32::MAX).unwrap() + 1;
        add_block(&block, &mut tables).unwrap();
    }
@@ -25,7 +25,8 @@ use crate::{
pub fn chain_height(
    table_block_heights: &impl DatabaseRo<BlockHeights>,
) -> Result<BlockHeight, RuntimeError> {
-    table_block_heights.len()
+    #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit
+    table_block_heights.len().map(|height| height as usize)
}

/// Retrieve the height of the top block.

@@ -47,7 +48,8 @@ pub fn top_block_height(
) -> Result<BlockHeight, RuntimeError> {
    match table_block_heights.len()? {
        0 => Err(RuntimeError::KeyNotFound),
-        height => Ok(height - 1),
+        #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit
+        height => Ok(height as usize - 1),
    }
}

@@ -110,7 +112,7 @@ mod test {
        block_v9_tx3().clone(),
        block_v16_tx0().clone(),
    ];
-    let blocks_len = u64::try_from(blocks.len()).unwrap();
+    let blocks_len = blocks.len();

    // Add blocks.
    {

@@ -127,7 +129,6 @@ mod test {
    );

    for (i, block) in blocks.iter_mut().enumerate() {
-        let i = u64::try_from(i).unwrap();
        // HACK: `add_block()` asserts blocks with non-sequential heights
        // cannot be added, to get around this, manually edit the block height.
        block.height = i;
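The two helpers above differ only by an off-by-one: the chain height is the number of stored blocks, while the top block's height is that count minus one. A toy sketch of that relationship, with a plain `u64` count standing in for the real `BlockHeights` table:

#[allow(clippy::cast_possible_truncation)] // the crate targets 64-bit, as the diff notes
fn chain_height(num_blocks: u64) -> usize {
    num_blocks as usize
}

fn top_block_height(num_blocks: u64) -> Option<usize> {
    match num_blocks {
        0 => None, // the real code returns `RuntimeError::KeyNotFound` here
        n => Some(n as usize - 1),
    }
}

fn main() {
    assert_eq!(chain_height(3), 3);           // 3 blocks stored: heights 0, 1, 2
    assert_eq!(top_block_height(3), Some(2)); // the top block sits at height 2
    assert_eq!(top_block_height(0), None);
}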
@@ -2,7 +2,7 @@

//---------------------------------------------------------------------------------------------------- Import
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar};
-use monero_serai::{transaction::Timelock, H};
+use monero_serai::{generators::H, transaction::Timelock};

use cuprate_database::{
    RuntimeError, {DatabaseRo, DatabaseRw},

@@ -157,7 +157,7 @@ pub fn output_to_output_on_chain(
) -> Result<OutputOnChain, RuntimeError> {
    // FIXME: implement lookup table for common values:
    // <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
-    let commitment = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount);
+    let commitment = ED25519_BASEPOINT_POINT + *H * Scalar::from(amount);

    let time_lock = if output
        .output_flags

@@ -173,7 +173,7 @@ pub fn output_to_output_on_chain(
        .unwrap_or(None);

    Ok(OutputOnChain {
-        height: u64::from(output.height),
+        height: output.height as usize,
        time_lock,
        key,
        commitment,

@@ -213,7 +213,7 @@ pub fn rct_output_to_output_on_chain(
        .unwrap_or(None);

    Ok(OutputOnChain {
-        height: u64::from(rct_output.height),
+        height: rct_output.height as usize,
        time_lock,
        key,
        commitment,
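The commitment rebuilt above for clear-amount outputs is a Pedersen commitment with the blinding factor fixed to one: `G + amount * H`. A hedged sketch using only curve25519-dalek, where `h` stands in for Monero's second generator (`monero_serai::generators::H`); the placeholder value below exists only to make the example runnable, it is not the real generator:

use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::EdwardsPoint, scalar::Scalar};

// commitment = G + amount * H, i.e. a Pedersen commitment with blinding factor 1.
fn clear_amount_commitment(amount: u64, h: EdwardsPoint) -> EdwardsPoint {
    ED25519_BASEPOINT_POINT + h * Scalar::from(amount)
}

fn main() {
    // Placeholder stand-in for Monero's `H`; pass the real generator in practice.
    let h = ED25519_BASEPOINT_POINT * Scalar::from(8u64);
    let c = clear_amount_commitment(1_000_000_000u64, h);
    println!("{:?}", c.compress().to_bytes());
}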
@@ -68,7 +68,7 @@ pub fn add_tx(
    // so the `u64/usize` is stored without any tag.
    //
    // <https://github.com/Cuprate/cuprate/pull/102#discussion_r1558504285>
-    match tx.prefix.timelock {
+    match tx.prefix().additional_timelock {
        Timelock::None => (),
        Timelock::Block(height) => tables.tx_unlock_time_mut().put(&tx_id, &(height as u64))?,
        Timelock::Time(time) => tables.tx_unlock_time_mut().put(&tx_id, &time)?,

@@ -92,7 +92,7 @@ pub fn add_tx(
    let mut miner_tx = false;

    // Key images.
-    for inputs in &tx.prefix.inputs {
+    for inputs in &tx.prefix().inputs {
        match inputs {
            // Key images.
            Input::ToKey { key_image, .. } => {

@@ -106,70 +106,64 @@ pub fn add_tx(
    //------------------------------------------------------ Outputs
    // Output bit flags.
    // Set to a non-zero bit value if the unlock time is non-zero.
-    let output_flags = match tx.prefix.timelock {
+    let output_flags = match tx.prefix().additional_timelock {
        Timelock::None => OutputFlags::empty(),
        Timelock::Block(_) | Timelock::Time(_) => OutputFlags::NON_ZERO_UNLOCK_TIME,
    };

-    let mut amount_indices = Vec::with_capacity(tx.prefix.outputs.len());
-
-    for (i, output) in tx.prefix.outputs.iter().enumerate() {
-        let key = *output.key.as_bytes();
-
-        // Outputs with clear amounts.
-        let amount_index = if let Some(amount) = output.amount {
-            // RingCT (v2 transaction) miner outputs.
-            if miner_tx && tx.prefix.version == 2 {
-                // Create commitment.
-                // <https://github.com/Cuprate/cuprate/pull/102#discussion_r1559489302>
-                // FIXME: implement lookup table for common values:
-                // <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
-                let commitment = (ED25519_BASEPOINT_POINT
-                    + monero_serai::H() * Scalar::from(amount))
-                .compress()
-                .to_bytes();
-
-                add_rct_output(
-                    &RctOutput {
-                        key,
-                        height,
-                        output_flags,
-                        tx_idx: tx_id,
-                        commitment,
-                    },
-                    tables.rct_outputs_mut(),
-                )?
-            // Pre-RingCT outputs.
-            } else {
-                add_output(
-                    amount,
-                    &Output {
-                        key,
-                        height,
-                        output_flags,
-                        tx_idx: tx_id,
-                    },
-                    tables,
-                )?
-                .amount_index
-            }
-        // RingCT outputs.
-        } else {
-            let commitment = tx.rct_signatures.base.commitments[i].compress().to_bytes();
-            add_rct_output(
-                &RctOutput {
-                    key,
-                    height,
-                    output_flags,
-                    tx_idx: tx_id,
-                    commitment,
-                },
-                tables.rct_outputs_mut(),
-            )?
-        };
-
-        amount_indices.push(amount_index);
-    } // for each output
+    let amount_indices = match &tx {
+        Transaction::V1 { prefix, .. } => prefix
+            .outputs
+            .iter()
+            .map(|output| {
+                // Pre-RingCT outputs.
+                Ok(add_output(
+                    output.amount.unwrap_or(0),
+                    &Output {
+                        key: output.key.0,
+                        height,
+                        output_flags,
+                        tx_idx: tx_id,
+                    },
+                    tables,
+                )?
+                .amount_index)
+            })
+            .collect::<Result<Vec<_>, RuntimeError>>()?,
+        Transaction::V2 { prefix, proofs } => prefix
+            .outputs
+            .iter()
+            .enumerate()
+            .map(|(i, output)| {
+                // Create commitment.
+                // <https://github.com/Cuprate/cuprate/pull/102#discussion_r1559489302>
+                // FIXME: implement lookup table for common values:
+                // <https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/ringct/rctOps.cpp#L322>
+                let commitment = if miner_tx {
+                    ED25519_BASEPOINT_POINT
+                        + *monero_serai::generators::H * Scalar::from(output.amount.unwrap_or(0))
+                } else {
+                    proofs
+                        .as_ref()
+                        .expect("A V2 transaction with no RCT proofs is a miner tx")
+                        .base
+                        .commitments[i]
+                };

+                // Add the RCT output.
+                add_rct_output(
+                    &RctOutput {
+                        key: output.key.0,
+                        height,
+                        output_flags,
+                        tx_idx: tx_id,
+                        commitment: commitment.compress().0,
+                    },
+                    tables.rct_outputs_mut(),
+                )
+            })
+            .collect::<Result<Vec<_>, _>>()?,
+    };

    tables
        .tx_outputs_mut()

@@ -227,7 +221,7 @@ pub fn remove_tx(
    //------------------------------------------------------ Key Images
    // Is this a miner transaction?
    let mut miner_tx = false;
-    for inputs in &tx.prefix.inputs {
+    for inputs in &tx.prefix().inputs {
        match inputs {
            // Key images.
            Input::ToKey { key_image, .. } => {

@@ -240,11 +234,11 @@ pub fn remove_tx(

    //------------------------------------------------------ Outputs
    // Remove each output in the transaction.
-    for output in &tx.prefix.outputs {
+    for output in &tx.prefix().outputs {
        // Outputs with clear amounts.
        if let Some(amount) = output.amount {
            // RingCT miner outputs.
-            if miner_tx && tx.prefix.version == 2 {
+            if miner_tx && tx.version() == 2 {
                let amount_index = get_rct_num_outputs(tables.rct_outputs())? - 1;
                remove_rct_output(&amount_index, tables.rct_outputs_mut())?;
            // Pre-RingCT outputs.
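The first hunk above flattens the transaction's additional timelock into a single untagged `u64` column. A small stand-alone sketch of that mapping, with a local `Timelock` enum standing in for `monero_serai::transaction::Timelock` and an `Option` standing in for the database write:

enum Timelock {
    None,
    Block(usize),
    Time(u64),
}

/// Returns the value to store in the unlock-time table, or `None` when no row is written.
fn unlock_time_column(timelock: &Timelock) -> Option<u64> {
    match timelock {
        Timelock::None => None,
        Timelock::Block(height) => Some(*height as u64), // block heights share the column
        Timelock::Time(time) => Some(*time),             // UNIX timestamps use it directly
    }
}

fn main() {
    assert_eq!(unlock_time_column(&Timelock::None), None);
    assert_eq!(unlock_time_column(&Timelock::Block(100_081)), Some(100_081));
    assert_eq!(unlock_time_column(&Timelock::Time(1_600_000_000)), Some(1_600_000_000));
}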
@@ -48,9 +48,9 @@ pub fn init(
///
/// The height offset is the difference between the top block's height and the block height that should be in that position.
#[inline]
-pub(super) const fn compact_history_index_to_height_offset<const INITIAL_BLOCKS: u64>(
-    i: u64,
-) -> u64 {
+pub(super) const fn compact_history_index_to_height_offset<const INITIAL_BLOCKS: usize>(
+    i: usize,
+) -> usize {
    // If the position is below the initial blocks just return the position back
    if i <= INITIAL_BLOCKS {
        i

@@ -66,8 +66,8 @@ pub(super) const fn compact_history_index_to_height_offset<const INITIAL_BLOCKS:
///
/// The genesis must always be included in the compact history.
#[inline]
-pub(super) const fn compact_history_genesis_not_included<const INITIAL_BLOCKS: u64>(
-    top_block_height: u64,
+pub(super) const fn compact_history_genesis_not_included<const INITIAL_BLOCKS: usize>(
+    top_block_height: usize,
) -> bool {
    // If the top block height is less than the initial blocks then it will always be included.
    // Otherwise, we use the fact that to reach the genesis block this statement must be true (for a

@@ -91,7 +91,7 @@ mod tests {

    proptest! {
        #[test]
-        fn compact_history(top_height in 0_u64..500_000_000) {
+        fn compact_history(top_height in 0_usize..500_000_000) {
            let mut heights = (0..)
                .map(compact_history_index_to_height_offset::<11>)
                .map_while(|i| top_height.checked_sub(i))
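The compact history is assembled by walking an offset schedule down from the top block: the first `INITIAL_BLOCKS` offsets are linear, the gaps then grow quickly, and the genesis block is always included. A sketch of that assembly, mirroring the proptest above; the doubling schedule in `height_offset` is an illustrative assumption, not the crate's exact formula:

const INITIAL_BLOCKS: usize = 11;

// Hypothetical offset schedule: linear for the first window, then doubling gaps.
fn height_offset(i: usize) -> usize {
    if i <= INITIAL_BLOCKS {
        i
    } else {
        INITIAL_BLOCKS + (1 << (i - INITIAL_BLOCKS))
    }
}

fn compact_history(top_height: usize) -> Vec<usize> {
    let mut heights: Vec<usize> = (0..)
        .map(height_offset)
        .map_while(|offset| top_height.checked_sub(offset))
        .collect();
    // The genesis block must always be present in the compact history.
    if heights.last() != Some(&0) {
        heights.push(0);
    }
    heights
}

fn main() {
    let h = compact_history(1_000_000);
    assert_eq!(h[0], 1_000_000);       // the top block comes first
    assert_eq!(*h.last().unwrap(), 0); // and genesis is always the final entry
}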
@@ -87,7 +87,7 @@
//!
//! // Prepare a request to write block.
//! let mut block = block_v16_tx0().clone();
-//! # block.height = 0_u64; // must be 0th height or panic in `add_block()`
+//! # block.height = 0_usize; // must be 0th height or panic in `add_block()`
//! let request = BlockchainWriteRequest::WriteBlock(block);
//!
//! // Send the request.

@@ -278,7 +278,7 @@ fn chain_height(env: &ConcreteEnv) -> ResponseResult {

/// [`BlockchainReadRequest::GeneratedCoins`].
#[inline]
-fn generated_coins(env: &ConcreteEnv, height: u64) -> ResponseResult {
+fn generated_coins(env: &ConcreteEnv, height: usize) -> ResponseResult {
    // Single-threaded, no `ThreadLocal` required.
    let env_inner = env.env_inner();
    let tx_ro = env_inner.tx_ro()?;

@@ -429,7 +429,7 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult {
    );

    /// The amount of top block IDs in the compact chain.
-    const INITIAL_BLOCKS: u64 = 11;
+    const INITIAL_BLOCKS: usize = 11;

    // rayon is not used here because the amount of block IDs is expected to be small.
    let mut block_ids = (0..)

@@ -78,7 +78,7 @@ async fn test_template(
    // cannot be added, to get around this, manually edit the block height.
    for (i, block_fn) in block_fns.iter().enumerate() {
        let mut block = block_fn().clone();
-        block.height = i as u64;
+        block.height = i;

        // Request a block to be written, assert it was written.
        let request = BlockchainWriteRequest::WriteBlock(block);

@@ -68,7 +68,7 @@ pub type BlockBlob = StorableVec<u8>;
pub type BlockHash = [u8; 32];

/// A block's height.
-pub type BlockHeight = u64;
+pub type BlockHeight = usize;

/// A key image.
pub type KeyImage = [u8; 32];
@@ -6,24 +6,26 @@ license = "MIT"
authors = ["Boog900", "hinto-janai"]

[dependencies]
cuprate-types = { path = "../types" }
cuprate-helper = { path = "../helper", features = ["map"] }
cuprate-wire = { path = "../net/wire" }
cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] }

hex = { workspace = true }
hex-literal = { workspace = true }
-monero-serai = { workspace = true, features = ["std", "http-rpc"] }
+monero-serai = { workspace = true, features = ["std"] }
+monero-simple-request-rpc = { workspace = true }
+monero-rpc = { workspace = true }
futures = { workspace = true, features = ["std"] }
async-trait = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
bytes = { workspace = true, features = ["std"] }
tempfile = { workspace = true }
paste = { workspace = true }
borsh = { workspace = true, features = ["derive"]}

[dev-dependencies]
hex = { workspace = true }
@@ -34,12 +34,12 @@ macro_rules! const_block_blob {
#[doc = ""]
#[doc = concat!("let block = Block::read(&mut ", stringify!($name), ").unwrap();")]
#[doc = ""]
-#[doc = concat!("assert_eq!(block.header.major_version, ", $major_version, ");")]
-#[doc = concat!("assert_eq!(block.header.minor_version, ", $minor_version, ");")]
+#[doc = concat!("assert_eq!(block.header.hardfork_version, ", $major_version, ");")]
+#[doc = concat!("assert_eq!(block.header.hardfork_signal, ", $minor_version, ");")]
#[doc = concat!("assert_eq!(block.header.timestamp, ", $timestamp, ");")]
#[doc = concat!("assert_eq!(block.header.nonce, ", $nonce, ");")]
-#[doc = concat!("assert!(matches!(block.miner_tx.prefix.inputs[0], Input::Gen(", $height, ")));")]
-#[doc = concat!("assert_eq!(block.txs.len(), ", $tx_len, ");")]
+#[doc = concat!("assert!(matches!(block.miner_transaction.prefix().inputs[0], Input::Gen(", $height, ")));")]
+#[doc = concat!("assert_eq!(block.transactions.len(), ", $tx_len, ");")]
#[doc = concat!("assert_eq!(hex::encode(block.hash()), \"", $hash, "\")")]
/// ```
pub const $name: &[u8] = include_bytes!($data_path);

@@ -107,7 +107,6 @@ macro_rules! const_tx_blob {
    timelock: $timelock:expr, // Transaction's timelock (use the real type `Timelock`)
    input_len: $input_len:literal, // Amount of inputs
    output_len: $output_len:literal, // Amount of outputs
-    signatures_len: $signatures_len:literal, // Amount of signatures
) => {
    #[doc = concat!("Transaction with hash `", $hash, "`.")]
    ///

@@ -117,11 +116,10 @@ macro_rules! const_tx_blob {
#[doc = ""]
#[doc = concat!("let tx = Transaction::read(&mut ", stringify!($name), ").unwrap();")]
#[doc = ""]
-#[doc = concat!("assert_eq!(tx.prefix.version, ", $version, ");")]
-#[doc = concat!("assert_eq!(tx.prefix.timelock, ", stringify!($timelock), ");")]
-#[doc = concat!("assert_eq!(tx.prefix.inputs.len(), ", $input_len, ");")]
-#[doc = concat!("assert_eq!(tx.prefix.outputs.len(), ", $output_len, ");")]
-#[doc = concat!("assert_eq!(tx.signatures.len(), ", $signatures_len, ");")]
+#[doc = concat!("assert_eq!(tx.version(), ", $version, ");")]
+#[doc = concat!("assert_eq!(tx.prefix().additional_timelock, ", stringify!($timelock), ");")]
+#[doc = concat!("assert_eq!(tx.prefix().inputs.len(), ", $input_len, ");")]
+#[doc = concat!("assert_eq!(tx.prefix().outputs.len(), ", $output_len, ");")]
#[doc = concat!("assert_eq!(hex::encode(tx.hash()), \"", $hash, "\")")]
/// ```
pub const $name: &[u8] = include_bytes!($data_path);

@@ -136,7 +134,6 @@ const_tx_blob! {
    timelock: Timelock::Block(100_081),
    input_len: 1,
    output_len: 5,
-    signatures_len: 0,
}

const_tx_blob! {

@@ -147,7 +144,6 @@ const_tx_blob! {
    timelock: Timelock::None,
    input_len: 19,
    output_len: 61,
-    signatures_len: 19,
}

const_tx_blob! {

@@ -158,7 +154,6 @@ const_tx_blob! {
    timelock: Timelock::None,
    input_len: 46,
    output_len: 46,
-    signatures_len: 46,
}

const_tx_blob! {

@@ -169,7 +164,6 @@ const_tx_blob! {
    timelock: Timelock::None,
    input_len: 1,
    output_len: 2,
-    signatures_len: 0,
}

const_tx_blob! {

@@ -180,7 +174,6 @@ const_tx_blob! {
    timelock: Timelock::None,
    input_len: 1,
    output_len: 2,
-    signatures_len: 0,
}

const_tx_blob! {

@@ -191,7 +184,6 @@ const_tx_blob! {
    timelock: Timelock::None,
    input_len: 2,
    output_len: 2,
-    signatures_len: 0,
}

const_tx_blob! {

@@ -202,7 +194,6 @@ const_tx_blob! {
    timelock: Timelock::None,
    input_len: 2,
    output_len: 5,
-    signatures_len: 2,
}

const_tx_blob! {

@@ -213,7 +204,6 @@ const_tx_blob! {
    timelock: Timelock::None,
    input_len: 2,
    output_len: 2,
-    signatures_len: 0,
}

//---------------------------------------------------------------------------------------------------- Tests
@@ -8,11 +8,11 @@
//---------------------------------------------------------------------------------------------------- Import
use std::sync::OnceLock;

-use hex_literal::hex;
-use monero_serai::{block::Block, transaction::Transaction};
-
use cuprate_helper::map::combine_low_high_bits_to_u128;
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
+use hex_literal::hex;
+use monero_serai::transaction::Input;
+use monero_serai::{block::Block, transaction::Transaction};

use crate::data::constants::{
    BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73,

@@ -31,7 +31,7 @@ use crate::data::constants::{
struct VerifiedBlockMap {
    block_blob: &'static [u8],
    pow_hash: [u8; 32],
-    height: u64,
+    height: usize,
    generated_coins: u64,
    weight: usize,
    long_term_weight: usize,

@@ -68,11 +68,11 @@ impl VerifiedBlockMap {

    assert_eq!(
        txs.len(),
-        block.txs.len(),
+        block.transactions.len(),
        "(deserialized txs).len() != (txs hashes in block).len()"
    );

-    for (tx, tx_hash_in_block) in txs.iter().zip(&block.txs) {
+    for (tx, tx_hash_in_block) in txs.iter().zip(&block.transactions) {
        assert_eq!(
            &tx.tx_hash, tx_hash_in_block,
            "deserialized tx hash is not the same as the one in the parent block"

@@ -103,13 +103,43 @@ fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> VerifiedTransactionInfo
    let tx = Transaction::read(&mut tx_blob.as_slice()).unwrap();
    VerifiedTransactionInformation {
        tx_weight: tx.weight(),
-        fee: tx.rct_signatures.base.fee,
+        fee: tx_fee(&tx),
        tx_hash: tx.hash(),
        tx_blob,
        tx,
    }
}

+/// Calculates the fee of the [`Transaction`].
+///
+/// # Panics
+/// This will panic if the inputs overflow or the transaction outputs too much.
+pub fn tx_fee(tx: &Transaction) -> u64 {
+    let mut fee = 0_u64;
+
+    match &tx {
+        Transaction::V1 { prefix, .. } => {
+            for input in &prefix.inputs {
+                match input {
+                    Input::Gen(_) => return 0,
+                    Input::ToKey { amount, .. } => {
+                        fee = fee.checked_add(amount.unwrap_or(0)).unwrap();
+                    }
+                }
+            }
+
+            for output in &prefix.outputs {
+                fee.checked_sub(output.amount.unwrap_or(0)).unwrap();
+            }
+        }
+        Transaction::V2 { proofs, .. } => {
+            fee = proofs.as_ref().unwrap().base.fee;
+        }
+    };
+
+    fee
+}
+
//---------------------------------------------------------------------------------------------------- Blocks
/// Generate a block accessor function with this signature:
/// `fn() -> &'static VerifiedBlockInformation`

@@ -255,7 +285,6 @@ macro_rules! transaction_verification_data_fn {
#[doc = concat!("assert_eq!(tx.tx_blob, ", stringify!($tx_blob), ");")]
#[doc = concat!("assert_eq!(tx.tx_weight, ", $weight, ");")]
#[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")]
-#[doc = "assert_eq!(tx.fee, tx.tx.rct_signatures.base.fee);"]
/// ```
pub fn $fn_name() -> &'static VerifiedTransactionInformation {
    static TX: OnceLock<VerifiedTransactionInformation> = OnceLock::new();

@@ -32,4 +32,6 @@ pub use constants::{
};

mod free;
-pub use free::{block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3};
+pub use free::{
+    block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_fee, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3,
+};
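`tx_fee` above derives the fee from the transaction body itself: for v1, sum the clear input amounts and subtract the clear output amounts, with a `Gen` (miner) input meaning zero fee; for v2 the fee is simply read from the RCT proofs. A stand-alone sketch of the intended v1 arithmetic, with plain integers in place of the real `Input`/`Output` types:

// `is_miner_tx` models a transaction whose first input is `Input::Gen(height)`.
fn v1_fee(input_amounts: &[u64], output_amounts: &[u64], is_miner_tx: bool) -> u64 {
    if is_miner_tx {
        return 0; // miner transactions pay no fee
    }
    let inputs: u64 = input_amounts.iter().copied().sum();
    let outputs: u64 = output_amounts.iter().copied().sum();
    // Underflow means the transaction spends less than it outputs, mirroring the
    // checked arithmetic + `unwrap` panics in the helper above.
    inputs.checked_sub(outputs).expect("outputs exceed inputs")
}

fn main() {
    assert_eq!(v1_fee(&[5_000, 5_000], &[9_000], false), 1_000);
    assert_eq!(v1_fee(&[], &[35_184_338_534_400], true), 0);
}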
@@ -5,13 +5,14 @@ use serde::Deserialize;
use serde_json::json;
use tokio::task::spawn_blocking;

-use monero_serai::{
-    block::Block,
-    rpc::{HttpRpc, Rpc},
-};
+use monero_rpc::Rpc;
+use monero_serai::block::Block;
+use monero_simple_request_rpc::SimpleRequestRpc;

use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};

+use crate::data::tx_fee;
+
//---------------------------------------------------------------------------------------------------- Constants
/// The default URL used for Monero RPC connections.
pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081";

@@ -20,7 +21,7 @@ pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081";
/// An HTTP RPC client for Monero.
pub struct HttpRpcClient {
    address: String,
-    rpc: Rpc<HttpRpc>,
+    rpc: SimpleRequestRpc,
}

impl HttpRpcClient {

@@ -40,7 +41,7 @@ impl HttpRpcClient {
    let address = address.unwrap_or_else(|| LOCALHOST_RPC_URL.to_string());

    Self {
-        rpc: HttpRpc::new(address.clone()).await.unwrap(),
+        rpc: SimpleRequestRpc::new(address.clone()).await.unwrap(),
        address,
    }
}

@@ -53,7 +54,7 @@ impl HttpRpcClient {

    /// Access to the inner RPC client for other usage.
    #[allow(dead_code)]
-    const fn rpc(&self) -> &Rpc<HttpRpc> {
+    const fn rpc(&self) -> &SimpleRequestRpc {
        &self.rpc
    }

@@ -62,7 +63,7 @@ impl HttpRpcClient {
    /// # Panics
    /// This function will panic at any error point, e.g.,
    /// if the node cannot be connected to, if deserialization fails, etc.
-    pub async fn get_verified_block_information(&self, height: u64) -> VerifiedBlockInformation {
+    pub async fn get_verified_block_information(&self, height: usize) -> VerifiedBlockInformation {
        #[derive(Debug, Deserialize)]
        struct Result {
            blob: String,

@@ -75,7 +76,7 @@ impl HttpRpcClient {
            long_term_weight: usize,
            cumulative_difficulty: u128,
            hash: String,
-            height: u64,
+            height: usize,
            pow_hash: String,
            reward: u64, // generated_coins + total_tx_fees
        }

@@ -111,7 +112,7 @@ impl HttpRpcClient {
        .unwrap();

    let txs: Vec<VerifiedTransactionInformation> = self
-        .get_transaction_verification_data(&block.txs)
+        .get_transaction_verification_data(&block.transactions)
        .await
        .collect();

@@ -124,8 +125,8 @@ impl HttpRpcClient {

    let total_tx_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
    let generated_coins = block
-        .miner_tx
-        .prefix
+        .miner_transaction
+        .prefix()
        .outputs
        .iter()
        .map(|output| output.amount.expect("miner_tx amount was None"))

@@ -173,7 +174,7 @@ impl HttpRpcClient {
        tx_blob: tx.serialize(),
        tx_weight: tx.weight(),
        tx_hash,
-        fee: tx.rct_signatures.base.fee,
+        fee: tx_fee(&tx),
        tx,
    }
    })

@@ -199,7 +200,7 @@ mod tests {
    #[allow(clippy::too_many_arguments)]
    async fn assert_eq(
        rpc: &HttpRpcClient,
-        height: u64,
+        height: usize,
        block_hash: [u8; 32],
        pow_hash: [u8; 32],
        generated_coins: u64,
@@ -22,5 +22,6 @@ bytes = { workspace = true }
curve25519-dalek = { workspace = true }
monero-serai = { workspace = true }
serde = { workspace = true, features = ["derive"], optional = true }
+borsh = { workspace = true, optional = true }

[dev-dependencies]

@@ -25,12 +25,12 @@ pub enum BlockchainReadRequest {
    /// Request a block's extended header.
    ///
    /// The input is the block's height.
-    BlockExtendedHeader(u64),
+    BlockExtendedHeader(usize),

    /// Request a block's hash.
    ///
    /// The input is the block's height and the chain it is on.
-    BlockHash(u64, Chain),
+    BlockHash(usize, Chain),

    /// Request to check if we have a block and which [`Chain`] it is on.
    ///

@@ -45,7 +45,7 @@ pub enum BlockchainReadRequest {
    /// Request a range of block extended headers.
    ///
    /// The input is a range of block heights.
-    BlockExtendedHeaderInRange(Range<u64>, Chain),
+    BlockExtendedHeaderInRange(Range<usize>, Chain),

    /// Request the current chain height.
    ///

@@ -53,7 +53,7 @@ pub enum BlockchainReadRequest {
    ChainHeight,

    /// Request the total amount of generated coins (atomic units) at this height.
-    GeneratedCoins(u64),
+    GeneratedCoins(usize),

    /// Request data for multiple outputs.
    ///

@@ -137,7 +137,7 @@ pub enum BlockchainResponse {
    /// Response to [`BlockchainReadRequest::FindBlock`].
    ///
    /// Inner value is the chain and height of the block if found.
-    FindBlock(Option<(Chain, u64)>),
+    FindBlock(Option<(Chain, usize)>),

    /// Response to [`BlockchainReadRequest::FilterUnknownHashes`].
    ///

@@ -152,7 +152,7 @@ pub enum BlockchainResponse {
    /// Response to [`BlockchainReadRequest::ChainHeight`].
    ///
    /// Inner value is the chain height, and the top block's hash.
-    ChainHeight(u64, [u8; 32]),
+    ChainHeight(usize, [u8; 32]),

    /// Response to [`BlockchainReadRequest::GeneratedCoins`].
    ///

@@ -195,7 +195,7 @@ pub enum BlockchainResponse {
    /// Contains the index of the first unknown block and its expected height.
    ///
    /// This will be [`None`] if all blocks were known.
-    FindFirstUnknown(Option<(usize, u64)>),
+    FindFirstUnknown(Option<(usize, usize)>),

    //------------------------------------------------------ Writes
    /// Response to [`BlockchainWriteRequest::WriteBlock`].

@@ -17,13 +17,13 @@ pub struct ExtendedBlockHeader {
    ///
    /// This can also be represented with `cuprate_consensus::HardFork`.
    ///
-    /// This is the same value as [`monero_serai::block::BlockHeader::major_version`].
+    /// This is the same value as [`monero_serai::block::BlockHeader::hardfork_version`].
    pub version: u8,
    /// The block's hard-fork vote.
    ///
    /// This can also be represented with `cuprate_consensus::HardFork`.
    ///
-    /// This is the same value as [`monero_serai::block::BlockHeader::minor_version`].
+    /// This is the same value as [`monero_serai::block::BlockHeader::hardfork_signal`].
    pub vote: u8,
    /// The UNIX time at which the block was mined.
    pub timestamp: u64,

@@ -72,7 +72,7 @@ pub struct VerifiedBlockInformation {
    ///
    /// [`Block::serialize`].
    pub block_blob: Vec<u8>,
-    /// All the transactions in the block, excluding the [`Block::miner_tx`].
+    /// All the transactions in the block, excluding the [`Block::miner_transaction`].
    pub txs: Vec<VerifiedTransactionInformation>,
    /// The block's hash.
    ///

@@ -81,7 +81,7 @@ pub struct VerifiedBlockInformation {
    /// The block's proof-of-work hash.
    pub pow_hash: [u8; 32],
    /// The block's height.
-    pub height: u64,
+    pub height: usize,
    /// The amount of generated coins (atomic units) in this block.
    pub generated_coins: u64,
    /// The adjusted block size, in bytes.

@@ -119,7 +119,7 @@ pub struct AltBlockInformation {
    ///
    /// [`Block::serialize`].
    pub block_blob: Vec<u8>,
-    /// All the transactions in the block, excluding the [`Block::miner_tx`].
+    /// All the transactions in the block, excluding the [`Block::miner_transaction`].
    pub txs: Vec<VerifiedTransactionInformation>,
    /// The block's hash.
    ///

@@ -128,7 +128,7 @@ pub struct AltBlockInformation {
    /// The block's proof-of-work hash.
    pub pow_hash: [u8; 32],
    /// The block's height.
-    pub height: u64,
+    pub height: usize,
    /// The adjusted block size, in bytes.
    pub weight: usize,
    /// The long term block weight, which is the weight factored in with previous block weights.

@@ -144,7 +144,7 @@ pub struct AltBlockInformation {
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct OutputOnChain {
    /// The block height this output belongs to.
-    pub height: u64,
+    pub height: usize,
    /// The timelock of this output, if any.
    pub time_lock: Timelock,
    /// The public key of this output, if any.