Mirror of https://github.com/hinto-janai/cuprate.git (synced 2024-11-16 15:58:14 +00:00)

Cleanup & Document consensus (#65)

* change monero-consensus to cuprate-consensus-rules
* document the context service
* remove the mutex on blockchain context
* comment the context caches
* add back tokio
* document block checks
* typo
* keep the amount of outputs with a certain amount in the output cache
* typo
* nuke cross-block batch verification
* remove RPC scanner
* change how contextual data is got.
* fmt & clippy fixes
* typo
* cargo update
* restore Cargo.lock
* add a verify tx test. + fixes an issue with verifying signatures after BPs
* clippy
* remove bad test
* add more tests and fix a couple bugs
* typos
* move tests and add some more
* typo
* remove scan_chain docs
* fix check for duplicate txs when duplicates are not sequential
* add a proptest for dup txs
* cache tx verification state
* doc updates + move `Vec` to `Arc<[]>`
* clippy
* misc changes
* Apply suggestions from code review

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>
Co-authored-by: SyntheticBird <118022351+SyntheticBird45@users.noreply.github.com>

* fix fmt
* review changes

---------

Co-authored-by: hinto-janai <hinto.janai@protonmail.com>
Co-authored-by: SyntheticBird <118022351+SyntheticBird45@users.noreply.github.com>
This commit is contained in:
parent d21160868c
commit 889e15738b

37 changed files with 1784 additions and 2970 deletions

Cargo.lock (generated): 262 lines changed
|
@ -50,54 +50,6 @@ dependencies = [
|
|||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anstream"
|
||||
version = "0.6.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb"
|
||||
dependencies = [
|
||||
"anstyle",
|
||||
"anstyle-parse",
|
||||
"anstyle-query",
|
||||
"anstyle-wincon",
|
||||
"colorchoice",
|
||||
"utf8parse",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anstyle"
|
||||
version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
|
||||
|
||||
[[package]]
|
||||
name = "anstyle-parse"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
|
||||
dependencies = [
|
||||
"utf8parse",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anstyle-query"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
|
||||
dependencies = [
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anstyle-wincon"
|
||||
version = "3.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
|
||||
dependencies = [
|
||||
"anstyle",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-lock"
|
||||
version = "3.3.0"
|
||||
|
@ -325,52 +277,6 @@ dependencies = [
|
|||
"windows-targets 0.52.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "4.5.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
|
||||
dependencies = [
|
||||
"clap_builder",
|
||||
"clap_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_builder"
|
||||
version = "4.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
|
||||
dependencies = [
|
||||
"anstream",
|
||||
"anstyle",
|
||||
"clap_lex",
|
||||
"strsim",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_derive"
|
||||
version = "4.5.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
|
||||
dependencies = [
|
||||
"heck 0.5.0",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.60",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_lex"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
|
||||
|
||||
[[package]]
|
||||
name = "colorchoice"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
|
||||
|
||||
[[package]]
|
||||
name = "concurrent-queue"
|
||||
version = "2.5.0"
|
||||
|
@ -532,33 +438,48 @@ dependencies = [
|
|||
name = "cuprate-consensus"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"borsh",
|
||||
"clap",
|
||||
"cuprate-consensus-rules",
|
||||
"cuprate-helper",
|
||||
"cuprate-test-utils",
|
||||
"curve25519-dalek",
|
||||
"dalek-ff-group",
|
||||
"dirs",
|
||||
"epee-encoding",
|
||||
"futures",
|
||||
"hex",
|
||||
"monero-consensus",
|
||||
"hex-literal",
|
||||
"monero-serai",
|
||||
"monero-wire",
|
||||
"multiexp",
|
||||
"proptest",
|
||||
"proptest-derive",
|
||||
"randomx-rs",
|
||||
"rayon",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"syn 2.0.60",
|
||||
"thiserror",
|
||||
"thread_local",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tower",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cuprate-consensus-rules"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"crypto-bigint",
|
||||
"cryptonight-cuprate",
|
||||
"cuprate-helper",
|
||||
"curve25519-dalek",
|
||||
"dalek-ff-group",
|
||||
"hex",
|
||||
"hex-literal",
|
||||
"monero-serai",
|
||||
"multiexp",
|
||||
"proptest",
|
||||
"proptest-derive",
|
||||
"rand",
|
||||
"rayon",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -676,7 +597,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "dalek-ff-group"
|
||||
version = "0.4.1"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
|
||||
dependencies = [
|
||||
"crypto-bigint",
|
||||
"curve25519-dalek",
|
||||
|
@ -772,21 +693,6 @@ dependencies = [
|
|||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dleq"
|
||||
version = "0.4.1"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
|
||||
dependencies = [
|
||||
"digest",
|
||||
"ff",
|
||||
"flexible-transcript",
|
||||
"group",
|
||||
"multiexp",
|
||||
"rand_core",
|
||||
"rustversion",
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "doxygen-rs"
|
||||
version = "0.4.2"
|
||||
|
@ -886,7 +792,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "flexible-transcript"
|
||||
version = "0.3.2"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
|
||||
dependencies = [
|
||||
"blake2",
|
||||
"digest",
|
||||
|
@ -1064,12 +970,6 @@ version = "0.4.1"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
|
||||
|
||||
[[package]]
|
||||
name = "heed"
|
||||
version = "0.20.0"
|
||||
|
@ -1196,9 +1096,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "hyper-rustls"
|
||||
version = "0.26.0"
|
||||
version = "0.27.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c"
|
||||
checksum = "908bb38696d7a037a01ebcc68a00634112ac2bbf8ca74e30a2c3d2f4f021302b"
|
||||
dependencies = [
|
||||
"futures-util",
|
||||
"http",
|
||||
|
@ -1452,32 +1352,10 @@ dependencies = [
|
|||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "monero-consensus"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"crypto-bigint",
|
||||
"cryptonight-cuprate",
|
||||
"cuprate-helper",
|
||||
"curve25519-dalek",
|
||||
"dalek-ff-group",
|
||||
"hex",
|
||||
"hex-literal",
|
||||
"monero-serai",
|
||||
"multiexp",
|
||||
"proptest",
|
||||
"proptest-derive",
|
||||
"rand",
|
||||
"rayon",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "monero-generators"
|
||||
version = "0.4.0"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
|
||||
dependencies = [
|
||||
"curve25519-dalek",
|
||||
"dalek-ff-group",
|
||||
|
@ -1519,7 +1397,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "monero-serai"
|
||||
version = "0.1.4-alpha"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
|
||||
dependencies = [
|
||||
"async-lock",
|
||||
"async-trait",
|
||||
|
@ -1527,7 +1405,6 @@ dependencies = [
|
|||
"curve25519-dalek",
|
||||
"dalek-ff-group",
|
||||
"digest_auth",
|
||||
"dleq",
|
||||
"flexible-transcript",
|
||||
"group",
|
||||
"hex",
|
||||
|
@ -1566,7 +1443,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "multiexp"
|
||||
version = "0.4.0"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
|
||||
dependencies = [
|
||||
"ff",
|
||||
"group",
|
||||
|
@ -1576,16 +1453,6 @@ dependencies = [
|
|||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nu-ansi-term"
|
||||
version = "0.46.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
|
||||
dependencies = [
|
||||
"overload",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.18"
|
||||
|
@ -1633,12 +1500,6 @@ version = "0.2.0"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
|
||||
|
||||
[[package]]
|
||||
name = "overload"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
|
||||
|
||||
[[package]]
|
||||
name = "page_size"
|
||||
version = "0.6.0"
|
||||
|
@ -2081,10 +1942,11 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "rustls"
|
||||
version = "0.22.4"
|
||||
version = "0.23.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
|
||||
checksum = "afabcee0551bd1aa3e18e5adbf2c0544722014b899adb31bd186ec638d3da97e"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
"ring",
|
||||
"rustls-pki-types",
|
||||
"rustls-webpki",
|
||||
|
@ -2177,7 +2039,7 @@ version = "0.5.0"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d"
|
||||
dependencies = [
|
||||
"heck 0.4.1",
|
||||
"heck",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.60",
|
||||
|
@ -2264,15 +2126,6 @@ dependencies = [
|
|||
"keccak",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sharded-slab"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook-registry"
|
||||
version = "1.4.2"
|
||||
|
@ -2285,7 +2138,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "simple-request"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
|
||||
dependencies = [
|
||||
"http-body-util",
|
||||
"hyper",
|
||||
|
@ -2335,18 +2188,12 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
|
|||
[[package]]
|
||||
name = "std-shims"
|
||||
version = "0.1.1"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
|
||||
source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
|
||||
dependencies = [
|
||||
"hashbrown 0.14.5",
|
||||
"spin",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
|
||||
|
||||
[[package]]
|
||||
name = "subtle"
|
||||
version = "2.5.0"
|
||||
|
@ -2500,9 +2347,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "tokio-rustls"
|
||||
version = "0.25.0"
|
||||
version = "0.26.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f"
|
||||
checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4"
|
||||
dependencies = [
|
||||
"rustls",
|
||||
"rustls-pki-types",
|
||||
|
@ -2619,18 +2466,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
"valuable",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-log"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
|
||||
dependencies = [
|
||||
"log",
|
||||
"once_cell",
|
||||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -2639,12 +2474,7 @@ version = "0.3.18"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
|
||||
dependencies = [
|
||||
"nu-ansi-term",
|
||||
"sharded-slab",
|
||||
"smallvec",
|
||||
"thread_local",
|
||||
"tracing-core",
|
||||
"tracing-log",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -2703,18 +2533,6 @@ dependencies = [
|
|||
"percent-encoding",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "utf8parse"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
|
||||
|
||||
[[package]]
|
||||
name = "valuable"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.4"
|
||||
|
|
|
@ -52,15 +52,15 @@ chrono = { version = "0.4.31", default-features = false }
|
|||
crypto-bigint = { version = "0.5.5", default-features = false }
|
||||
crossbeam = { version = "0.8.4", default-features = false }
|
||||
curve25519-dalek = { version = "4.1.1", default-features = false }
|
||||
dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
|
||||
dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
dashmap = { version = "5.5.3", default-features = false }
|
||||
dirs = { version = "5.0.1", default-features = false }
|
||||
futures = { version = "0.3.29", default-features = false }
|
||||
hex = { version = "0.4.3", default-features = false }
|
||||
hex-literal = { version = "0.4", default-features = false }
|
||||
indexmap = { version = "2.2.5", default-features = false }
|
||||
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
|
||||
multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
|
||||
monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
|
||||
paste = { version = "1.0.14", default-features = false }
|
||||
pin-project = { version = "1.1.3", default-features = false }
|
||||
randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
|
||||
|
@ -85,7 +85,6 @@ pretty_assertions = { version = "1.4.0" }
|
|||
proptest = { version = "1" }
|
||||
proptest-derive = { version = "0.4.0" }
|
||||
|
||||
|
||||
## TODO:
|
||||
## Potential dependencies.
|
||||
# arc-swap = { version = "1.6.0" } # Atomically swappable Arc<T> | https://github.com/vorner/arc-swap
|
||||
|
|
|
@ -7,34 +7,14 @@ license = "MIT"
|
|||
authors = ["Boog900"]
|
||||
repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
binaries = [
|
||||
"tokio/rt-multi-thread",
|
||||
"tokio/macros",
|
||||
"tower/retry",
|
||||
"tower/balance",
|
||||
"tower/buffer",
|
||||
"tower/timeout",
|
||||
"monero-serai/http-rpc",
|
||||
"dep:tracing-subscriber",
|
||||
"dep:serde_json",
|
||||
"dep:serde",
|
||||
"dep:epee-encoding",
|
||||
"dep:monero-wire",
|
||||
"dep:borsh",
|
||||
"dep:dirs",
|
||||
"dep:clap"
|
||||
]
|
||||
|
||||
[dependencies]
|
||||
cuprate-helper = { path = "../helper", default-features = false, features = ["std", "asynch", "num"] }
|
||||
monero-consensus = {path = "./rules", features = ["rayon"]}
|
||||
cuprate-consensus-rules = { path = "./rules", features = ["rayon"] }
|
||||
|
||||
thiserror = { workspace = true }
|
||||
tower = { workspace = true, features = ["util"] }
|
||||
tracing = { workspace = true, features = ["std", "attributes"] }
|
||||
futures = { workspace = true, features = ["std"] }
|
||||
futures = { workspace = true, features = ["std", "async-await"] }
|
||||
|
||||
randomx-rs = { workspace = true }
|
||||
monero-serai = { workspace = true, features = ["std"] }
|
||||
|
@ -47,23 +27,13 @@ thread_local = { workspace = true }
|
|||
tokio = { workspace = true, features = ["rt"] }
|
||||
tokio-util = { workspace = true }
|
||||
|
||||
hex = "0.4"
|
||||
|
||||
# used in binaries
|
||||
monero-wire = {path="../net/monero-wire", optional = true}
|
||||
epee-encoding = { path="../net/epee-encoding" , optional = true}
|
||||
serde_json = {version = "1", optional = true}
|
||||
serde = {version = "1", optional = true, features = ["derive"]}
|
||||
tracing-subscriber = {version = "0.3", optional = true}
|
||||
borsh = { workspace = true, optional = true}
|
||||
dirs = {version="5.0", optional = true}
|
||||
clap = { version = "4.4.8", optional = true, features = ["derive"] }
|
||||
# here to help cargo to pick a version - remove me
|
||||
syn = "2.0.37"
|
||||
|
||||
hex = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
monero-consensus = {path = "./rules", features = ["proptest"]}
|
||||
cuprate-test-utils = { path = "../test-utils" }
|
||||
cuprate-consensus-rules = {path = "./rules", features = ["proptest"]}
|
||||
|
||||
hex-literal = { workspace = true }
|
||||
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros"]}
|
||||
proptest = { workspace = true }
|
||||
|
|
|
@ -1,37 +1,14 @@
|
|||
# Consensus Rules

The removed README text:

This folder contains 2 crates: `monero-consensus` (rules) and `cuprate-consensus`. `monero-consensus` contains the raw rules and is built to be a more flexible library which requires the user to give the correct data and do minimal calculations; `cuprate-consensus`, on the other hand, contains multiple `tower::Service`s that handle tx/block verification as a whole, with a `context` service that keeps track of blockchain state. `cuprate-consensus` uses `monero-consensus` internally.

If you are looking to use Monero consensus rules it's recommended you try to integrate `cuprate-consensus` and fall back to `monero-consensus` if you need more flexibility.

## scan_chain

`cuprate-consensus` contains a binary, `scan_chain`, which uses multiple RPC connections to scan the blockchain and verify it against the consensus rules. It keeps track of minimal data and uses the RPC connection to get blocks/transactions/outputs.

`scan_chain` was not built for wide usage, so you may find issues; if you do, open an issue in Cuprate's issue tracker and/or join our Matrix room for help. `scan_chain` has only been verified on `x86_64-unknown-linux-gnu`.

`scan_chain` will take at least a day for stagenet and testnet and 6 for mainnet, but expect it to be longer. If you are just looking to verify previous transactions it may be worth using `monerod` with `--fast-block-sync 0`; this will probably be faster to complete and you will have a usable node at the end!

### How to run

First you will need to install Rust/Cargo: https://www.rust-lang.org/tools/install

Next you need to clone Cuprate's git repo, enter the root of Cuprate, then run:

```
cargo run --bin scan_chain -r
```

If you want to pass in options you need to add `--` then the option(s), so to list the options do:

```
cargo run --bin scan_chain -r -- --help
```

The replacement README text:

This folder contains 2 crates:
- `cuprate-consensus-rules` (`rules/` directory)
- `cuprate-consensus`

`cuprate-consensus-rules` contains the raw rules and is built to be a more flexible library which requires the user to give the correct data and do minimal calculations.

`cuprate-consensus` on the other hand contains multiple `tower::Service`s that handle transaction/block verification as a whole, with a `context` service that keeps track of blockchain state. `cuprate-consensus` uses `cuprate-consensus-rules` internally.

If you are looking to use Monero consensus rules it's recommended you try to integrate `cuprate-consensus` and fall back to `cuprate-consensus-rules` if you need more flexibility.
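For a concrete picture of what "multiple `tower::Service`s" means in practice, the (now removed) `scan_chain` binary further down in this diff drives the block verifier like any other tower service. The sketch below is distilled from that deleted code and is only illustrative: the initializer functions, request/response variants, and the `Database` bound are copied from the old binary and may not match the crate's current public API.

```rust
use monero_serai::{block::Block, transaction::Transaction};
use tower::{Service, ServiceExt};

use cuprate_consensus::{
    context::ContextConfig, initialize_blockchain_context, initialize_verifier, Database,
    VerifyBlockRequest, VerifyBlockResponse,
};

/// Verify a batch of (block, transactions) pairs, roughly as the removed `scan_chain` binary did.
async fn verify_blocks<D>(
    database: D,
    batch: Vec<(Block, Vec<Transaction>)>,
) -> Result<(), tower::BoxError>
where
    D: Database + Clone + Send + Sync + 'static,
    D::Future: Send + 'static,
{
    // The context service keeps track of blockchain state for the verifiers.
    let ctx_svc =
        initialize_blockchain_context(ContextConfig::main_net(), database.clone()).await?;

    // The block (and tx) verifiers are built on top of the context service.
    let (mut block_verifier, _tx_verifier) = initialize_verifier(database, ctx_svc).await?;

    // Step 1: batch-prepare the blocks.
    let VerifyBlockResponse::MainChainBatchPrep(blocks, txs) = block_verifier
        .ready()
        .await?
        .call(VerifyBlockRequest::MainChainBatchPrep(batch))
        .await?
    else {
        panic!("unexpected response variant");
    };

    // Step 2: fully verify each prepared block with an ordinary `tower::Service` call:
    // wait for readiness, then send the request.
    for (block, txs) in blocks.into_iter().zip(txs) {
        let VerifyBlockResponse::MainChain(verified) = block_verifier
            .ready()
            .await?
            .call(VerifyBlockRequest::MainChainPrepared(block, txs))
            .await?
        else {
            panic!("unexpected response variant");
        };

        println!("verified block at height {}", verified.height);
    }

    Ok(())
}
```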
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
[package]
|
||||
name = "monero-consensus"
|
||||
name = "cuprate-consensus-rules"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
use std::collections::HashSet;
|
||||
|
||||
use crypto_bigint::{CheckedMul, U256};
|
||||
use monero_serai::block::Block;
|
||||
|
||||
|
@ -196,12 +198,13 @@ fn check_timestamp(block: &Block, median_timestamp: u64) -> Result<(), BlockErro
|
|||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#no-duplicate-transactions>
|
||||
The removed implementation only caught duplicates that happened to be adjacent:

fn check_txs_unique(txs: &[[u8; 32]]) -> Result<(), BlockError> {
    txs.windows(2).try_for_each(|window| {
        if window[0] == window[1] {
            Err(BlockError::DuplicateTransaction)?;
        }

        Ok(())
    })
}

The added implementation detects duplicates anywhere in the slice by collecting into a `HashSet`:

fn check_txs_unique(txs: &[[u8; 32]]) -> Result<(), BlockError> {
    let set = txs.iter().collect::<HashSet<_>>();

    if set.len() == txs.len() {
        Ok(())
    } else {
        Err(BlockError::DuplicateTransaction)
    }
}
||||
|
||||
/// This struct contains the data needed to verify a block, implementers MUST make sure
|
||||
|
@ -275,3 +278,28 @@ pub fn check_block(
|
|||
|
||||
Ok((vote, generated_coins))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use proptest::{collection::vec, prelude::*};
|
||||
|
||||
use super::*;
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn test_check_unique_txs(
|
||||
mut txs in vec(any::<[u8; 32]>(), 2..3000),
|
||||
duplicate in any::<[u8; 32]>(),
|
||||
dup_idx_1 in any::<usize>(),
|
||||
dup_idx_2 in any::<usize>(),
|
||||
) {
|
||||
|
||||
prop_assert!(check_txs_unique(&txs).is_ok());
|
||||
|
||||
txs.insert(dup_idx_1 % txs.len(), duplicate);
|
||||
txs.insert(dup_idx_2 % txs.len(), duplicate);
|
||||
|
||||
prop_assert!(check_txs_unique(&txs).is_err());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,6 +6,12 @@ use crate::hard_forks::{HFVotes, HardFork, NUMB_OF_HARD_FORKS};
|
|||
|
||||
const TEST_WINDOW_SIZE: u64 = 25;
|
||||
|
||||
#[test]
|
||||
fn target_block_time() {
|
||||
assert_eq!(HardFork::V1.block_time().as_secs(), 60);
|
||||
assert_eq!(HardFork::V2.block_time().as_secs(), 120);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn next_hard_forks() {
|
||||
let mut prev = HardFork::V1;
|
||||
|
|
|
@ -207,3 +207,17 @@ pub fn check_miner_tx(
|
|||
|
||||
check_total_output_amt(total_outs, reward, total_fees, hf)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use proptest::prelude::*;
|
||||
|
||||
use super::*;
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn tail_emission(generated_coins in any::<u64>(), hf in any::<HardFork>()) {
|
||||
prop_assert!(calculate_base_reward(generated_coins, &hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
use std::cmp::Ordering;
|
||||
|
||||
use monero_serai::ringct::RctType;
|
||||
use std::{cmp::Ordering, collections::HashSet, sync::Arc};
|
||||
|
||||
use monero_serai::transaction::{Input, Output, Timelock, Transaction};
|
||||
use multiexp::BatchVerifier;
|
||||
|
@ -11,6 +12,8 @@ use crate::{
|
|||
mod contextual_data;
|
||||
mod ring_ct;
|
||||
mod ring_signatures;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub use contextual_data::*;
|
||||
pub use ring_ct::RingCTError;
|
||||
|
@ -57,7 +60,7 @@ pub enum TransactionError {
|
|||
#[error("The transaction inputs are not ordered.")]
|
||||
InputsAreNotOrdered,
|
||||
#[error("The transaction spends a decoy which is too young.")]
|
||||
OneOrMoreDecoysLocked,
|
||||
OneOrMoreRingMembersLocked,
|
||||
#[error("The transaction inputs overflow.")]
|
||||
InputsOverflow,
|
||||
#[error("The transaction has no inputs.")]
|
||||
|
@ -124,7 +127,7 @@ pub(crate) fn check_output_types(
|
|||
) -> Result<(), TransactionError> {
|
||||
if hf == &HardFork::V15 {
|
||||
for outs in outputs.windows(2) {
|
||||
if outs[0].view_tag.is_some() != outs[0].view_tag.is_some() {
|
||||
if outs[0].view_tag.is_some() != outs[1].view_tag.is_some() {
|
||||
return Err(TransactionError::OutputTypeInvalid);
|
||||
}
|
||||
}
|
||||
|
@ -213,7 +216,10 @@ fn check_number_of_outputs(
|
|||
}
|
||||
|
||||
match rct_type {
|
||||
RctType::Bulletproofs | RctType::BulletproofsCompactAmount | RctType::BulletproofsPlus => {
|
||||
RctType::Bulletproofs
|
||||
| RctType::BulletproofsCompactAmount
|
||||
| RctType::Clsag
|
||||
| RctType::BulletproofsPlus => {
|
||||
if outputs <= MAX_BULLETPROOFS_OUTPUTS {
|
||||
Ok(())
|
||||
} else {
|
||||
|
@ -247,7 +253,7 @@ fn check_outputs_semantics(
|
|||
/// Checks if an outputs unlock time has passed.
|
||||
///
|
||||
/// <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html>
|
||||
fn output_unlocked(
|
||||
pub fn output_unlocked(
|
||||
time_lock: &Timelock,
|
||||
current_chain_height: u64,
|
||||
current_time_lock_timestamp: u64,
|
||||
|
@ -272,7 +278,7 @@ fn check_block_time_lock(unlock_height: u64, current_chain_height: u64) -> bool
|
|||
unlock_height <= current_chain_height
|
||||
}
|
||||
|
||||
/// Returns if a locked output, which uses a block height, can be spend.
|
||||
/// Returns if a locked output, which uses a block height, can be spent.
|
||||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#timestamp>
|
||||
fn check_timestamp_time_lock(
|
||||
|
@ -303,7 +309,7 @@ fn check_all_time_locks(
|
|||
hf,
|
||||
) {
|
||||
tracing::debug!("Transaction invalid: one or more inputs locked, lock: {time_lock:?}.");
|
||||
Err(TransactionError::OneOrMoreDecoysLocked)
|
||||
Err(TransactionError::OneOrMoreRingMembersLocked)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
|
@ -316,7 +322,7 @@ fn check_all_time_locks(
|
|||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#minimum-decoys>
|
||||
/// && <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#equal-number-of-decoys>
|
||||
fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
|
||||
pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
|
||||
if hf == &HardFork::V15 {
|
||||
// Hard-fork 15 allows both v14 and v16 rules
|
||||
return check_decoy_info(decoy_info, &HardFork::V14)
|
||||
|
@ -347,26 +353,16 @@ fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), Transac
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Checks the inputs key images for torsion and for duplicates in the spent_kis list.
|
||||
/// Checks the inputs key images for torsion.
|
||||
///
|
||||
/// The `spent_kis` parameter is not meant to be a complete list of key images, just a list of related transactions
|
||||
/// key images, for example transactions in a block. The chain will be checked for duplicates later.
|
||||
///
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#unique-key-image>
|
||||
/// && <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#torsion-free-key-image>
|
||||
fn check_key_images(
|
||||
input: &Input,
|
||||
spent_kis: &mut HashSet<[u8; 32]>,
|
||||
) -> Result<(), TransactionError> {
|
||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#torsion-free-key-image>
|
||||
fn check_key_images(input: &Input) -> Result<(), TransactionError> {
|
||||
match input {
|
||||
Input::ToKey { key_image, .. } => {
|
||||
// this happens in monero-serai but we may as well duplicate the check.
|
||||
if !key_image.is_torsion_free() {
|
||||
return Err(TransactionError::KeyImageIsNotInPrimeSubGroup);
|
||||
}
|
||||
if !spent_kis.insert(key_image.compress().to_bytes()) {
|
||||
return Err(TransactionError::KeyImageSpent);
|
||||
}
|
||||
}
|
||||
_ => Err(TransactionError::IncorrectInputType)?,
|
||||
}
|
||||
|
@ -455,7 +451,7 @@ fn check_10_block_lock(
|
|||
tracing::debug!(
|
||||
"Transaction invalid: One or more ring members younger than 10 blocks."
|
||||
);
|
||||
Err(TransactionError::OneOrMoreDecoysLocked)
|
||||
Err(TransactionError::OneOrMoreRingMembersLocked)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
|
@ -510,23 +506,19 @@ fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result<u64, Transa
|
|||
///
|
||||
/// Contextual rules are rules that require blockchain context to check.
|
||||
///
|
||||
/// This function does not check signatures.
|
||||
///
|
||||
/// The `spent_kis` parameter is not meant to be a complete list of key images, just a list of related transactions
|
||||
/// key images, for example transactions in a block. The chain should be checked for duplicates later.
|
||||
/// This function does not check signatures or for duplicate key-images.
|
||||
fn check_inputs_contextual(
|
||||
inputs: &[Input],
|
||||
tx_ring_members_info: &TxRingMembersInfo,
|
||||
current_chain_height: u64,
|
||||
hf: &HardFork,
|
||||
spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>,
|
||||
) -> Result<(), TransactionError> {
|
||||
// This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members.
|
||||
// When picking ring members monerod will only look in the DB at past blocks so an output has to be younger
|
||||
// than this transaction to be used in this tx.
|
||||
if tx_ring_members_info.youngest_used_out_height >= current_chain_height {
|
||||
tracing::debug!("Transaction invalid: One or more ring members too young.");
|
||||
Err(TransactionError::OneOrMoreDecoysLocked)?;
|
||||
Err(TransactionError::OneOrMoreRingMembersLocked)?;
|
||||
}
|
||||
|
||||
check_10_block_lock(
|
||||
|
@ -541,11 +533,9 @@ fn check_inputs_contextual(
|
|||
assert_eq!(hf, &HardFork::V1);
|
||||
}
|
||||
|
||||
let mut spent_kis_lock = spent_kis.lock().unwrap();
|
||||
for input in inputs {
|
||||
check_key_images(input, &mut spent_kis_lock)?;
|
||||
check_key_images(input)?;
|
||||
}
|
||||
drop(spent_kis_lock);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -608,7 +598,7 @@ fn transaction_weight_limit(hf: &HardFork) -> usize {
|
|||
/// - The tx-pool will use the current hard-fork
|
||||
/// - When syncing the hard-fork is in the block header.
|
||||
///
|
||||
/// To fully verify a transaction this must be accompanied with [`check_transaction_contextual`]
|
||||
/// To fully verify a transaction this must be accompanied by [`check_transaction_contextual`]
|
||||
///
|
||||
pub fn check_transaction_semantic(
|
||||
tx: &Transaction,
|
||||
|
@ -655,9 +645,11 @@ pub fn check_transaction_semantic(
|
|||
|
||||
/// Checks the transaction is contextually valid.
|
||||
///
|
||||
/// To fully verify a transaction this must be accompanied with [`check_transaction_semantic`]
|
||||
/// To fully verify a transaction this must be accompanied by [`check_transaction_semantic`].
|
||||
///
|
||||
/// `current_time_lock_timestamp` must be: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#getting-the-current-time>
|
||||
/// This function also does _not_ check for duplicate key-images: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#unique-key-image>.
|
||||
///
|
||||
/// `current_time_lock_timestamp` must be: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#getting-the-current-time>.
|
||||
|
||||
pub fn check_transaction_contextual(
|
||||
tx: &Transaction,
|
||||
|
@ -665,7 +657,6 @@ pub fn check_transaction_contextual(
|
|||
current_chain_height: u64,
|
||||
current_time_lock_timestamp: u64,
|
||||
hf: &HardFork,
|
||||
spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>,
|
||||
) -> Result<(), TransactionError> {
|
||||
let tx_version = TxVersion::from_raw(tx.prefix.version)
|
||||
.ok_or(TransactionError::TransactionVersionInvalid)?;
|
||||
|
@ -675,7 +666,6 @@ pub fn check_transaction_contextual(
|
|||
tx_ring_members_info,
|
||||
current_chain_height,
|
||||
hf,
|
||||
spent_kis,
|
||||
)?;
|
||||
check_tx_version(&tx_ring_members_info.decoy_info, &tx_version, hf)?;
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ use monero_serai::transaction::{Input, Timelock};
|
|||
use crate::{transactions::TransactionError, HardFork, TxVersion};
|
||||
|
||||
/// An already approved previous transaction output.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct OutputOnChain {
|
||||
pub height: u64,
|
||||
pub time_lock: Timelock,
|
||||
|
@ -67,10 +67,10 @@ pub fn insert_ring_member_ids(
|
|||
/// Get the ring members for the inputs from the outputs on the chain.
|
||||
///
|
||||
/// Will error if `outputs` does not contain the outputs needed.
|
||||
pub fn get_ring_members_for_inputs<'a>(
|
||||
get_outputs: impl Fn(u64, u64) -> Option<&'a OutputOnChain>,
|
||||
pub fn get_ring_members_for_inputs(
|
||||
get_outputs: impl Fn(u64, u64) -> Option<OutputOnChain>,
|
||||
inputs: &[Input],
|
||||
) -> Result<Vec<Vec<&'a OutputOnChain>>, TransactionError> {
|
||||
) -> Result<Vec<Vec<OutputOnChain>>, TransactionError> {
|
||||
inputs
|
||||
.iter()
|
||||
.map(|inp| match inp {
|
||||
|
@ -105,7 +105,7 @@ pub enum Rings {
|
|||
impl Rings {
|
||||
/// Builds the rings for the transaction inputs, from the given outputs.
|
||||
fn new(
|
||||
outputs: Vec<Vec<&OutputOnChain>>,
|
||||
outputs: Vec<Vec<OutputOnChain>>,
|
||||
tx_version: TxVersion,
|
||||
) -> Result<Rings, TransactionError> {
|
||||
Ok(match tx_version {
|
||||
|
@ -141,7 +141,7 @@ impl Rings {
|
|||
}
|
||||
}
|
||||
|
||||
/// Information on the outputs the transaction is is referencing for inputs (ring members).
|
||||
/// Information on the outputs the transaction is referencing for inputs (ring members).
|
||||
#[derive(Debug)]
|
||||
pub struct TxRingMembersInfo {
|
||||
pub rings: Rings,
|
||||
|
@ -149,7 +149,6 @@ pub struct TxRingMembersInfo {
|
|||
pub decoy_info: Option<DecoyInfo>,
|
||||
pub youngest_used_out_height: u64,
|
||||
pub time_locked_outs: Vec<Timelock>,
|
||||
pub hf: HardFork,
|
||||
}
|
||||
|
||||
impl TxRingMembersInfo {
|
||||
|
@ -157,10 +156,9 @@ impl TxRingMembersInfo {
|
|||
///
|
||||
/// The used outs must be all the ring members used in the transactions inputs.
|
||||
pub fn new(
|
||||
used_outs: Vec<Vec<&OutputOnChain>>,
|
||||
used_outs: Vec<Vec<OutputOnChain>>,
|
||||
decoy_info: Option<DecoyInfo>,
|
||||
tx_version: TxVersion,
|
||||
hf: HardFork,
|
||||
) -> Result<TxRingMembersInfo, TransactionError> {
|
||||
Ok(TxRingMembersInfo {
|
||||
youngest_used_out_height: used_outs
|
||||
|
@ -187,7 +185,6 @@ impl TxRingMembersInfo {
|
|||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect(),
|
||||
hf,
|
||||
rings: Rings::new(used_outs, tx_version)?,
|
||||
decoy_info,
|
||||
})
|
||||
|
@ -202,7 +199,7 @@ impl TxRingMembersInfo {
|
|||
/// - The top block hash is the same as when this data was retrieved (the blockchain state is unchanged).
|
||||
///
|
||||
/// <https://cuprate.github.io/monero-book/consensus_rules/transactions/decoys.html>
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct DecoyInfo {
|
||||
/// The number of inputs that have enough outputs on the chain to mix with.
|
||||
pub mixable: usize,
|
||||
|
@ -229,7 +226,7 @@ impl DecoyInfo {
|
|||
///
|
||||
pub fn new(
|
||||
inputs: &[Input],
|
||||
outputs_with_amount: &HashMap<u64, usize>,
|
||||
outputs_with_amount: impl Fn(u64) -> usize,
|
||||
hf: &HardFork,
|
||||
) -> Result<DecoyInfo, TransactionError> {
|
||||
let mut min_decoys = usize::MAX;
|
||||
|
@ -247,9 +244,7 @@ impl DecoyInfo {
|
|||
..
|
||||
} => {
|
||||
if let Some(amount) = amount {
|
||||
let outs_with_amt = *outputs_with_amount
|
||||
.get(amount)
|
||||
.expect("outputs_with_amount does not include needed amount.");
|
||||
let outs_with_amt = outputs_with_amount(*amount);
|
||||
|
||||
// <https://cuprate.github.io/monero-book/consensus_rules/transactions/decoys.html#mixable-and-unmixable-inputs>
|
||||
if outs_with_amt <= minimum_decoys {
|
||||
|
|
|
@ -154,6 +154,13 @@ pub(crate) fn check_input_signatures(
|
|||
Err(RingCTError::RingInvalid)?;
|
||||
}
|
||||
|
||||
let pseudo_outs = match &rct_sig.prunable {
|
||||
RctPrunable::MlsagBulletproofs { pseudo_outs, .. }
|
||||
| RctPrunable::Clsag { pseudo_outs, .. } => pseudo_outs.as_slice(),
|
||||
RctPrunable::MlsagBorromean { .. } => rct_sig.base.pseudo_outs.as_slice(),
|
||||
RctPrunable::AggregateMlsagBorromean { .. } | RctPrunable::Null => &[],
|
||||
};
|
||||
|
||||
match &rct_sig.prunable {
|
||||
RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?,
|
||||
RctPrunable::AggregateMlsagBorromean { mlsag, .. } => {
|
||||
|
@ -174,7 +181,7 @@ pub(crate) fn check_input_signatures(
|
|||
}
|
||||
RctPrunable::MlsagBorromean { mlsags, .. }
|
||||
| RctPrunable::MlsagBulletproofs { mlsags, .. } => try_par_iter(mlsags)
|
||||
.zip(&rct_sig.base.pseudo_outs)
|
||||
.zip(pseudo_outs)
|
||||
.zip(inputs)
|
||||
.zip(rings)
|
||||
.try_for_each(|(((mlsag, pseudo_out), input), ring)| {
|
||||
|
@ -189,7 +196,7 @@ pub(crate) fn check_input_signatures(
|
|||
)?)
|
||||
}),
|
||||
RctPrunable::Clsag { clsags, .. } => try_par_iter(clsags)
|
||||
.zip(&rct_sig.base.pseudo_outs)
|
||||
.zip(pseudo_outs)
|
||||
.zip(inputs)
|
||||
.zip(rings)
|
||||
.try_for_each(|(((clsags, pseudo_out), input), ring)| {
|
||||
|
|
consensus/rules/src/transactions/tests.rs (new file, 298 lines)
|
@ -0,0 +1,298 @@
|
|||
use std::ops::Range;
|
||||
|
||||
use curve25519_dalek::{
|
||||
constants::{ED25519_BASEPOINT_POINT, EIGHT_TORSION},
|
||||
edwards::CompressedEdwardsY,
|
||||
EdwardsPoint,
|
||||
};
|
||||
use proptest::{collection::vec, prelude::*};
|
||||
|
||||
use monero_serai::transaction::Output;
|
||||
|
||||
use super::*;
|
||||
use crate::decomposed_amount::decomposed_amounts;
|
||||
|
||||
#[test]
|
||||
fn test_check_output_amount_v1() {
|
||||
for amount in decomposed_amounts() {
|
||||
assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok())
|
||||
}
|
||||
|
||||
proptest!(|(amount in any::<u64>().prop_filter("value_decomposed", |val| !is_decomposed_amount(val)))| {
|
||||
prop_assert!(check_output_amount_v1(amount, &HardFork::V2).is_err());
|
||||
prop_assert!(check_output_amount_v1(amount, &HardFork::V1).is_ok())
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sum_outputs() {
|
||||
let mut output_10 = Output {
|
||||
key: CompressedEdwardsY([0; 32]),
|
||||
amount: None,
|
||||
view_tag: None,
|
||||
};
|
||||
|
||||
output_10.amount = Some(10);
|
||||
|
||||
let mut outputs_20 = output_10.clone();
|
||||
outputs_20.amount = Some(20);
|
||||
|
||||
let outs = [output_10, outputs_20];
|
||||
|
||||
let sum = sum_outputs(&outs, &HardFork::V16, &TxVersion::RingSignatures).unwrap();
|
||||
assert_eq!(sum, 30);
|
||||
|
||||
assert!(sum_outputs(&outs, &HardFork::V16, &TxVersion::RingCT).is_err())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_decoy_info() {
|
||||
let decoy_info = DecoyInfo {
|
||||
mixable: 0,
|
||||
not_mixable: 0,
|
||||
min_decoys: minimum_decoys(&HardFork::V8),
|
||||
max_decoys: minimum_decoys(&HardFork::V8) + 1,
|
||||
};
|
||||
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V16).is_err());
|
||||
|
||||
let mut decoy_info = DecoyInfo {
|
||||
mixable: 0,
|
||||
not_mixable: 0,
|
||||
min_decoys: minimum_decoys(&HardFork::V8) - 1,
|
||||
max_decoys: minimum_decoys(&HardFork::V8) + 1,
|
||||
};
|
||||
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
|
||||
|
||||
decoy_info.not_mixable = 1;
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
|
||||
|
||||
decoy_info.mixable = 2;
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
|
||||
|
||||
let mut decoy_info = DecoyInfo {
|
||||
mixable: 0,
|
||||
not_mixable: 0,
|
||||
min_decoys: minimum_decoys(&HardFork::V12),
|
||||
max_decoys: minimum_decoys(&HardFork::V12) + 1,
|
||||
};
|
||||
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_err());
|
||||
|
||||
decoy_info.max_decoys = decoy_info.min_decoys;
|
||||
assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_torsion_ki() {
|
||||
for &key_image in EIGHT_TORSION[1..].iter() {
|
||||
assert!(check_key_images(&Input::ToKey {
|
||||
key_image,
|
||||
amount: None,
|
||||
key_offsets: vec![],
|
||||
})
|
||||
.is_err())
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a strategy that resolves to a [`RctType`] that uses
|
||||
/// BPs(+).
|
||||
#[allow(unreachable_code)]
|
||||
#[allow(clippy::diverging_sub_expression)]
|
||||
fn bulletproof_rct_type() -> BoxedStrategy<RctType> {
|
||||
return prop_oneof![
|
||||
Just(RctType::Bulletproofs),
|
||||
Just(RctType::BulletproofsCompactAmount),
|
||||
Just(RctType::Clsag),
|
||||
Just(RctType::BulletproofsPlus),
|
||||
]
|
||||
.boxed();
|
||||
|
||||
// Here to make sure this is updated when needed.
|
||||
match unreachable!() {
|
||||
RctType::Null => {}
|
||||
RctType::MlsagAggregate => {}
|
||||
RctType::MlsagIndividual => {}
|
||||
RctType::Bulletproofs => {}
|
||||
RctType::BulletproofsCompactAmount => {}
|
||||
RctType::Clsag => {}
|
||||
RctType::BulletproofsPlus => {}
|
||||
};
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a valid prime-order point.
|
||||
fn random_point()(bytes in any::<[u8; 32]>()) -> EdwardsPoint {
|
||||
EdwardsPoint::mul_base_clamped(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a valid torsioned point.
|
||||
fn random_torsioned_point()(point in random_point(), torsion in 1..8_usize ) -> EdwardsPoint {
|
||||
point + curve25519_dalek::constants::EIGHT_TORSION[torsion]
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a random [`Output`].
|
||||
///
|
||||
/// `key` is always valid.
|
||||
fn random_out(rct: bool, view_tagged: bool)(
|
||||
point in random_point(),
|
||||
amount in any::<u64>(),
|
||||
view_tag in any::<u8>(),
|
||||
) -> Output {
|
||||
Output {
|
||||
amount: if rct { None } else { Some(amount) },
|
||||
key: point.compress(),
|
||||
view_tag: if view_tagged { Some(view_tag) } else { None },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a random [`Output`].
|
||||
///
|
||||
/// `key` is always valid but torsioned.
|
||||
fn random_torsioned_out(rct: bool, view_tagged: bool)(
|
||||
point in random_torsioned_point(),
|
||||
amount in any::<u64>(),
|
||||
view_tag in any::<u8>(),
|
||||
) -> Output {
|
||||
Output {
|
||||
amount: if rct { None } else { Some(amount) },
|
||||
key: point.compress(),
|
||||
view_tag: if view_tagged { Some(view_tag) } else { None },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a [`HardFork`] in a specific range.
|
||||
fn hf_in_range(range: Range<u8>)(
|
||||
hf in range,
|
||||
) -> HardFork {
|
||||
HardFork::from_version(hf).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a [`Timelock`] that is locked given a height and time.
|
||||
fn locked_timelock(height: u64, time_for_time_lock: u64)(
|
||||
timebased in any::<bool>(),
|
||||
lock_height in (height+1)..500_000_001,
|
||||
time_for_time_lock in (time_for_time_lock+121)..,
|
||||
) -> Timelock {
|
||||
if timebased || lock_height > 500_000_000 {
|
||||
Timelock::Time(time_for_time_lock)
|
||||
} else {
|
||||
Timelock::Block(usize::try_from(lock_height).unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop_compose! {
|
||||
/// Returns a [`Timelock`] that is unlocked given a height and time.
|
||||
fn unlocked_timelock(height: u64, time_for_time_lock: u64)(
|
||||
ty in 0..3,
|
||||
lock_height in 0..(height+1),
|
||||
time_for_time_lock in 0..(time_for_time_lock+121),
|
||||
) -> Timelock {
|
||||
match ty {
|
||||
0 => Timelock::None,
|
||||
1 => Timelock::Time(time_for_time_lock),
|
||||
_ => Timelock::Block(usize::try_from(lock_height).unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn test_check_output_keys(
|
||||
outs in vec(random_out(true, true), 0..16),
|
||||
torsioned_outs in vec(random_torsioned_out(false, true), 0..16)
|
||||
) {
|
||||
prop_assert!(check_output_keys(&outs).is_ok());
|
||||
prop_assert!(check_output_keys(&torsioned_outs).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn output_types(
|
||||
mut view_tag_outs in vec(random_out(true, true), 1..16),
|
||||
mut non_view_tag_outs in vec(random_out(true, false), 1..16),
|
||||
hf_no_view_tags in hf_in_range(1..14),
|
||||
hf_view_tags in hf_in_range(16..17),
|
||||
) {
|
||||
prop_assert!(check_output_types(&view_tag_outs, &hf_view_tags).is_ok());
|
||||
prop_assert!(check_output_types(&view_tag_outs, &hf_no_view_tags).is_err());
|
||||
|
||||
|
||||
prop_assert!(check_output_types(&non_view_tag_outs, &hf_no_view_tags).is_ok());
|
||||
prop_assert!(check_output_types(&non_view_tag_outs, &hf_view_tags).is_err());
|
||||
|
||||
prop_assert!(check_output_types(&non_view_tag_outs, &HardFork::V15).is_ok());
|
||||
prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_ok());
|
||||
view_tag_outs.append(&mut non_view_tag_outs);
|
||||
prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize, rct_type in bulletproof_rct_type()) {
|
||||
prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX, rct_type in bulletproof_rct_type()) {
|
||||
prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_output_amount_v2(amt in 1..u64::MAX) {
|
||||
prop_assert!(check_output_amount_v2(amt).is_err());
|
||||
prop_assert!(check_output_amount_v2(0).is_ok())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_unlock_time(height in 1..u64::MAX) {
|
||||
prop_assert!(check_block_time_lock(height, height));
|
||||
prop_assert!(!check_block_time_lock(height, height - 1));
|
||||
prop_assert!(check_block_time_lock(height, height+1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_time_lock(timestamp in 500_000_001..u64::MAX) {
|
||||
prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, &HardFork::V16));
|
||||
prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, &HardFork::V16));
|
||||
prop_assert!(check_timestamp_time_lock(timestamp, timestamp, &HardFork::V16));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_time_locks(
|
||||
mut locked_locks in vec(locked_timelock(5_000, 100_000_000), 1..50),
|
||||
mut unlocked_locks in vec(unlocked_timelock(5_000, 100_000_000), 1..50)
|
||||
) {
|
||||
assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
|
||||
assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_ok());
|
||||
|
||||
unlocked_locks.append(&mut locked_locks);
|
||||
assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_input_has_decoys(key_offsets in vec(any::<u64>(), 1..10_000)) {
|
||||
assert!(check_input_has_decoys(&Input::ToKey {
|
||||
key_image: ED25519_BASEPOINT_POINT,
|
||||
amount: None,
|
||||
key_offsets,
|
||||
}).is_ok());
|
||||
|
||||
assert!(check_input_has_decoys(&Input::ToKey {
|
||||
key_image: ED25519_BASEPOINT_POINT,
|
||||
amount: None,
|
||||
key_offsets: vec![],
|
||||
}).is_err());
|
||||
}
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
use std::cell::UnsafeCell;
|
||||
use std::{cell::RefCell, ops::DerefMut};
|
||||
|
||||
use multiexp::BatchVerifier as InternalBatchVerifier;
|
||||
use rayon::prelude::*;
|
||||
|
@ -8,7 +8,7 @@ use crate::ConsensusError;
|
|||
|
||||
/// A multithreaded batch verifier.
|
||||
pub struct MultiThreadedBatchVerifier {
|
||||
internal: ThreadLocal<UnsafeCell<InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>>>,
|
||||
internal: ThreadLocal<RefCell<InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>>>,
|
||||
}
|
||||
|
||||
impl MultiThreadedBatchVerifier {
|
||||
|
@ -27,19 +27,15 @@ impl MultiThreadedBatchVerifier {
|
|||
) -> Result<R, ConsensusError> {
|
||||
let verifier_cell = self
|
||||
.internal
|
||||
.get_or(|| UnsafeCell::new(InternalBatchVerifier::new(0)));
|
||||
// SAFETY: This is safe for 2 reasons:
|
||||
// 1. each thread gets a different batch verifier.
|
||||
// 2. only this function `queue_statement` will get the inner batch verifier, it's private.
|
||||
//
|
||||
// TODO: it's probably ok to just use RefCell
|
||||
stmt(unsafe { &mut *verifier_cell.get() })
|
||||
.get_or(|| RefCell::new(InternalBatchVerifier::new(8)));
|
||||
// TODO: this is not ok as a rayon par_iter could be called in stmt.
|
||||
stmt(verifier_cell.borrow_mut().deref_mut())
|
||||
}
|
||||
|
||||
pub fn verify(self) -> bool {
|
||||
self.internal
|
||||
.into_iter()
|
||||
.map(UnsafeCell::into_inner)
|
||||
.map(RefCell::into_inner)
|
||||
.par_bridge()
|
||||
.find_any(|batch_verifier| !batch_verifier.verify_vartime())
|
||||
.is_none()
|
||||
|
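The change above keeps the overall design (one verifier per rayon worker thread, all merged at the end) but swaps the `UnsafeCell` for a `RefCell`, trading the `unsafe` block for a runtime borrow check; as the new TODO notes, that borrow could still panic if the closure passed to `queue_statement` itself runs nested rayon work on the same thread. A minimal, self-contained sketch of the same `ThreadLocal<RefCell<_>>` accumulation pattern, using a plain counter instead of `multiexp`'s `BatchVerifier` (whose exact API is not shown in this diff), looks like this:

```rust
use std::cell::RefCell;

use rayon::prelude::*;
use thread_local::ThreadLocal;

/// Per-thread accumulator: every rayon worker lazily gets its own `RefCell`,
/// so queueing needs no locks and no `unsafe`.
struct PerThreadSums {
    internal: ThreadLocal<RefCell<u64>>,
}

impl PerThreadSums {
    fn new() -> Self {
        Self {
            internal: ThreadLocal::new(),
        }
    }

    /// Add a value to the calling thread's local accumulator.
    fn queue(&self, value: u64) {
        let cell = self.internal.get_or(|| RefCell::new(0));
        *cell.borrow_mut() += value;
    }

    /// Consume all per-thread accumulators and merge them.
    fn finish(self) -> u64 {
        self.internal.into_iter().map(RefCell::into_inner).sum()
    }
}

fn main() {
    let sums = PerThreadSums::new();

    // Work is queued from many rayon threads in parallel...
    (0u64..10_000).into_par_iter().for_each(|i| sums.queue(i));

    // ...and merged once at the end, analogous to `MultiThreadedBatchVerifier::verify`.
    assert_eq!(sums.finish(), 10_000 * 9_999 / 2);
}
```

Each worker only ever touches its own cell, so nothing blocks on the hot path; the single merge pass happens once, after all parallel work has finished.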
|
|
@ -1,374 +0,0 @@
|
|||
#[cfg(feature = "binaries")]
|
||||
mod bin {
|
||||
use std::{ops::Range, path::PathBuf, sync::Arc};
|
||||
|
||||
use clap::Parser;
|
||||
use futures::{channel::mpsc, SinkExt, StreamExt};
|
||||
use monero_serai::{block::Block, transaction::Transaction};
|
||||
use tokio::sync::RwLock;
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::level_filters::LevelFilter;
|
||||
|
||||
use cuprate_helper::network::Network;
|
||||
|
||||
use cuprate_consensus::{
|
||||
context::{
|
||||
BlockChainContextRequest, BlockChainContextResponse, ContextConfig,
|
||||
UpdateBlockchainCacheData,
|
||||
},
|
||||
initialize_blockchain_context, initialize_verifier,
|
||||
rpc::{cache::ScanningCache, init_rpc_load_balancer, RpcConfig},
|
||||
Database, DatabaseRequest, DatabaseResponse, VerifiedBlockInformation, VerifyBlockRequest,
|
||||
VerifyBlockResponse,
|
||||
};
|
||||
|
||||
const MAX_BLOCKS_IN_RANGE: u64 = 500;
|
||||
const BATCHES_IN_REQUEST: u64 = 3;
|
||||
const MAX_BLOCKS_HEADERS_IN_RANGE: u64 = 1000;
|
||||
|
||||
/// Calls for a batch of blocks, returning the response and the time it took.
|
||||
async fn call_batch<D: Database>(
|
||||
range: Range<u64>,
|
||||
database: D,
|
||||
) -> Result<DatabaseResponse, tower::BoxError> {
|
||||
database
|
||||
.oneshot(DatabaseRequest::BlockBatchInRange(range))
|
||||
.await
|
||||
}
|
||||
|
||||
async fn update_cache_and_context<Ctx>(
|
||||
cache: &RwLock<ScanningCache>,
|
||||
context_updater: &mut Ctx,
|
||||
verified_block_info: VerifiedBlockInformation,
|
||||
) -> Result<(), tower::BoxError>
|
||||
where
|
||||
Ctx: tower::Service<
|
||||
BlockChainContextRequest,
|
||||
Response = BlockChainContextResponse,
|
||||
Error = tower::BoxError,
|
||||
>,
|
||||
{
|
||||
// add the new block to the cache
|
||||
cache.write().await.add_new_block_data(
|
||||
verified_block_info.generated_coins,
|
||||
&verified_block_info.block.miner_tx,
|
||||
&verified_block_info.txs,
|
||||
);
|
||||
// update the chain context svc with the new block
|
||||
context_updater
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockChainContextRequest::Update(
|
||||
UpdateBlockchainCacheData {
|
||||
new_top_hash: verified_block_info.block_hash,
|
||||
height: verified_block_info.height,
|
||||
timestamp: verified_block_info.block.header.timestamp,
|
||||
weight: verified_block_info.weight,
|
||||
long_term_weight: verified_block_info.long_term_weight,
|
||||
vote: verified_block_info.hf_vote,
|
||||
generated_coins: verified_block_info.generated_coins,
|
||||
cumulative_difficulty: verified_block_info.cumulative_difficulty,
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn call_blocks<D>(
|
||||
mut block_chan: mpsc::Sender<Vec<(Block, Vec<Transaction>)>>,
|
||||
start_height: u64,
|
||||
chain_height: u64,
|
||||
database: D,
|
||||
) -> Result<(), tower::BoxError>
|
||||
where
|
||||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
let mut next_fut = tokio::spawn(call_batch(
|
||||
start_height
|
||||
..(start_height + (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST)).min(chain_height),
|
||||
database.clone(),
|
||||
));
|
||||
|
||||
for next_batch_start in (start_height..chain_height)
|
||||
.step_by((MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST) as usize)
|
||||
.skip(1)
|
||||
{
|
||||
// Call the next batch while we handle this batch.
|
||||
let current_fut = std::mem::replace(
|
||||
&mut next_fut,
|
||||
tokio::spawn(call_batch(
|
||||
next_batch_start
|
||||
..(next_batch_start + (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST))
|
||||
.min(chain_height),
|
||||
database.clone(),
|
||||
)),
|
||||
);
|
||||
|
||||
let DatabaseResponse::BlockBatchInRange(blocks) = current_fut.await?? else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
tracing::info!(
|
||||
"Got batch: {:?}, chain height: {}",
|
||||
(next_batch_start - (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST))..(next_batch_start),
|
||||
chain_height
|
||||
);
|
||||
|
||||
block_chan.send(blocks).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn scan_chain<D>(
|
||||
cache: Arc<RwLock<ScanningCache>>,
|
||||
save_file: PathBuf,
|
||||
_rpc_config: Arc<std::sync::RwLock<RpcConfig>>,
|
||||
database: D,
|
||||
net: Network,
|
||||
) -> Result<(), tower::BoxError>
|
||||
where
|
||||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
tracing::info!("Beginning chain scan");
|
||||
|
||||
// TODO: when we implement all rules use the RPCs chain height, for now we don't check v2 txs.
|
||||
let chain_height = 3_152_725;
|
||||
|
||||
tracing::info!("scanning to chain height: {}", chain_height);
|
||||
|
||||
let config = match net {
|
||||
Network::Mainnet => ContextConfig::main_net(),
|
||||
Network::Stagenet => ContextConfig::stage_net(),
|
||||
Network::Testnet => ContextConfig::test_net(),
|
||||
};
|
||||
|
||||
let mut ctx_svc = initialize_blockchain_context(config, database.clone()).await?;
|
||||
|
||||
let (mut block_verifier, _) =
|
||||
initialize_verifier(database.clone(), ctx_svc.clone()).await?;
|
||||
|
||||
let start_height = cache.read().await.height;
|
||||
|
||||
let (block_tx, mut incoming_blocks) = mpsc::channel(3);
|
||||
|
||||
tokio::spawn(
|
||||
async move { call_blocks(block_tx, start_height, chain_height, database).await },
|
||||
);
|
||||
|
||||
while let Some(incoming_blocks) = incoming_blocks.next().await {
|
||||
let VerifyBlockResponse::MainChainBatchPrep(blocks, txs) = block_verifier
|
||||
.ready()
|
||||
.await?
|
||||
.call(VerifyBlockRequest::MainChainBatchPrep(incoming_blocks))
|
||||
.await?
|
||||
else {
|
||||
panic!()
|
||||
};
|
||||
|
||||
let mut height;
|
||||
for (block, txs) in blocks.into_iter().zip(txs) {
|
||||
let VerifyBlockResponse::MainChain(verified_block_info) = block_verifier
|
||||
.ready()
|
||||
.await?
|
||||
.call(VerifyBlockRequest::MainChainPrepared(block, txs))
|
||||
.await?
|
||||
else {
|
||||
panic!()
|
||||
};
|
||||
|
||||
height = verified_block_info.height;
|
||||
|
||||
if verified_block_info.height % 5000 == 0 {
|
||||
tracing::info!("saving cache to: {}", save_file.display());
|
||||
cache.write().await.save(&save_file).unwrap();
|
||||
}
|
||||
|
||||
update_cache_and_context(&cache, &mut ctx_svc, verified_block_info).await?;
|
||||
|
||||
if height % 200 == 0 {
|
||||
tracing::info!(
|
||||
"verified blocks: {:?}, chain height: {}",
|
||||
0..height,
|
||||
chain_height
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Parser)]
struct Args {
/// The log level, valid values:
/// "off", "error", "warn", "info", "debug", "trace", or a number 0-5.
#[arg(short, long, default_value = "info")]
log_level: LevelFilter,
/// The network we should scan, valid values:
/// "mainnet", "testnet", "stagenet".
#[arg(short, long, default_value = "mainnet")]
network: String,
/// A list of RPC nodes we should use.
/// Example: <http://xmr-node.cakewallet.com:18081>
#[arg(long)]
rpc_nodes: Vec<String>,
/// Stops the scanner from including the default list of nodes; this is not
/// recommended unless you have supplied sufficient nodes of your own with `rpc_nodes`.
#[arg(long)]
dont_use_default_nodes: bool,
/// The directory/folder to save the scanning cache in.
/// This will default to your user cache directory.
#[arg(long)]
cache_dir: Option<PathBuf>,
}
|
||||
|
||||
pub async fn run() {
|
||||
let args = Args::parse();
|
||||
|
||||
if args.dont_use_default_nodes && args.rpc_nodes.is_empty() {
|
||||
panic!("Can't run scanner with no RPC nodes, see `--help` ")
|
||||
}
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_max_level(args.log_level)
|
||||
.init();
|
||||
|
||||
let network = match args.network.as_str() {
|
||||
"mainnet" => Network::Mainnet,
|
||||
"testnet" => Network::Testnet,
|
||||
"stagenet" => Network::Stagenet,
|
||||
_ => panic!("Invalid network, scanner currently only supports mainnet"),
|
||||
};
|
||||
|
||||
let mut file_for_cache = match args.cache_dir {
|
||||
Some(dir) => dir,
|
||||
None => dirs::cache_dir().unwrap(),
|
||||
};
|
||||
|
||||
match network {
|
||||
Network::Mainnet => file_for_cache.push("cuprate_rpc_scanning_cache.bin"),
|
||||
Network::Stagenet => file_for_cache.push("cuprate_rpc_scanning_cache_stage_net.bin"),
|
||||
Network::Testnet => file_for_cache.push("cuprate_rpc_scanning_cache_test_net.bin"),
|
||||
}
|
||||
|
||||
let mut urls = if args.dont_use_default_nodes {
|
||||
vec![]
|
||||
} else {
|
||||
match network {
|
||||
Network::Mainnet => vec![
|
||||
"http://xmr-node.cakewallet.com:18081".to_string(),
|
||||
"https://node.sethforprivacy.com".to_string(),
|
||||
// "http://nodex.monerujo.io:18081".to_string(),
|
||||
"http://nodes.hashvault.pro:18081".to_string(),
|
||||
"http://node.c3pool.com:18081".to_string(),
|
||||
"http://node.trocador.app:18089".to_string(),
|
||||
"http://xmr.lukas.services:18089".to_string(),
|
||||
"http://xmr-node-eu.cakewallet.com:18081".to_string(),
|
||||
"http://68.118.241.70:18089".to_string(),
|
||||
"http://145.239.97.211:18089".to_string(),
|
||||
//
|
||||
"http://xmr-node.cakewallet.com:18081".to_string(),
|
||||
"https://node.sethforprivacy.com".to_string(),
|
||||
// "http://nodex.monerujo.io:18081".to_string(),
|
||||
"http://nodes.hashvault.pro:18081".to_string(),
|
||||
"http://node.c3pool.com:18081".to_string(),
|
||||
"http://node.trocador.app:18089".to_string(),
|
||||
"http://xmr.lukas.services:18089".to_string(),
|
||||
"http://xmr-node-eu.cakewallet.com:18081".to_string(),
|
||||
"http://68.118.241.70:18089".to_string(),
|
||||
"http://145.239.97.211:18089".to_string(),
|
||||
],
|
||||
Network::Testnet => vec![
|
||||
"http://testnet.xmr-tw.org:28081".to_string(),
|
||||
"http://node3.monerodevs.org:28089".to_string(),
|
||||
"http://node.monerodevs.org:28089".to_string(),
|
||||
"http://125.229.105.12:28081".to_string(),
|
||||
"http://node2.monerodevs.org:28089".to_string(),
|
||||
"https://testnet.xmr.ditatompel.com".to_string(),
|
||||
"http://singapore.node.xmr.pm:28081".to_string(),
|
||||
//
|
||||
"http://testnet.xmr-tw.org:28081".to_string(),
|
||||
"http://node3.monerodevs.org:28089".to_string(),
|
||||
"http://node.monerodevs.org:28089".to_string(),
|
||||
"http://125.229.105.12:28081".to_string(),
|
||||
"http://node2.monerodevs.org:28089".to_string(),
|
||||
"https://testnet.xmr.ditatompel.com".to_string(),
|
||||
"http://singapore.node.xmr.pm:28081".to_string(),
|
||||
],
|
||||
Network::Stagenet => vec![
|
||||
"http://125.229.105.12:38081".to_string(),
|
||||
"http://90.189.159.23:38089".to_string(),
|
||||
"http://stagenet.xmr-tw.org:38081".to_string(),
|
||||
"http://node.monerodevs.org:38089".to_string(),
|
||||
"http://stagenet.community.rino.io:38081".to_string(),
|
||||
"http://node2.monerodevs.org:38089".to_string(),
|
||||
"http://node3.monerodevs.org:38089".to_string(),
|
||||
"http://singapore.node.xmr.pm:38081".to_string(),
|
||||
"https://stagenet.xmr.ditatompel.com".to_string(),
|
||||
"http://3.10.182.182:38081".to_string(),
|
||||
//
|
||||
"http://125.229.105.12:38081".to_string(),
|
||||
"http://90.189.159.23:38089".to_string(),
|
||||
"http://stagenet.xmr-tw.org:38081".to_string(),
|
||||
"http://node.monerodevs.org:38089".to_string(),
|
||||
"http://stagenet.community.rino.io:38081".to_string(),
|
||||
"http://node2.monerodevs.org:38089".to_string(),
|
||||
"http://node3.monerodevs.org:38089".to_string(),
|
||||
"http://singapore.node.xmr.pm:38081".to_string(),
|
||||
"https://stagenet.xmr.ditatompel.com".to_string(),
|
||||
"http://3.10.182.182:38081".to_string(),
|
||||
],
|
||||
}
|
||||
};
|
||||
|
||||
urls.extend(args.rpc_nodes.into_iter());
|
||||
|
||||
let rpc_config = RpcConfig::new(MAX_BLOCKS_IN_RANGE, MAX_BLOCKS_HEADERS_IN_RANGE);
|
||||
let rpc_config = Arc::new(std::sync::RwLock::new(rpc_config));
|
||||
|
||||
tracing::info!("Attempting to open cache at: {}", file_for_cache.display());
|
||||
let cache = match ScanningCache::load(&file_for_cache) {
|
||||
Ok(cache) => {
|
||||
tracing::info!("Reloaded from cache, chain height: {}", cache.height);
|
||||
Arc::new(RwLock::new(cache))
|
||||
}
|
||||
Err(_) => {
|
||||
tracing::warn!("Couldn't load from cache starting from scratch");
|
||||
let mut cache = ScanningCache::default();
|
||||
let genesis = monero_consensus::genesis::generate_genesis_block(&network);
|
||||
|
||||
let total_outs = genesis
|
||||
.miner_tx
|
||||
.prefix
|
||||
.outputs
|
||||
.iter()
|
||||
.map(|out| out.amount.unwrap_or(0))
|
||||
.sum::<u64>();
|
||||
|
||||
cache.add_new_block_data(total_outs, &genesis.miner_tx, &[]);
|
||||
Arc::new(RwLock::new(cache))
|
||||
}
|
||||
};
|
||||
|
||||
let rpc = init_rpc_load_balancer(urls, cache.clone(), rpc_config.clone());
|
||||
|
||||
scan_chain(cache, file_for_cache, rpc_config, rpc, network)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "binaries")]
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
bin::run().await
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "binaries"))]
|
||||
fn main() {
|
||||
panic!("must run with feature `binaries`")
|
||||
}
|
|
@ -1,3 +1,4 @@
|
|||
//! Block Verifier Service.
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
future::Future,
|
||||
|
@ -8,98 +9,52 @@ use std::{
|
|||
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use futures::FutureExt;
|
||||
use monero_serai::{
|
||||
block::Block,
|
||||
transaction::{Input, Transaction},
|
||||
};
|
||||
use rayon::prelude::*;
|
||||
use monero_serai::{block::Block, transaction::Input};
|
||||
use tower::{Service, ServiceExt};
|
||||
|
||||
use monero_consensus::{
|
||||
blocks::{
|
||||
calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height,
|
||||
randomx_seed_height, BlockError, RandomX,
|
||||
},
|
||||
use cuprate_consensus_rules::{
|
||||
blocks::{calculate_pow_hash, check_block, check_block_pow, BlockError, RandomX},
|
||||
miner_tx::MinerTxError,
|
||||
ConsensusError, HardFork,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
context::{
|
||||
rx_vms::RandomXVM, BlockChainContextRequest, BlockChainContextResponse,
|
||||
RawBlockChainContext,
|
||||
},
|
||||
transactions::{
|
||||
batch_setup_txs, contextual_data, OutputCache, TransactionVerificationData,
|
||||
VerifyTxRequest, VerifyTxResponse,
|
||||
},
|
||||
context::{BlockChainContextRequest, BlockChainContextResponse},
|
||||
transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse},
|
||||
Database, ExtendedConsensusError,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PrePreparedBlockExPOW {
|
||||
pub block: Block,
|
||||
pub block_blob: Vec<u8>,
|
||||
|
||||
pub hf_vote: HardFork,
|
||||
pub hf_version: HardFork,
|
||||
|
||||
pub block_hash: [u8; 32],
|
||||
pub height: u64,
|
||||
|
||||
pub miner_tx_weight: usize,
|
||||
}
|
||||
|
||||
impl PrePreparedBlockExPOW {
|
||||
pub fn new(block: Block) -> Result<PrePreparedBlockExPOW, ConsensusError> {
|
||||
let (hf_version, hf_vote) =
|
||||
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
|
||||
|
||||
let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
|
||||
Err(ConsensusError::Block(BlockError::MinerTxError(
|
||||
MinerTxError::InputNotOfTypeGen,
|
||||
)))?
|
||||
};
|
||||
|
||||
Ok(PrePreparedBlockExPOW {
|
||||
block_blob: block.serialize(),
|
||||
hf_vote,
|
||||
hf_version,
|
||||
|
||||
block_hash: block.hash(),
|
||||
height: *height,
|
||||
|
||||
miner_tx_weight: block.miner_tx.weight(),
|
||||
block,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// A pre-prepared block with all data needed to verify it.
#[derive(Debug)]
pub struct PrePreparedBlock {
/// The block.
pub block: Block,
/// The serialised block's bytes.
pub block_blob: Vec<u8>,

/// The block's hard-fork vote.
pub hf_vote: HardFork,
/// The block's hard-fork version.
pub hf_version: HardFork,

/// The block's hash.
pub block_hash: [u8; 32],
/// The block's PoW hash.
pub pow_hash: [u8; 32],

/// The weight of the block's miner transaction.
pub miner_tx_weight: usize,
}
|
||||
|
||||
impl PrePreparedBlock {
|
||||
pub fn new(block: Block) -> Result<PrePreparedBlock, ConsensusError> {
|
||||
struct DummyRX;
|
||||
|
||||
impl RandomX for DummyRX {
|
||||
type Error = ();
|
||||
fn calculate_hash(&self, _: &[u8]) -> Result<[u8; 32], Self::Error> {
|
||||
panic!("DummyRX cant calculate hash")
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new [`PrePreparedBlock`].
|
||||
///
|
||||
/// The randomX VM must be Some if RX is needed or this will panic.
|
||||
/// The randomX VM must also be initialised with the correct seed.
|
||||
fn new<R: RandomX>(
|
||||
block: Block,
|
||||
randomx_vm: Option<&R>,
|
||||
) -> Result<PrePreparedBlock, ConsensusError> {
|
||||
let (hf_version, hf_vote) =
|
||||
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
|
||||
|
||||
|
@ -115,86 +70,68 @@ impl PrePreparedBlock {
|
|||
hf_version,
|
||||
|
||||
block_hash: block.hash(),
|
||||
|
||||
pow_hash: calculate_pow_hash::<DummyRX>(
|
||||
None,
|
||||
pow_hash: calculate_pow_hash(
|
||||
randomx_vm,
|
||||
&block.serialize_hashable(),
|
||||
*height,
|
||||
&hf_version,
|
||||
)?,
|
||||
|
||||
miner_tx_weight: block.miner_tx.weight(),
|
||||
block,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new_rx<R: RandomX>(
|
||||
block: PrePreparedBlockExPOW,
|
||||
randomx_vm: Option<&R>,
|
||||
) -> Result<PrePreparedBlock, ConsensusError> {
|
||||
let Some(Input::Gen(height)) = block.block.miner_tx.prefix.inputs.first() else {
|
||||
Err(ConsensusError::Block(BlockError::MinerTxError(
|
||||
MinerTxError::InputNotOfTypeGen,
|
||||
)))?
|
||||
};
|
||||
|
||||
Ok(PrePreparedBlock {
|
||||
block_blob: block.block_blob,
|
||||
hf_vote: block.hf_vote,
|
||||
hf_version: block.hf_version,
|
||||
|
||||
block_hash: block.block_hash,
|
||||
pow_hash: calculate_pow_hash(
|
||||
randomx_vm,
|
||||
&block.block.serialize_hashable(),
|
||||
*height,
|
||||
&block.hf_version,
|
||||
)?,
|
||||
|
||||
miner_tx_weight: block.block.miner_tx.weight(),
|
||||
block: block.block,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Information about a verified block.
#[derive(Debug)]
pub struct VerifiedBlockInformation {
/// The block that has been verified.
pub block: Block,
/// The block's hard-fork vote.
pub hf_vote: HardFork,
pub txs: Vec<Arc<TransactionVerificationData>>,
/// The txs in this block.
pub txs: Arc<[Arc<TransactionVerificationData>]>,
/// The block's hash.
pub block_hash: [u8; 32],
/// The block's PoW hash.
pub pow_hash: [u8; 32],
/// The block's height.
pub height: u64,
/// The amount of coins generated by this block.
pub generated_coins: u64,
/// This block's weight.
pub weight: usize,
/// This block's long-term weight.
pub long_term_weight: usize,
/// The cumulative difficulty of the chain including this block.
pub cumulative_difficulty: u128,
}
|
||||
|
||||
/// A request to verify a block.
|
||||
pub enum VerifyBlockRequest {
|
||||
MainChainBatchPrep(Vec<(Block, Vec<Transaction>)>),
|
||||
/// A request to verify a block.
|
||||
MainChain {
|
||||
block: Block,
|
||||
prepared_txs: Vec<Arc<TransactionVerificationData>>,
|
||||
txs: Vec<Transaction>,
|
||||
prepared_txs: Arc<[Arc<TransactionVerificationData>]>,
|
||||
},
|
||||
MainChainPrepared(PrePreparedBlock, Vec<Arc<TransactionVerificationData>>),
|
||||
}
|
||||
|
||||
/// A response from a verify block request.
|
||||
pub enum VerifyBlockResponse {
|
||||
/// This block is valid.
|
||||
MainChain(VerifiedBlockInformation),
|
||||
MainChainBatchPrep(
|
||||
Vec<PrePreparedBlock>,
|
||||
Vec<Vec<Arc<TransactionVerificationData>>>,
|
||||
),
|
||||
}
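A minimal sketch of how a caller might drive this request/response pair through `tower`, assuming the post-change shape of `VerifyBlockRequest::MainChain` and that `block_verifier`, `block` and `prepared_txs` already exist in the caller:

// Sketch only: verify one block whose transactions were prepared up front.
// Assumes `use tower::{Service, ServiceExt};` is in scope.
let request = VerifyBlockRequest::MainChain {
    block,
    prepared_txs, // Arc<[Arc<TransactionVerificationData>]>
};

let VerifyBlockResponse::MainChain(verified) = block_verifier
    .ready()
    .await?
    .call(request)
    .await?
else {
    panic!("block verifier returned an unexpected response");
};

tracing::info!("verified block at height {}", verified.height);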
|
||||
// TODO: it is probably a bad idea for this to derive Clone; if 2 places (RPC, P2P) receive valid but different blocks
|
||||
// then they will both get approved but only one should go to main chain.
|
||||
#[derive(Clone)]
|
||||
pub struct BlockVerifierService<C: Clone, TxV: Clone, D> {
|
||||
/// The block verifier service.
|
||||
pub struct BlockVerifierService<C, TxV, D> {
|
||||
/// The context service.
|
||||
context_svc: C,
|
||||
/// The tx verifier service.
|
||||
tx_verifier_svc: TxV,
|
||||
database: D,
|
||||
/// The database.
|
||||
// Not used yet, but will be.
|
||||
_database: D,
|
||||
}
|
||||
|
||||
impl<C, TxV, D> BlockVerifierService<C, TxV, D>
|
||||
|
@ -210,7 +147,8 @@ where
|
|||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
pub fn new(
|
||||
/// Creates a new block verifier.
|
||||
pub(crate) fn new(
|
||||
context_svc: C,
|
||||
tx_verifier_svc: TxV,
|
||||
database: D,
|
||||
|
@ -218,7 +156,7 @@ where
|
|||
BlockVerifierService {
|
||||
context_svc,
|
||||
tx_verifier_svc,
|
||||
database,
|
||||
_database: database,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -255,30 +193,14 @@ where
|
|||
fn call(&mut self, req: VerifyBlockRequest) -> Self::Future {
|
||||
let context_svc = self.context_svc.clone();
|
||||
let tx_verifier_svc = self.tx_verifier_svc.clone();
|
||||
let database = self.database.clone();
|
||||
|
||||
async move {
|
||||
match req {
|
||||
VerifyBlockRequest::MainChain {
|
||||
block,
|
||||
prepared_txs,
|
||||
txs,
|
||||
} => {
|
||||
verify_main_chain_block(block, txs, prepared_txs, context_svc, tx_verifier_svc)
|
||||
.await
|
||||
}
|
||||
VerifyBlockRequest::MainChainPrepared(prepped_block, txs) => {
|
||||
verify_main_chain_block_prepared(
|
||||
prepped_block,
|
||||
txs,
|
||||
context_svc,
|
||||
tx_verifier_svc,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
}
|
||||
VerifyBlockRequest::MainChainBatchPrep(blocks) => {
|
||||
batch_verify_main_chain_block(blocks, context_svc, database).await
|
||||
verify_main_chain_block(block, prepared_txs, context_svc, tx_verifier_svc).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -286,188 +208,12 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
async fn batch_verify_main_chain_block<C, D>(
|
||||
blocks: Vec<(Block, Vec<Transaction>)>,
|
||||
mut context_svc: C,
|
||||
mut database: D,
|
||||
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
|
||||
where
|
||||
C: Service<
|
||||
BlockChainContextRequest,
|
||||
Response = BlockChainContextResponse,
|
||||
Error = tower::BoxError,
|
||||
> + Send
|
||||
+ 'static,
|
||||
C::Future: Send + 'static,
|
||||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
|
||||
|
||||
tracing::debug!("Calculating block hashes.");
|
||||
let blocks: Vec<PrePreparedBlockExPOW> = rayon_spawn_async(|| {
|
||||
blocks
|
||||
.into_iter()
|
||||
.map(PrePreparedBlockExPOW::new)
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
})
|
||||
.await?;
|
||||
|
||||
let mut timestamps_hfs = Vec::with_capacity(blocks.len());
|
||||
let mut new_rx_vm = None;
|
||||
|
||||
for window in blocks.windows(2) {
|
||||
if window[0].block_hash != window[1].block.header.previous
|
||||
|| window[0].height != window[1].height - 1
|
||||
{
|
||||
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
|
||||
}
|
||||
|
||||
if is_randomx_seed_height(window[0].height) {
|
||||
new_rx_vm = Some((window[0].height, window[0].block_hash));
|
||||
}
|
||||
|
||||
timestamps_hfs.push((window[0].block.header.timestamp, window[0].hf_version))
|
||||
}
|
||||
|
||||
tracing::debug!("getting blockchain context");
|
||||
let BlockChainContextResponse::Context(checked_context) = context_svc
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockChainContextRequest::GetContext)
|
||||
.await
|
||||
.map_err(Into::<ExtendedConsensusError>::into)?
|
||||
else {
|
||||
panic!("Context service returned wrong response!");
|
||||
};
|
||||
|
||||
let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockChainContextRequest::BatchGetDifficulties(
|
||||
timestamps_hfs,
|
||||
))
|
||||
.await
|
||||
.map_err(Into::<ExtendedConsensusError>::into)?
|
||||
else {
|
||||
panic!("Context service returned wrong response!");
|
||||
};
|
||||
|
||||
let context = checked_context.unchecked_blockchain_context().clone();
|
||||
|
||||
if context.chain_height != blocks[0].height {
|
||||
Err(ConsensusError::Block(BlockError::MinerTxError(
|
||||
MinerTxError::InputsHeightIncorrect,
|
||||
)))?;
|
||||
}
|
||||
|
||||
if context.top_hash != blocks[0].block.header.previous {
|
||||
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
|
||||
}
|
||||
|
||||
let mut rx_vms = context.rx_vms;
|
||||
|
||||
if let Some((new_vm_height, new_vm_seed)) = new_rx_vm {
|
||||
let new_vm = rayon_spawn_async(move || {
|
||||
Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
|
||||
})
|
||||
.await;
|
||||
|
||||
context_svc
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockChainContextRequest::NewRXVM((
|
||||
new_vm_seed,
|
||||
new_vm.clone(),
|
||||
)))
|
||||
.await
|
||||
.map_err(Into::<ExtendedConsensusError>::into)?;
|
||||
|
||||
rx_vms.insert(new_vm_height, new_vm);
|
||||
}
|
||||
|
||||
let blocks = rayon_spawn_async(move || {
|
||||
blocks
|
||||
.into_par_iter()
|
||||
.zip(difficulties)
|
||||
.map(|(block, difficultly)| {
|
||||
let height = block.height;
|
||||
let block = PrePreparedBlock::new_rx(
|
||||
block,
|
||||
rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
|
||||
)?;
|
||||
|
||||
check_block_pow(&block.pow_hash, difficultly)?;
|
||||
Ok(block)
|
||||
})
|
||||
.collect::<Result<Vec<_>, ConsensusError>>()
|
||||
})
|
||||
.await?;
|
||||
|
||||
let txs = batch_setup_txs(
|
||||
txs.into_iter()
|
||||
.zip(blocks.iter().map(|block| block.hf_version))
|
||||
.collect(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut complete_block_idx = 0;
|
||||
|
||||
let mut out_cache = OutputCache::new();
|
||||
|
||||
out_cache
|
||||
.extend_from_block(
|
||||
blocks
|
||||
.iter()
|
||||
.map(|block| &block.block)
|
||||
.zip(txs.iter().map(Vec::as_slice)),
|
||||
&mut database,
|
||||
)
|
||||
.await?;
|
||||
|
||||
for (idx, hf) in blocks
|
||||
.windows(2)
|
||||
.enumerate()
|
||||
.filter(|(_, block)| block[0].hf_version != blocks[1].hf_version)
|
||||
.map(|(i, block)| (i, &block[0].hf_version))
|
||||
{
|
||||
contextual_data::batch_fill_ring_member_info(
|
||||
txs.iter()
|
||||
.take(idx + 1)
|
||||
.skip(complete_block_idx)
|
||||
.flat_map(|txs| txs.iter()),
|
||||
hf,
|
||||
context.re_org_token.clone(),
|
||||
database.clone(),
|
||||
Some(&out_cache),
|
||||
)
|
||||
.await?;
|
||||
|
||||
complete_block_idx = idx + 1;
|
||||
}
|
||||
|
||||
if complete_block_idx != blocks.len() {
|
||||
contextual_data::batch_fill_ring_member_info(
|
||||
txs.iter()
|
||||
.skip(complete_block_idx)
|
||||
.flat_map(|txs| txs.iter()),
|
||||
&blocks.last().unwrap().hf_version,
|
||||
context.re_org_token.clone(),
|
||||
database.clone(),
|
||||
Some(&out_cache),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(VerifyBlockResponse::MainChainBatchPrep(blocks, txs))
|
||||
}
|
||||
|
||||
async fn verify_main_chain_block_prepared<C, TxV>(
|
||||
prepped_block: PrePreparedBlock,
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
/// Verifies a prepared block.
|
||||
async fn verify_main_chain_block<C, TxV>(
|
||||
block: Block,
|
||||
txs: Arc<[Arc<TransactionVerificationData>]>,
|
||||
context_svc: C,
|
||||
tx_verifier_svc: TxV,
|
||||
context: Option<RawBlockChainContext>,
|
||||
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
|
||||
where
|
||||
C: Service<
|
||||
|
@ -479,10 +225,8 @@ where
|
|||
C::Future: Send + 'static,
|
||||
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
|
||||
{
|
||||
let context = match context {
|
||||
Some(context) => context,
|
||||
None => {
|
||||
tracing::debug!("getting blockchain context");
|
||||
|
||||
let BlockChainContextResponse::Context(checked_context) = context_svc
|
||||
.oneshot(BlockChainContextRequest::GetContext)
|
||||
.await
|
||||
|
@ -492,19 +236,33 @@ where
|
|||
};
|
||||
|
||||
let context = checked_context.unchecked_blockchain_context().clone();
|
||||
|
||||
tracing::debug!("got blockchain context: {:?}", context);
|
||||
context
|
||||
}
|
||||
};
|
||||
|
||||
// Set up the block and just pass it to [`verify_main_chain_block_prepared`]
|
||||
|
||||
let rx_vms = context.rx_vms.clone();
|
||||
|
||||
let height = context.chain_height;
|
||||
let prepped_block = rayon_spawn_async(move || {
|
||||
PrePreparedBlock::new(block, rx_vms.get(&height).map(AsRef::as_ref))
|
||||
})
|
||||
.await?;
|
||||
|
||||
tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
|
||||
|
||||
check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
|
||||
.map_err(ConsensusError::Block)?;
|
||||
|
||||
// Check that the txs included are what we need and that there are not any extra.
|
||||
// Collecting into a HashSet could hide duplicates but we check Key Images are unique so someone would have to find
|
||||
// a hash collision to include duplicate txs here.
|
||||
|
||||
let mut tx_hashes = txs.iter().map(|tx| &tx.tx_hash).collect::<HashSet<_>>();
|
||||
|
||||
tracing::debug!("Checking we have correct transactions for block.");
|
||||
|
||||
if tx_hashes.len() != txs.len() {
|
||||
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
|
||||
}
|
||||
|
||||
for tx_hash in &prepped_block.block.txs {
|
||||
if !tx_hashes.remove(tx_hash) {
|
||||
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
|
||||
|
@ -514,13 +272,15 @@ where
|
|||
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
|
||||
}
|
||||
|
||||
tracing::debug!("Verifying transactions for block.");
|
||||
|
||||
tx_verifier_svc
|
||||
.oneshot(VerifyTxRequest::Block {
|
||||
.oneshot(VerifyTxRequest::Prepped {
|
||||
txs: txs.clone(),
|
||||
current_chain_height: context.chain_height,
|
||||
top_hash: context.top_hash,
|
||||
time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
|
||||
hf: context.current_hf,
|
||||
re_org_token: context.re_org_token.clone(),
|
||||
})
|
||||
.await?;
|
||||
|
||||
|
@ -528,6 +288,7 @@ where
|
|||
prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
|
||||
let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>();
|
||||
|
||||
tracing::debug!("Verifying block header.");
|
||||
let (hf_vote, generated_coins) = check_block(
|
||||
&prepped_block.block,
|
||||
total_fees,
|
||||
|
@ -550,58 +311,3 @@ where
|
|||
cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
|
||||
}))
|
||||
}
|
||||
|
||||
async fn verify_main_chain_block<C, TxV>(
|
||||
block: Block,
|
||||
txs: Vec<Transaction>,
|
||||
mut prepared_txs: Vec<Arc<TransactionVerificationData>>,
|
||||
mut context_svc: C,
|
||||
tx_verifier_svc: TxV,
|
||||
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
|
||||
where
|
||||
C: Service<
|
||||
BlockChainContextRequest,
|
||||
Response = BlockChainContextResponse,
|
||||
Error = tower::BoxError,
|
||||
> + Send
|
||||
+ 'static,
|
||||
C::Future: Send + 'static,
|
||||
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
|
||||
{
|
||||
tracing::debug!("getting blockchain context");
|
||||
let BlockChainContextResponse::Context(checked_context) = context_svc
|
||||
.ready()
|
||||
.await?
|
||||
.call(BlockChainContextRequest::GetContext)
|
||||
.await
|
||||
.map_err(Into::<ExtendedConsensusError>::into)?
|
||||
else {
|
||||
panic!("Context service returned wrong response!");
|
||||
};
|
||||
|
||||
let context = checked_context.unchecked_blockchain_context().clone();
|
||||
tracing::debug!("got blockchain context: {:?}", context);
|
||||
|
||||
let rx_vms = context.rx_vms.clone();
|
||||
let prepped_block = rayon_spawn_async(move || {
|
||||
let prepped_block_ex_pow = PrePreparedBlockExPOW::new(block)?;
|
||||
let height = prepped_block_ex_pow.height;
|
||||
|
||||
PrePreparedBlock::new_rx(prepped_block_ex_pow, rx_vms.get(&height).map(AsRef::as_ref))
|
||||
})
|
||||
.await?;
|
||||
|
||||
check_block_pow(&prepped_block.pow_hash, context.cumulative_difficulty)
|
||||
.map_err(ConsensusError::Block)?;
|
||||
|
||||
prepared_txs.append(&mut batch_setup_txs(vec![(txs, context.current_hf)]).await?[0]);
|
||||
|
||||
verify_main_chain_block_prepared(
|
||||
prepped_block,
|
||||
prepared_txs,
|
||||
context_svc,
|
||||
tx_verifier_svc,
|
||||
Some(context),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
|
|
@ -4,32 +4,30 @@
|
|||
//! This is used during contextual validation; it does not have all the data for contextual validation
|
||||
//! (outputs); for that you will need a [`Database`].
|
||||
//!
|
||||
|
||||
use std::{
|
||||
cmp::min,
|
||||
collections::HashMap,
|
||||
future::Future,
|
||||
ops::DerefMut,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::{
|
||||
lock::{Mutex, OwnedMutexGuard, OwnedMutexLockFuture},
|
||||
FutureExt,
|
||||
};
|
||||
use tower::{Service, ServiceExt};
|
||||
use futures::{channel::oneshot, FutureExt};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::PollSender;
|
||||
use tower::Service;
|
||||
|
||||
use monero_consensus::{blocks::ContextToVerifyBlock, current_unix_timestamp, HardFork};
|
||||
use cuprate_consensus_rules::{blocks::ContextToVerifyBlock, current_unix_timestamp, HardFork};
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
|
||||
use crate::{Database, ExtendedConsensusError};
|
||||
|
||||
pub(crate) mod difficulty;
|
||||
pub(crate) mod hardforks;
|
||||
pub(crate) mod rx_vms;
|
||||
pub(crate) mod weight;
|
||||
|
||||
mod task;
|
||||
mod tokens;
|
||||
|
||||
pub use difficulty::DifficultyCacheConfig;
|
||||
|
@ -40,13 +38,18 @@ pub use weight::BlockWeightsCacheConfig;
|
|||
|
||||
const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60;
|
||||
|
||||
/// Config for the context service.
|
||||
pub struct ContextConfig {
|
||||
/// Hard-forks config.
|
||||
pub hard_fork_cfg: HardForkConfig,
|
||||
/// Difficulty config.
|
||||
pub difficulty_cfg: DifficultyCacheConfig,
|
||||
/// Block weight config.
|
||||
pub weights_config: BlockWeightsCacheConfig,
|
||||
}
|
||||
|
||||
impl ContextConfig {
|
||||
/// Get the config for main-net.
|
||||
pub fn main_net() -> ContextConfig {
|
||||
ContextConfig {
|
||||
hard_fork_cfg: HardForkConfig::main_net(),
|
||||
|
@ -55,26 +58,33 @@ impl ContextConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Get the config for stage-net.
|
||||
pub fn stage_net() -> ContextConfig {
|
||||
ContextConfig {
|
||||
hard_fork_cfg: HardForkConfig::stage_net(),
|
||||
// These 2 have the same config as main-net.
|
||||
difficulty_cfg: DifficultyCacheConfig::main_net(),
|
||||
weights_config: BlockWeightsCacheConfig::main_net(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the config for test-net.
|
||||
pub fn test_net() -> ContextConfig {
|
||||
ContextConfig {
|
||||
hard_fork_cfg: HardForkConfig::test_net(),
|
||||
// These 2 have the same config as main-net.
|
||||
difficulty_cfg: DifficultyCacheConfig::main_net(),
|
||||
weights_config: BlockWeightsCacheConfig::main_net(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Initialize the blockchain context service.
|
||||
///
|
||||
/// This function will request a lot of data from the database so it may take a while.
|
||||
pub async fn initialize_blockchain_context<D>(
|
||||
cfg: ContextConfig,
|
||||
mut database: D,
|
||||
database: D,
|
||||
) -> Result<
|
||||
impl Service<
|
||||
BlockChainContextRequest,
|
||||
|
@ -93,74 +103,16 @@ where
|
|||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
let ContextConfig {
|
||||
difficulty_cfg,
|
||||
weights_config,
|
||||
hard_fork_cfg,
|
||||
} = cfg;
|
||||
let context_task = task::ContextTask::init_context(cfg, database).await?;
|
||||
|
||||
tracing::debug!("Initialising blockchain context");
|
||||
// TODO: make buffer size configurable.
|
||||
let (tx, rx) = mpsc::channel(15);
|
||||
|
||||
let DatabaseResponse::ChainHeight(chain_height, top_block_hash) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::ChainHeight)
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
tokio::spawn(context_task.run(rx));
|
||||
|
||||
let DatabaseResponse::GeneratedCoins(already_generated_coins) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::GeneratedCoins)
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
let db = database.clone();
|
||||
let hardfork_state_handle = tokio::spawn(async move {
|
||||
hardforks::HardForkState::init_from_chain_height(chain_height, hard_fork_cfg, db).await
|
||||
});
|
||||
|
||||
let db = database.clone();
|
||||
let difficulty_cache_handle = tokio::spawn(async move {
|
||||
difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db).await
|
||||
});
|
||||
|
||||
let db = database.clone();
|
||||
let weight_cache_handle = tokio::spawn(async move {
|
||||
weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db).await
|
||||
});
|
||||
|
||||
let hardfork_state = hardfork_state_handle.await.unwrap()?;
|
||||
let current_hf = hardfork_state.current_hardfork();
|
||||
|
||||
let db = database.clone();
|
||||
let rx_seed_handle = tokio::spawn(async move {
|
||||
rx_vms::RandomXVMCache::init_from_chain_height(chain_height, ¤t_hf, db).await
|
||||
});
|
||||
|
||||
let context_svc = BlockChainContextService {
|
||||
internal_blockchain_context: Arc::new(
|
||||
InternalBlockChainContext {
|
||||
current_validity_token: ValidityToken::new(),
|
||||
current_reorg_token: ReOrgToken::new(),
|
||||
difficulty_cache: difficulty_cache_handle.await.unwrap()?,
|
||||
weight_cache: weight_cache_handle.await.unwrap()?,
|
||||
rx_seed_cache: rx_seed_handle.await.unwrap()?,
|
||||
hardfork_state,
|
||||
chain_height,
|
||||
already_generated_coins,
|
||||
top_block_hash,
|
||||
}
|
||||
.into(),
|
||||
),
|
||||
lock_state: MutexLockState::Locked,
|
||||
};
|
||||
|
||||
Ok(context_svc)
|
||||
Ok(BlockChainContextService {
|
||||
channel: PollSender::new(tx),
|
||||
})
|
||||
}
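As a rough usage sketch (not part of this diff): once initialized, the returned service is driven like any other `tower` service, for example to fetch the current context. `database` is assumed to implement the crate's `Database` trait.

// Sketch only: initialize the context service and ask it for the current context.
// Assumes `use tower::{Service, ServiceExt};` is in scope.
let mut ctx_svc = initialize_blockchain_context(ContextConfig::main_net(), database).await?;

let BlockChainContextResponse::Context(checked_context) = ctx_svc
    .ready()
    .await?
    .call(BlockChainContextRequest::GetContext)
    .await?
else {
    panic!("context service returned an unexpected response");
};

let context = checked_context.unchecked_blockchain_context();
tracing::info!("chain height: {}", context.chain_height);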
|
||||
/// Raw blockchain context, obtained from [`BlockChainContext`]. This data may turn invalid so it is not ok to keep
|
||||
|
@ -169,12 +121,14 @@ where
|
|||
pub struct RawBlockChainContext {
|
||||
/// The current cumulative difficulty.
|
||||
pub cumulative_difficulty: u128,
|
||||
/// A token which is used to signal if a reorg has happened since creating the token.
|
||||
pub re_org_token: ReOrgToken,
|
||||
/// RandomX VMs, mapping seed height to VM. This will definitely contain the VM required to calculate the current block's
/// PoW hash (if an RX VM is required), and may contain more.
|
||||
pub rx_vms: HashMap<u64, Arc<RandomXVM>>,
|
||||
/// Context to verify a block, as needed by [`cuprate-consensus-rules`]
|
||||
pub context_to_verify_block: ContextToVerifyBlock,
|
||||
/// The median long term block weight.
|
||||
median_long_term_weight: usize,
|
||||
/// The top block's timestamp (will be [`None`] if the top block is the genesis).
|
||||
top_block_timestamp: Option<u64>,
|
||||
}
|
||||
|
||||
|
@ -188,7 +142,7 @@ impl std::ops::Deref for RawBlockChainContext {
|
|||
impl RawBlockChainContext {
|
||||
/// Returns the timestamp that should be used when checking locked outputs.
|
||||
///
|
||||
/// <https://cuprate.github.io/monero-book/consensus_rules/transactions/unlock_time.html#getting-the-current-time>
|
||||
/// ref: <https://cuprate.github.io/monero-book/consensus_rules/transactions/unlock_time.html#getting-the-current-time>
|
||||
pub fn current_adjusted_timestamp_for_time_lock(&self) -> u64 {
|
||||
if self.current_hf < HardFork::V13 || self.median_block_timestamp.is_none() {
|
||||
current_unix_timestamp()
|
||||
|
@ -208,14 +162,7 @@ impl RawBlockChainContext {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn block_blob_size_limit(&self) -> usize {
|
||||
self.effective_median_weight * 2 - 600
|
||||
}
|
||||
|
||||
pub fn block_weight_limit(&self) -> usize {
|
||||
self.median_weight_for_block_reward * 2
|
||||
}
|
||||
|
||||
/// Returns the next block's long-term weight from its block weight.
|
||||
pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize {
|
||||
weight::calculate_block_long_term_weight(
|
||||
&self.current_hf,
|
||||
|
@ -259,20 +206,31 @@ impl BlockChainContext {
|
|||
}
|
||||
}
|
||||
|
||||
/// Data needed from a new block to add it to the context cache.
#[derive(Debug, Clone)]
pub struct UpdateBlockchainCacheData {
pub new_top_hash: [u8; 32],
pub struct NewBlockData {
/// The block's hash.
pub block_hash: [u8; 32],
/// The block's height.
pub height: u64,
/// The block's timestamp.
pub timestamp: u64,
/// The block's weight.
pub weight: usize,
/// The long-term weight of this block.
pub long_term_weight: usize,
/// The coins generated by this block.
pub generated_coins: u64,
/// The block's hard-fork vote.
pub vote: HardFork,
/// The cumulative difficulty of the chain.
pub cumulative_difficulty: u128,
}
|
||||
|
||||
/// A request to the blockchain context cache.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum BlockChainContextRequest {
|
||||
/// Get the current blockchain context.
|
||||
GetContext,
|
||||
/// Get the next difficulties for these blocks.
|
||||
///
|
||||
|
@ -280,49 +238,30 @@ pub enum BlockChainContextRequest {
|
|||
///
|
||||
/// The number of difficulties returned will be one more than the number of timestamps/ hfs.
|
||||
BatchGetDifficulties(Vec<(u64, HardFork)>),
|
||||
/// Add a VM that has been created outside of the blockchain context service to the blockchain context.
|
||||
/// This is useful when batch calculating POW as you may need to create a new VM if you batch a lot of blocks together,
|
||||
/// it would be wasteful to then not give this VM to the context service to then use when it needs to init a VM with the same
|
||||
/// seed.
|
||||
///
|
||||
/// This should include the seed used to init this VM and the VM.
|
||||
NewRXVM(([u8; 32], Arc<RandomXVM>)),
|
||||
Update(UpdateBlockchainCacheData),
|
||||
/// A request to add a new block to the cache.
|
||||
Update(NewBlockData),
|
||||
}
|
||||
|
||||
pub enum BlockChainContextResponse {
|
||||
/// Blockchain context response.
|
||||
Context(BlockChainContext),
|
||||
/// A list of difficulties.
|
||||
BatchDifficulties(Vec<u128>),
|
||||
/// Ok response.
|
||||
Ok,
|
||||
}
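A short hedged sketch of the `Update` round-trip, mirroring `update_cache_and_context` above; `ctx_svc` and `verified` (a `VerifiedBlockInformation`) are assumed to exist in the caller:

// Sketch only: feed a newly verified block back into the context cache.
ctx_svc
    .ready()
    .await?
    .call(BlockChainContextRequest::Update(NewBlockData {
        block_hash: verified.block_hash,
        height: verified.height,
        timestamp: verified.block.header.timestamp,
        weight: verified.weight,
        long_term_weight: verified.long_term_weight,
        generated_coins: verified.generated_coins,
        vote: verified.hf_vote,
        cumulative_difficulty: verified.cumulative_difficulty,
    }))
    .await?;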
struct InternalBlockChainContext {
|
||||
/// A token used to invalidate previous contexts when a new
|
||||
/// block is added to the chain.
|
||||
current_validity_token: ValidityToken,
|
||||
/// A token which is used to signal a reorg has happened.
|
||||
current_reorg_token: ReOrgToken,
|
||||
|
||||
difficulty_cache: difficulty::DifficultyCache,
|
||||
weight_cache: weight::BlockWeightsCache,
|
||||
rx_seed_cache: rx_vms::RandomXVMCache,
|
||||
hardfork_state: hardforks::HardForkState,
|
||||
|
||||
chain_height: u64,
|
||||
top_block_hash: [u8; 32],
|
||||
already_generated_coins: u64,
|
||||
}
|
||||
|
||||
enum MutexLockState {
|
||||
Locked,
|
||||
Acquiring(OwnedMutexLockFuture<InternalBlockChainContext>),
|
||||
Acquired(OwnedMutexGuard<InternalBlockChainContext>),
|
||||
}
|
||||
/// The blockchain context service.
|
||||
#[derive(Clone)]
|
||||
pub struct BlockChainContextService {
|
||||
internal_blockchain_context: Arc<Mutex<InternalBlockChainContext>>,
|
||||
lock_state: MutexLockState,
|
||||
}
|
||||
|
||||
impl Clone for BlockChainContextService {
|
||||
fn clone(&self) -> Self {
|
||||
BlockChainContextService {
|
||||
internal_blockchain_context: self.internal_blockchain_context.clone(),
|
||||
lock_state: MutexLockState::Locked,
|
||||
}
|
||||
}
|
||||
channel: PollSender<task::ContextTaskRequest>,
|
||||
}
|
||||
|
||||
impl Service<BlockChainContextRequest> for BlockChainContextService {
|
||||
|
@ -332,111 +271,25 @@ impl Service<BlockChainContextRequest> for BlockChainContextService {
|
|||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
loop {
|
||||
match &mut self.lock_state {
|
||||
MutexLockState::Locked => {
|
||||
self.lock_state = MutexLockState::Acquiring(
|
||||
Arc::clone(&self.internal_blockchain_context).lock_owned(),
|
||||
)
|
||||
}
|
||||
MutexLockState::Acquiring(lock) => {
|
||||
self.lock_state = MutexLockState::Acquired(futures::ready!(lock.poll_unpin(cx)))
|
||||
}
|
||||
MutexLockState::Acquired(_) => return Poll::Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
self.channel
|
||||
.poll_reserve(cx)
|
||||
.map_err(|_| "Context service channel closed".into())
|
||||
}
|
||||
|
||||
fn call(&mut self, req: BlockChainContextRequest) -> Self::Future {
|
||||
let MutexLockState::Acquired(mut internal_blockchain_context) =
|
||||
std::mem::replace(&mut self.lock_state, MutexLockState::Locked)
|
||||
else {
|
||||
panic!("poll_ready() was not called first!")
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let req = task::ContextTaskRequest {
|
||||
req,
|
||||
tx,
|
||||
span: tracing::Span::current(),
|
||||
};
|
||||
|
||||
let res = self.channel.send_item(req);
|
||||
|
||||
async move {
|
||||
let InternalBlockChainContext {
|
||||
current_validity_token,
|
||||
current_reorg_token,
|
||||
difficulty_cache,
|
||||
weight_cache,
|
||||
rx_seed_cache,
|
||||
hardfork_state,
|
||||
chain_height,
|
||||
top_block_hash,
|
||||
already_generated_coins,
|
||||
} = internal_blockchain_context.deref_mut();
|
||||
|
||||
let res = match req {
|
||||
BlockChainContextRequest::GetContext => {
|
||||
let current_hf = hardfork_state.current_hardfork();
|
||||
|
||||
BlockChainContextResponse::Context(BlockChainContext {
|
||||
validity_token: current_validity_token.clone(),
|
||||
raw: RawBlockChainContext {
|
||||
context_to_verify_block: ContextToVerifyBlock {
|
||||
median_weight_for_block_reward: weight_cache
|
||||
.median_for_block_reward(¤t_hf),
|
||||
effective_median_weight: weight_cache
|
||||
.effective_median_block_weight(¤t_hf),
|
||||
top_hash: *top_block_hash,
|
||||
median_block_timestamp: difficulty_cache.median_timestamp(
|
||||
usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
|
||||
),
|
||||
chain_height: *chain_height,
|
||||
current_hf,
|
||||
next_difficulty: difficulty_cache.next_difficulty(¤t_hf),
|
||||
already_generated_coins: *already_generated_coins,
|
||||
},
|
||||
rx_vms: rx_seed_cache.get_vms(),
|
||||
cumulative_difficulty: difficulty_cache.cumulative_difficulty(),
|
||||
median_long_term_weight: weight_cache.median_long_term_weight(),
|
||||
top_block_timestamp: difficulty_cache.top_block_timestamp(),
|
||||
re_org_token: current_reorg_token.clone(),
|
||||
},
|
||||
})
|
||||
}
|
||||
BlockChainContextRequest::BatchGetDifficulties(blocks) => {
|
||||
let next_diffs = difficulty_cache
|
||||
.next_difficulties(blocks, &hardfork_state.current_hardfork());
|
||||
BlockChainContextResponse::BatchDifficulties(next_diffs)
|
||||
}
|
||||
BlockChainContextRequest::NewRXVM(vm) => {
|
||||
rx_seed_cache.add_vm(vm);
|
||||
BlockChainContextResponse::Ok
|
||||
}
|
||||
BlockChainContextRequest::Update(new) => {
|
||||
// Cancel the validity token and replace it with a new one.
|
||||
std::mem::replace(current_validity_token, ValidityToken::new())
|
||||
.set_data_invalid();
|
||||
|
||||
difficulty_cache.new_block(
|
||||
new.height,
|
||||
new.timestamp,
|
||||
new.cumulative_difficulty,
|
||||
);
|
||||
|
||||
weight_cache.new_block(new.height, new.weight, new.long_term_weight);
|
||||
|
||||
hardfork_state.new_block(new.vote, new.height);
|
||||
|
||||
rx_seed_cache
|
||||
.new_block(
|
||||
new.height,
|
||||
&new.new_top_hash,
|
||||
&hardfork_state.current_hardfork(),
|
||||
)
|
||||
.await;
|
||||
|
||||
*chain_height = new.height + 1;
|
||||
*top_block_hash = new.new_top_hash;
|
||||
*already_generated_coins =
|
||||
already_generated_coins.saturating_add(new.generated_coins);
|
||||
|
||||
BlockChainContextResponse::Ok
|
||||
}
|
||||
};
|
||||
|
||||
Ok(res)
|
||||
res.map_err(|_| "Context service closed.")?;
|
||||
rx.await.expect("Oneshot closed without response!")
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
|
|
|
@ -1,3 +1,11 @@
|
|||
//! Difficulty Module
|
||||
//!
|
||||
//! This module handles keeping track of the data required to calculate block difficulty.
|
||||
//! This data is currently the cumulative difficulty of each block and its timestamp.
|
||||
//!
|
||||
//! The timestamps are also used in other consensus rules so instead of duplicating the same
|
||||
//! data in a different cache, the timestamps needed are retrieved from here.
|
||||
//!
|
||||
use std::{collections::VecDeque, ops::Range};
|
||||
|
||||
use tower::ServiceExt;
|
||||
|
@ -27,6 +35,10 @@ pub struct DifficultyCacheConfig {
|
|||
}
|
||||
|
||||
impl DifficultyCacheConfig {
|
||||
/// Create a new difficulty cache config.
|
||||
///
|
||||
/// # Notes
|
||||
/// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead.
|
||||
pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig {
|
||||
DifficultyCacheConfig { window, cut, lag }
|
||||
}
|
||||
|
@ -41,7 +53,9 @@ impl DifficultyCacheConfig {
|
|||
self.window - 2 * self.cut
|
||||
}
|
||||
|
||||
pub fn main_net() -> DifficultyCacheConfig {
|
||||
/// Returns the config needed for [`Mainnet`](cuprate_helper::network::Network::Mainnet). This is also the
|
||||
/// config for all other current networks.
|
||||
pub const fn main_net() -> DifficultyCacheConfig {
|
||||
DifficultyCacheConfig {
|
||||
window: DIFFICULTY_WINDOW,
|
||||
cut: DIFFICULTY_CUT,
|
||||
|
@ -66,6 +80,7 @@ pub(crate) struct DifficultyCache {
|
|||
}
|
||||
|
||||
impl DifficultyCache {
|
||||
/// Initialize the difficulty cache from the specified chain height.
|
||||
#[instrument(name = "init_difficulty_cache", level = "info", skip(database, config))]
|
||||
pub async fn init_from_chain_height<D: Database + Clone>(
|
||||
chain_height: u64,
|
||||
|
@ -100,13 +115,19 @@ impl DifficultyCache {
|
|||
Ok(diff)
|
||||
}
|
||||
|
||||
/// Add a new block to the difficulty cache.
|
||||
pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) {
|
||||
assert_eq!(self.last_accounted_height + 1, height);
|
||||
self.last_accounted_height += 1;
|
||||
|
||||
tracing::debug!(
|
||||
"Accounting for new blocks timestamp ({timestamp}) and cumulative_difficulty ({cumulative_difficulty})",
|
||||
);
|
||||
|
||||
self.timestamps.push_back(timestamp);
|
||||
self.cumulative_difficulties
|
||||
.push_back(cumulative_difficulty);
|
||||
|
||||
if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() {
|
||||
self.timestamps.pop_front();
|
||||
self.cumulative_difficulties.pop_front();
|
||||
|
@ -117,47 +138,28 @@ impl DifficultyCache {
|
|||
///
|
||||
/// See: <https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty>
|
||||
pub fn next_difficulty(&self, hf: &HardFork) -> u128 {
|
||||
if self.timestamps.len() <= 1 {
|
||||
return 1;
|
||||
}
|
||||
|
||||
let mut timestamps = self.timestamps.clone();
|
||||
if timestamps.len() > self.config.window {
|
||||
// remove the lag.
|
||||
timestamps.drain(self.config.window..);
|
||||
};
|
||||
let timestamps_slice = timestamps.make_contiguous();
|
||||
|
||||
let (window_start, window_end) = get_window_start_and_end(
|
||||
timestamps_slice.len(),
|
||||
self.config.accounted_window_len(),
|
||||
self.config.window,
|
||||
);
|
||||
|
||||
// We don't sort the whole timestamp list
|
||||
let mut time_span = u128::from(
|
||||
*timestamps_slice.select_nth_unstable(window_end - 1).1
|
||||
- *timestamps_slice.select_nth_unstable(window_start).1,
|
||||
);
|
||||
|
||||
let windowed_work = self.cumulative_difficulties[window_end - 1]
|
||||
- self.cumulative_difficulties[window_start];
|
||||
|
||||
if time_span == 0 {
|
||||
time_span = 1;
|
||||
}
|
||||
|
||||
// TODO: do checked operations here and unwrap so we don't silently overflow?
|
||||
(windowed_work * hf.block_time().as_secs() as u128 + time_span - 1) / time_span
|
||||
next_difficulty(
|
||||
&self.config,
|
||||
&self.timestamps,
|
||||
&self.cumulative_difficulties,
|
||||
hf,
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns the difficulties for multiple next blocks, using the provided timestamps and hard-forks when needed.
|
||||
///
|
||||
/// The first difficulty will be the same as the difficulty from [`DifficultyCache::next_difficulty`] after that the
|
||||
/// first timestamp and hf will be applied to the cache and the difficulty from that will be added to the list.
|
||||
///
|
||||
/// After all timestamps and hfs have been dealt with the cache will be returned back to its original state and the
|
||||
/// difficulties will be returned.
|
||||
pub fn next_difficulties(
|
||||
&mut self,
|
||||
&self,
|
||||
blocks: Vec<(u64, HardFork)>,
|
||||
current_hf: &HardFork,
|
||||
) -> Vec<u128> {
|
||||
let new_timestamps_len = blocks.len();
|
||||
let initial_len = self.timestamps.len();
|
||||
let mut timestamps = self.timestamps.clone();
|
||||
let mut cumulative_difficulties = self.cumulative_difficulties.clone();
|
||||
|
||||
let mut difficulties = Vec::with_capacity(blocks.len() + 1);
|
||||
|
||||
|
@ -166,30 +168,24 @@ impl DifficultyCache {
|
|||
let mut diff_info_popped = Vec::new();
|
||||
|
||||
for (new_timestamp, hf) in blocks {
|
||||
self.timestamps.push_back(new_timestamp);
|
||||
self.cumulative_difficulties
|
||||
.push_back(self.cumulative_difficulty() + *difficulties.last().unwrap());
|
||||
if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() {
|
||||
timestamps.push_back(new_timestamp);
|
||||
|
||||
let last_cum_diff = cumulative_difficulties.back().copied().unwrap_or(1);
|
||||
cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap());
|
||||
|
||||
if u64::try_from(timestamps.len()).unwrap() > self.config.total_block_count() {
|
||||
diff_info_popped.push((
|
||||
self.timestamps.pop_front().unwrap(),
|
||||
self.cumulative_difficulties.pop_front().unwrap(),
|
||||
timestamps.pop_front().unwrap(),
|
||||
cumulative_difficulties.pop_front().unwrap(),
|
||||
));
|
||||
}
|
||||
|
||||
difficulties.push(self.next_difficulty(&hf));
|
||||
}
|
||||
|
||||
self.cumulative_difficulties.drain(
|
||||
self.cumulative_difficulties
|
||||
.len()
|
||||
.saturating_sub(new_timestamps_len)..,
|
||||
);
|
||||
self.timestamps
|
||||
.drain(self.timestamps.len().saturating_sub(new_timestamps_len)..);
|
||||
|
||||
for (timestamp, cum_dif) in diff_info_popped.into_iter().take(initial_len).rev() {
|
||||
self.timestamps.push_front(timestamp);
|
||||
self.cumulative_difficulties.push_front(cum_dif);
|
||||
difficulties.push(next_difficulty(
|
||||
&self.config,
|
||||
×tamps,
|
||||
&cumulative_difficulties,
|
||||
&hf,
|
||||
));
|
||||
}
|
||||
|
||||
difficulties
|
||||
|
@ -227,11 +223,55 @@ impl DifficultyCache {
|
|||
self.cumulative_difficulties.back().copied().unwrap_or(1)
|
||||
}
|
||||
|
||||
/// Returns the top block's timestamp, returns [`None`] if the top block is the genesis block.
|
||||
pub fn top_block_timestamp(&self) -> Option<u64> {
|
||||
self.timestamps.back().copied()
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculates the next difficulty with the inputted config/timestamps/cumulative_difficulties.
|
||||
fn next_difficulty(
|
||||
config: &DifficultyCacheConfig,
|
||||
timestamps: &VecDeque<u64>,
|
||||
cumulative_difficulties: &VecDeque<u128>,
|
||||
hf: &HardFork,
|
||||
) -> u128 {
|
||||
if timestamps.len() <= 1 {
|
||||
return 1;
|
||||
}
|
||||
|
||||
let mut timestamps = timestamps.clone();
|
||||
|
||||
if timestamps.len() > config.window {
|
||||
// remove the lag.
|
||||
timestamps.drain(config.window..);
|
||||
};
|
||||
let timestamps_slice = timestamps.make_contiguous();
|
||||
|
||||
let (window_start, window_end) = get_window_start_and_end(
|
||||
timestamps_slice.len(),
|
||||
config.accounted_window_len(),
|
||||
config.window,
|
||||
);
|
||||
|
||||
// We don't sort the whole timestamp list
|
||||
let mut time_span = u128::from(
|
||||
*timestamps_slice.select_nth_unstable(window_end - 1).1
|
||||
- *timestamps_slice.select_nth_unstable(window_start).1,
|
||||
);
|
||||
|
||||
let windowed_work =
|
||||
cumulative_difficulties[window_end - 1] - cumulative_difficulties[window_start];
|
||||
|
||||
if time_span == 0 {
|
||||
time_span = 1;
|
||||
}
|
||||
|
||||
// TODO: do checked operations here and unwrap so we don't silently overflow?
|
||||
(windowed_work * hf.block_time().as_secs() as u128 + time_span - 1) / time_span
|
||||
}
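To make the final expression concrete, a small illustration with made-up numbers (the 120-second target is assumed to be what `hf.block_time().as_secs()` returns for current hard-forks):

// Illustration only: the next difficulty is a ceiling division of
// windowed_work * target_block_time by the windowed time span.
let windowed_work: u128 = 1_000_000;
let target_seconds: u128 = 120; // assumed hf.block_time().as_secs()
let time_span: u128 = 118_000;

let next = (windowed_work * target_seconds + time_span - 1) / time_span;
assert_eq!(next, 1_017); // ceil(120_000_000 / 118_000)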
|
||||
/// Get the start and end of the window to calculate difficulty.
|
||||
fn get_window_start_and_end(
|
||||
window_len: usize,
|
||||
accounted_window: usize,
|
||||
|
@ -253,6 +293,7 @@ fn get_window_start_and_end(
|
|||
}
|
||||
}
|
||||
|
||||
/// Returns the timestamps and cumulative difficulty for the blocks with heights in the specified range.
|
||||
#[instrument(name = "get_blocks_timestamps", skip(database), level = "info")]
|
||||
async fn get_blocks_in_pow_info<D: Database + Clone>(
|
||||
database: D,
|
||||
|
|
|
@ -3,11 +3,13 @@ use std::ops::Range;
|
|||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use monero_consensus::{HFVotes, HFsInfo, HardFork};
|
||||
use cuprate_consensus_rules::{HFVotes, HFsInfo, HardFork};
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
|
||||
|
||||
// https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork
|
||||
/// The default amount of hard-fork votes to track to decide on activation of a hard-fork.
|
||||
///
|
||||
/// ref: <https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork>
|
||||
const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a week
|
||||
|
||||
/// Configuration for hard-forks.
|
||||
|
@ -21,6 +23,7 @@ pub struct HardForkConfig {
|
|||
}
|
||||
|
||||
impl HardForkConfig {
|
||||
/// Config for main-net.
|
||||
pub const fn main_net() -> HardForkConfig {
|
||||
Self {
|
||||
info: HFsInfo::main_net(),
|
||||
|
@ -28,6 +31,7 @@ impl HardForkConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Config for stage-net.
|
||||
pub const fn stage_net() -> HardForkConfig {
|
||||
Self {
|
||||
info: HFsInfo::stage_net(),
|
||||
|
@ -35,6 +39,7 @@ impl HardForkConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Config for test-net.
|
||||
pub const fn test_net() -> HardForkConfig {
|
||||
Self {
|
||||
info: HFsInfo::test_net(),
|
||||
|
@ -46,15 +51,20 @@ impl HardForkConfig {
|
|||
/// A struct that keeps track of the current hard-fork and current votes.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct HardForkState {
|
||||
/// The current active hard-fork.
|
||||
pub(crate) current_hardfork: HardFork,
|
||||
|
||||
/// The hard-fork config.
|
||||
pub(crate) config: HardForkConfig,
|
||||
/// The votes in the current window.
|
||||
pub(crate) votes: HFVotes,
|
||||
|
||||
/// The last block height accounted for.
|
||||
pub(crate) last_height: u64,
|
||||
}
|
||||
|
||||
impl HardForkState {
|
||||
/// Initialize the [`HardForkState`] from the specified chain height.
|
||||
#[instrument(name = "init_hardfork_state", skip(config, database), level = "info")]
|
||||
pub async fn init_from_chain_height<D: Database + Clone>(
|
||||
chain_height: u64,
|
||||
|
@ -105,7 +115,10 @@ impl HardForkState {
|
|||
Ok(hfs)
|
||||
}
|
||||
|
||||
/// Add a new block to the cache.
|
||||
pub fn new_block(&mut self, vote: HardFork, height: u64) {
|
||||
// We don't _need_ to take in `height` but it's for safety, so we don't silently lose track
|
||||
// of blocks.
|
||||
assert_eq!(self.last_height + 1, height);
|
||||
self.last_height += 1;
|
||||
|
||||
|
@ -115,6 +128,7 @@ impl HardForkState {
|
|||
vote
|
||||
);
|
||||
|
||||
// This function remove votes outside the window as well.
|
||||
self.votes.add_vote_for_hf(&vote);
|
||||
|
||||
if height > self.config.window {
|
||||
|
@ -136,11 +150,13 @@ impl HardForkState {
|
|||
);
|
||||
}
|
||||
|
||||
/// Returns the current hard-fork.
|
||||
pub fn current_hardfork(&self) -> HardFork {
|
||||
self.current_hardfork
|
||||
}
|
||||
}
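A hedged usage sketch of the state above: construction mirrors the call made in the context task, and `new_block` heights must stay contiguous. The variable names and the `V16` vote are illustrative assumptions.

// `database` is assumed to implement this crate's `Database` trait.
let mut hfs =
    HardForkState::init_from_chain_height(chain_height, HardForkConfig::main_net(), database)
        .await?;

// The next block must be at `chain_height`; `new_block` panics on height gaps.
hfs.new_block(HardFork::V16, chain_height);

let current = hfs.current_hardfork();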
|
||||
|
||||
/// Returns the block votes for blocks in the specified range.
|
||||
#[instrument(name = "get_votes", skip(database))]
|
||||
async fn get_votes_in_range<D: Database>(
|
||||
database: D,
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
//! RandomX VM Cache
|
||||
//!
|
||||
//! This module keeps track of the RandomX VM needed to calculate the next block's PoW, if the block needs a RandomX VM, and potentially
|
||||
//! more VMs around this height.
|
||||
//!
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
sync::Arc,
|
||||
|
@ -8,26 +13,34 @@ use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner};
|
|||
use rayon::prelude::*;
|
||||
use thread_local::ThreadLocal;
|
||||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use monero_consensus::{
|
||||
use cuprate_consensus_rules::{
|
||||
blocks::{is_randomx_seed_height, RandomX, RX_SEEDHASH_EPOCH_BLOCKS},
|
||||
HardFork,
|
||||
};
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
|
||||
|
||||
/// The amount of randomX VMs to keep in the cache.
|
||||
const RX_SEEDS_CACHED: usize = 2;
|
||||
|
||||
/// A multithreaded randomX VM.
|
||||
#[derive(Debug)]
|
||||
pub struct RandomXVM {
|
||||
/// These RandomX VMs all share the same cache.
|
||||
vms: ThreadLocal<VMInner>,
|
||||
/// The RandomX cache.
|
||||
cache: RandomXCache,
|
||||
/// The flags used to start the RandomX VMs.
|
||||
flags: RandomXFlag,
|
||||
}
|
||||
|
||||
impl RandomXVM {
|
||||
/// Create a new multithreaded randomX VM with the provided seed.
|
||||
pub fn new(seed: &[u8; 32]) -> Result<Self, RandomXError> {
|
||||
// TODO: allow passing in flags.
|
||||
let flags = RandomXFlag::get_recommended_flags();
|
||||
|
||||
let cache = RandomXCache::new(flags, seed.as_slice())?;
|
||||
|
@ -51,15 +64,21 @@ impl RandomX for RandomXVM {
|
|||
}
|
||||
}
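A brief, hedged usage sketch of the VM type above; the seed and hashing blob are placeholders, and error handling is collapsed with `?`.

// One `RandomXVM` can be shared across rayon worker threads: each thread lazily
// creates its own inner VM from the shared RandomX cache.
// (`calculate_hash` comes from the `RandomX` trait in cuprate-consensus-rules.)
let vm = RandomXVM::new(&[0u8; 32])?;
let pow_hash: [u8; 32] = vm.calculate_hash(b"block hashing blob")?;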
|
||||
|
||||
/// The randomX VMs cache, which keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a
/// couple more VMs around this height.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RandomXVMCache {
|
||||
/// The top [`RX_SEEDS_CACHED`] RX seeds.
|
||||
pub(crate) seeds: VecDeque<(u64, [u8; 32])>,
|
||||
/// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
|
||||
pub(crate) vms: HashMap<u64, Arc<RandomXVM>>,
|
||||
|
||||
/// A single cached VM that was given to us by another part of Cuprate.
|
||||
pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVM>)>,
|
||||
}
|
||||
|
||||
impl RandomXVMCache {
|
||||
#[instrument(name = "init_rx_vm_cache", level = "info", skip(database))]
|
||||
pub async fn init_from_chain_height<D: Database + Clone>(
|
||||
chain_height: u64,
|
||||
hf: &HardFork,
|
||||
|
@ -68,9 +87,12 @@ impl RandomXVMCache {
|
|||
let seed_heights = get_last_rx_seed_heights(chain_height - 1, RX_SEEDS_CACHED);
|
||||
let seed_hashes = get_block_hashes(seed_heights.clone(), database).await?;
|
||||
|
||||
tracing::debug!("last {RX_SEEDS_CACHED} randomX seed heights: {seed_heights:?}",);
|
||||
|
||||
let seeds: VecDeque<(u64, [u8; 32])> = seed_heights.into_iter().zip(seed_hashes).collect();
|
||||
|
||||
let vms = if hf >= &HardFork::V12 {
|
||||
tracing::debug!("Creating RandomX VMs");
|
||||
let seeds_clone = seeds.clone();
|
||||
rayon_spawn_async(move || {
|
||||
seeds_clone
|
||||
|
@ -85,6 +107,7 @@ impl RandomXVMCache {
|
|||
})
|
||||
.await
|
||||
} else {
|
||||
tracing::debug!("We are before hard-fork 12 randomX VMs are not needed.");
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
|
@ -95,18 +118,25 @@ impl RandomXVMCache {
|
|||
})
|
||||
}
|
||||
|
||||
/// Add a randomX VM to the cache, with the seed it was created with.
|
||||
pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVM>)) {
|
||||
self.cached_vm.replace(vm);
|
||||
}
|
||||
|
||||
/// Get the RandomX VMs.
|
||||
pub fn get_vms(&self) -> HashMap<u64, Arc<RandomXVM>> {
|
||||
self.vms.clone()
|
||||
}
|
||||
|
||||
/// Add a new block to the VM cache.
|
||||
///
|
||||
/// `hash` is the block hash, not the block's PoW hash.
|
||||
pub async fn new_block(&mut self, height: u64, hash: &[u8; 32], hf: &HardFork) {
|
||||
let should_make_vms = hf >= &HardFork::V12;
|
||||
if should_make_vms && self.vms.len() != self.seeds.len() {
|
||||
// this will only happen when syncing and rx activates.
|
||||
tracing::debug!("RandomX has activated, initialising VMs");
|
||||
|
||||
let seeds_clone = self.seeds.clone();
|
||||
self.vms = rayon_spawn_async(move || {
|
||||
seeds_clone
|
||||
|
@ -123,12 +153,21 @@ impl RandomXVMCache {
|
|||
}
|
||||
|
||||
if is_randomx_seed_height(height) {
|
||||
tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",);
|
||||
|
||||
self.seeds.push_front((height, *hash));
|
||||
|
||||
if should_make_vms {
|
||||
let new_vm = 'new_vm_block: {
|
||||
tracing::debug!(
|
||||
"Past hard-fork 12 initializing VM for seed: {}",
|
||||
hex::encode(hash)
|
||||
);
|
||||
|
||||
// Check if we have been given the RX VM from another part of Cuprate.
|
||||
if let Some((cached_hash, cached_vm)) = self.cached_vm.take() {
|
||||
if &cached_hash == hash {
|
||||
tracing::debug!("VM was already created.");
|
||||
break 'new_vm_block cached_vm;
|
||||
}
|
||||
};
|
||||
|
@ -153,6 +192,8 @@ impl RandomXVMCache {
|
|||
}
|
||||
}
|
||||
|
||||
/// Get the last `amount` of RX seed heights. The top height returned here will not necessarily be the seed for the RX VM
/// of the top block in the chain, as seeds activate with some lag.
|
||||
pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize) -> Vec<u64> {
|
||||
let mut seeds = Vec::with_capacity(amount);
|
||||
if is_randomx_seed_height(last_height) {
|
||||
|
@ -174,6 +215,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize)
|
|||
seeds
|
||||
}
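To illustrate the shape of the result, a small self-contained sketch of the same walk-down, assuming a 2048-block seed epoch and ignoring the activation lag mentioned above; the epoch value and the helper name are assumptions, not the crate's constants.

const EPOCH: u64 = 2048; // assumed stand-in for RX_SEEDHASH_EPOCH_BLOCKS

fn last_seed_heights(mut last_height: u64, amount: usize) -> Vec<u64> {
    let mut seeds = Vec::with_capacity(amount);
    for _ in 0..amount {
        // Closest seed height at or below `last_height`.
        let seed_height = (last_height / EPOCH) * EPOCH;
        seeds.push(seed_height);
        if seed_height == 0 {
            break;
        }
        last_height = seed_height - 1;
    }
    seeds
}

fn main() {
    // A chain tip just past the third epoch boundary yields the two previous seed heights.
    assert_eq!(last_seed_heights(6_200, 2), vec![6_144, 4_096]);
}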
|
||||
|
||||
/// Gets the block hashes for the heights specified.
|
||||
async fn get_block_hashes<D: Database + Clone>(
|
||||
heights: Vec<u64>,
|
||||
database: D,
|
||||
|
|
232
consensus/src/context/task.rs
Normal file
|
@ -0,0 +1,232 @@
|
|||
//! Context Task
|
||||
//!
|
||||
//! This module contains the async task that handles keeping track of blockchain context.
|
||||
//! It holds all the context caches and handles [`tower::Service`] requests.
|
||||
//!
|
||||
use futures::channel::oneshot;
|
||||
use tokio::sync::mpsc;
|
||||
use tower::ServiceExt;
|
||||
use tracing::Instrument;
|
||||
|
||||
use cuprate_consensus_rules::blocks::ContextToVerifyBlock;
|
||||
|
||||
use super::{
|
||||
difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest,
|
||||
BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken,
|
||||
BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW,
|
||||
};
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError};
|
||||
|
||||
/// A request from the context service to the context task.
|
||||
pub(super) struct ContextTaskRequest {
|
||||
/// The request.
|
||||
pub req: BlockChainContextRequest,
|
||||
/// The response channel.
|
||||
pub tx: oneshot::Sender<Result<BlockChainContextResponse, tower::BoxError>>,
|
||||
/// The tracing span of the requester.
|
||||
pub span: tracing::Span,
|
||||
}
|
||||
|
||||
/// The Context task that keeps the blockchain context and handles requests.
|
||||
pub struct ContextTask {
|
||||
/// A token used to invalidate previous contexts when a new
|
||||
/// block is added to the chain.
|
||||
current_validity_token: ValidityToken,
|
||||
|
||||
/// The difficulty cache.
|
||||
difficulty_cache: difficulty::DifficultyCache,
|
||||
/// The weight cache.
|
||||
weight_cache: weight::BlockWeightsCache,
|
||||
/// The RX VM cache.
|
||||
rx_vm_cache: rx_vms::RandomXVMCache,
|
||||
/// The hard-fork state cache.
|
||||
hardfork_state: hardforks::HardForkState,
|
||||
|
||||
/// The current chain height.
|
||||
chain_height: u64,
|
||||
/// The top block hash.
|
||||
top_block_hash: [u8; 32],
|
||||
/// The total amount of coins generated.
|
||||
already_generated_coins: u64,
|
||||
}
|
||||
|
||||
impl ContextTask {
|
||||
/// Initialize the [`ContextTask`]; this will need to pull a lot of data from the database, so it may take a
/// while to complete.
|
||||
pub async fn init_context<D>(
|
||||
cfg: ContextConfig,
|
||||
mut database: D,
|
||||
) -> Result<ContextTask, ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Send + Sync + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
let ContextConfig {
|
||||
difficulty_cfg,
|
||||
weights_config,
|
||||
hard_fork_cfg,
|
||||
} = cfg;
|
||||
|
||||
tracing::debug!("Initialising blockchain context");
|
||||
|
||||
let DatabaseResponse::ChainHeight(chain_height, top_block_hash) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::ChainHeight)
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
let DatabaseResponse::GeneratedCoins(already_generated_coins) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::GeneratedCoins)
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
let db = database.clone();
|
||||
let hardfork_state_handle = tokio::spawn(async move {
|
||||
hardforks::HardForkState::init_from_chain_height(chain_height, hard_fork_cfg, db).await
|
||||
});
|
||||
|
||||
let db = database.clone();
|
||||
let difficulty_cache_handle = tokio::spawn(async move {
|
||||
difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db)
|
||||
.await
|
||||
});
|
||||
|
||||
let db = database.clone();
|
||||
let weight_cache_handle = tokio::spawn(async move {
|
||||
weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db)
|
||||
.await
|
||||
});
|
||||
|
||||
// Wait for the hardfork state to finish first as we need it to start the randomX VM cache.
|
||||
let hardfork_state = hardfork_state_handle.await.unwrap()?;
|
||||
let current_hf = hardfork_state.current_hardfork();
|
||||
|
||||
let db = database.clone();
|
||||
let rx_seed_handle = tokio::spawn(async move {
|
||||
rx_vms::RandomXVMCache::init_from_chain_height(chain_height, ¤t_hf, db).await
|
||||
});
|
||||
|
||||
let context_svc = ContextTask {
|
||||
current_validity_token: ValidityToken::new(),
|
||||
difficulty_cache: difficulty_cache_handle.await.unwrap()?,
|
||||
weight_cache: weight_cache_handle.await.unwrap()?,
|
||||
rx_vm_cache: rx_seed_handle.await.unwrap()?,
|
||||
hardfork_state,
|
||||
chain_height,
|
||||
already_generated_coins,
|
||||
top_block_hash,
|
||||
};
|
||||
|
||||
Ok(context_svc)
|
||||
}
|
||||
|
||||
/// Handles a [`BlockChainContextRequest`] and returns a [`BlockChainContextResponse`].
|
||||
pub async fn handle_req(
|
||||
&mut self,
|
||||
req: BlockChainContextRequest,
|
||||
) -> Result<BlockChainContextResponse, tower::BoxError> {
|
||||
Ok(match req {
|
||||
BlockChainContextRequest::GetContext => {
|
||||
tracing::debug!("Getting blockchain context");
|
||||
|
||||
let current_hf = self.hardfork_state.current_hardfork();
|
||||
|
||||
BlockChainContextResponse::Context(BlockChainContext {
|
||||
validity_token: self.current_validity_token.clone(),
|
||||
raw: RawBlockChainContext {
|
||||
context_to_verify_block: ContextToVerifyBlock {
|
||||
median_weight_for_block_reward: self
|
||||
.weight_cache
|
||||
.median_for_block_reward(¤t_hf),
|
||||
effective_median_weight: self
|
||||
.weight_cache
|
||||
.effective_median_block_weight(¤t_hf),
|
||||
top_hash: self.top_block_hash,
|
||||
median_block_timestamp: self.difficulty_cache.median_timestamp(
|
||||
usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
|
||||
),
|
||||
chain_height: self.chain_height,
|
||||
current_hf,
|
||||
next_difficulty: self.difficulty_cache.next_difficulty(¤t_hf),
|
||||
already_generated_coins: self.already_generated_coins,
|
||||
},
|
||||
rx_vms: self.rx_vm_cache.get_vms(),
|
||||
cumulative_difficulty: self.difficulty_cache.cumulative_difficulty(),
|
||||
median_long_term_weight: self.weight_cache.median_long_term_weight(),
|
||||
top_block_timestamp: self.difficulty_cache.top_block_timestamp(),
|
||||
},
|
||||
})
|
||||
}
|
||||
BlockChainContextRequest::BatchGetDifficulties(blocks) => {
|
||||
tracing::debug!("Getting batch difficulties len: {}", blocks.len() + 1);
|
||||
|
||||
let next_diffs = self
|
||||
.difficulty_cache
|
||||
.next_difficulties(blocks, &self.hardfork_state.current_hardfork());
|
||||
BlockChainContextResponse::BatchDifficulties(next_diffs)
|
||||
}
|
||||
BlockChainContextRequest::NewRXVM(vm) => {
|
||||
tracing::debug!("Adding randomX VM to cache.");
|
||||
|
||||
self.rx_vm_cache.add_vm(vm);
|
||||
BlockChainContextResponse::Ok
|
||||
}
|
||||
BlockChainContextRequest::Update(new) => {
|
||||
tracing::debug!(
|
||||
"Updating blockchain cache with new block, height: {}",
|
||||
new.height
|
||||
);
|
||||
// Cancel the validity token and replace it with a new one.
|
||||
std::mem::replace(&mut self.current_validity_token, ValidityToken::new())
|
||||
.set_data_invalid();
|
||||
|
||||
self.difficulty_cache.new_block(
|
||||
new.height,
|
||||
new.timestamp,
|
||||
new.cumulative_difficulty,
|
||||
);
|
||||
|
||||
self.weight_cache
|
||||
.new_block(new.height, new.weight, new.long_term_weight);
|
||||
|
||||
self.hardfork_state.new_block(new.vote, new.height);
|
||||
|
||||
self.rx_vm_cache
|
||||
.new_block(
|
||||
new.height,
|
||||
&new.block_hash,
|
||||
// We use the current hf and not the hf of the top block, as when syncing we need to generate VMs
// on the switch to RX, not after it.
|
||||
&self.hardfork_state.current_hardfork(),
|
||||
)
|
||||
.await;
|
||||
|
||||
self.chain_height = new.height + 1;
|
||||
self.top_block_hash = new.block_hash;
|
||||
self.already_generated_coins = self
|
||||
.already_generated_coins
|
||||
.saturating_add(new.generated_coins);
|
||||
|
||||
BlockChainContextResponse::Ok
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Run the [`ContextTask`]; the task will listen for requests on the passed-in channel. When the channel closes, the
/// task will finish.
|
||||
pub async fn run(mut self, mut rx: mpsc::Receiver<ContextTaskRequest>) {
|
||||
while let Some(req) = rx.recv().await {
|
||||
let res = self.handle_req(req.req).instrument(req.span).await;
|
||||
let _ = req.tx.send(res);
|
||||
}
|
||||
|
||||
tracing::info!("Shutting down blockchain context task.");
|
||||
}
|
||||
}
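For orientation, a hedged sketch of the requesting side: the real wrapper is the context service, but the shape of a request is just a `ContextTaskRequest` sent over the mpsc channel plus a oneshot for the reply. The channel and function names here are illustrative.

async fn get_context(
    to_task: &mpsc::Sender<ContextTaskRequest>,
) -> Result<BlockChainContextResponse, tower::BoxError> {
    let (tx, rx) = oneshot::channel();

    to_task
        .send(ContextTaskRequest {
            req: BlockChainContextRequest::GetContext,
            tx,
            span: tracing::Span::current(),
        })
        .await
        .map_err(|_| "context task closed")?;

    // The task sends back `Result<BlockChainContextResponse, tower::BoxError>`.
    rx.await.map_err(|_| "context task dropped the request")?
}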
|
|
@ -1,3 +1,10 @@
|
|||
//! Tokens
|
||||
//!
|
||||
//! This module contains tokens which keep track of the validity of certain data.
|
||||
//! Currently, there is 1 token:
|
||||
//! - [`ValidityToken`]
|
||||
//!
|
||||
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
/// A token representing if a piece of data is valid.
|
||||
|
@ -7,39 +14,20 @@ pub struct ValidityToken {
|
|||
}
|
||||
|
||||
impl ValidityToken {
|
||||
/// Creates a new [`ValidityToken`]
|
||||
pub fn new() -> ValidityToken {
|
||||
ValidityToken {
|
||||
token: CancellationToken::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the data is still valid.
|
||||
pub fn is_data_valid(&self) -> bool {
|
||||
!self.token.is_cancelled()
|
||||
}
|
||||
|
||||
/// Sets the data to invalid.
|
||||
pub fn set_data_invalid(self) {
|
||||
self.token.cancel()
|
||||
}
|
||||
}
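A short, hedged usage sketch of `ValidityToken`; the clone stands in for the copy handed out inside a `BlockChainContext`.

let token = ValidityToken::new();
let handed_out = token.clone();

assert!(handed_out.is_data_valid());

// When a new block is added, the context task invalidates every outstanding copy.
token.set_data_invalid();
assert!(!handed_out.is_data_valid());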
|
||||
|
||||
/// A token representing if a re-org has happened since its creation.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ReOrgToken {
|
||||
token: CancellationToken,
|
||||
}
|
||||
|
||||
impl ReOrgToken {
|
||||
pub fn new() -> ReOrgToken {
|
||||
ReOrgToken {
|
||||
token: CancellationToken::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reorg_happened(&self) -> bool {
|
||||
self.token.is_cancelled()
|
||||
}
|
||||
|
||||
pub fn set_reorg_happened(self) {
|
||||
self.token.cancel()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,12 +16,14 @@ use rayon::prelude::*;
|
|||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_consensus_rules::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5};
|
||||
use cuprate_helper::{asynch::rayon_spawn_async, num::median};
|
||||
use monero_consensus::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5};
|
||||
|
||||
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError, HardFork};
|
||||
|
||||
/// The short term block weight window.
|
||||
const SHORT_TERM_WINDOW: u64 = 100;
|
||||
/// The long term block weight window.
|
||||
const LONG_TERM_WINDOW: u64 = 100000;
|
||||
|
||||
/// Configuration for the block weight cache.
|
||||
|
@ -33,6 +35,7 @@ pub struct BlockWeightsCacheConfig {
|
|||
}
|
||||
|
||||
impl BlockWeightsCacheConfig {
|
||||
/// Creates a new [`BlockWeightsCacheConfig`]
|
||||
pub const fn new(short_term_window: u64, long_term_window: u64) -> BlockWeightsCacheConfig {
|
||||
BlockWeightsCacheConfig {
|
||||
short_term_window,
|
||||
|
@ -40,6 +43,7 @@ impl BlockWeightsCacheConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Returns the [`BlockWeightsCacheConfig`] for all networks (They are all the same as mainnet).
|
||||
pub fn main_net() -> BlockWeightsCacheConfig {
|
||||
BlockWeightsCacheConfig {
|
||||
short_term_window: SHORT_TERM_WINDOW,
|
||||
|
@ -55,7 +59,9 @@ impl BlockWeightsCacheConfig {
|
|||
/// this data it reduces the load on the database.
|
||||
#[derive(Clone)]
|
||||
pub struct BlockWeightsCache {
|
||||
/// The short term block weights.
|
||||
short_term_block_weights: VecDeque<usize>,
|
||||
/// The long term block weights.
|
||||
long_term_weights: VecDeque<usize>,
|
||||
|
||||
/// The short term block weights sorted so we don't have to sort them every time we need
|
||||
|
@ -68,6 +74,7 @@ pub struct BlockWeightsCache {
|
|||
/// The height of the top block.
|
||||
tip_height: u64,
|
||||
|
||||
/// The block weight config.
|
||||
config: BlockWeightsCacheConfig,
|
||||
}
|
||||
|
||||
|
@ -131,6 +138,7 @@ impl BlockWeightsCache {
|
|||
long_term_weight
|
||||
);
|
||||
|
||||
// add the new block to the `long_term_weights` list and the sorted `cached_sorted_long_term_weights` list.
|
||||
self.long_term_weights.push_back(long_term_weight);
|
||||
match self
|
||||
.cached_sorted_long_term_weights
|
||||
|
@ -141,6 +149,7 @@ impl BlockWeightsCache {
|
|||
.insert(idx, long_term_weight),
|
||||
}
|
||||
|
||||
// If the list now has too many entries remove the oldest.
|
||||
if u64::try_from(self.long_term_weights.len()).unwrap() > self.config.long_term_window {
|
||||
let val = self
|
||||
.long_term_weights
|
||||
|
@ -153,6 +162,7 @@ impl BlockWeightsCache {
|
|||
};
|
||||
}
|
||||
|
||||
// add the block to the short_term_block_weights and the sorted cached_sorted_short_term_weights list.
|
||||
self.short_term_block_weights.push_back(block_weight);
|
||||
match self
|
||||
.cached_sorted_short_term_weights
|
||||
|
@ -163,6 +173,7 @@ impl BlockWeightsCache {
|
|||
.insert(idx, block_weight),
|
||||
}
|
||||
|
||||
// If there are now too many entries remove the oldest.
|
||||
if u64::try_from(self.short_term_block_weights.len()).unwrap()
|
||||
> self.config.short_term_window
|
||||
{
|
||||
|
@ -192,6 +203,7 @@ impl BlockWeightsCache {
|
|||
median(&self.cached_sorted_long_term_weights)
|
||||
}
|
||||
|
||||
/// Returns the median weight over the last [`SHORT_TERM_WINDOW`] blocks, or custom amount of blocks in the config.
|
||||
pub fn median_short_term_weight(&self) -> usize {
|
||||
median(&self.cached_sorted_short_term_weights)
|
||||
}
|
||||
|
@ -221,6 +233,7 @@ impl BlockWeightsCache {
|
|||
}
|
||||
}
|
||||
|
||||
/// Calculates the effective median with the long term and short term median.
|
||||
fn calculate_effective_median_block_weight(
|
||||
hf: &HardFork,
|
||||
median_short_term_weight: usize,
|
||||
|
@ -247,6 +260,7 @@ fn calculate_effective_median_block_weight(
|
|||
effective_median.max(penalty_free_zone(hf))
|
||||
}
|
||||
|
||||
/// Calculates a blocks long term weight.
|
||||
pub fn calculate_block_long_term_weight(
|
||||
hf: &HardFork,
|
||||
block_weight: usize,
|
||||
|
@ -270,6 +284,7 @@ pub fn calculate_block_long_term_weight(
|
|||
min(short_term_constraint, adjusted_block_weight)
|
||||
}
|
||||
|
||||
/// Gets the block weights from the blocks with heights in the range provided.
|
||||
#[instrument(name = "get_block_weights", skip(database))]
|
||||
async fn get_blocks_weight_in_range<D: Database + Clone>(
|
||||
range: Range<u64>,
|
||||
|
@ -290,6 +305,7 @@ async fn get_blocks_weight_in_range<D: Database + Clone>(
|
|||
.collect())
|
||||
}
|
||||
|
||||
/// Gets the block long term weights from the blocks with heights in the range provided.
|
||||
#[instrument(name = "get_long_term_weights", skip(database), level = "info")]
|
||||
async fn get_long_term_weight_in_range<D: Database + Clone>(
|
||||
range: Range<u64>,
|
||||
|
|
|
@ -1,65 +1,64 @@
|
|||
//! Cuprate Consensus
|
||||
//!
|
||||
//! This crate contains 3 [`tower::Service`]s that implement Monero's consensus rules:
|
||||
//!
|
||||
//! - [`BlockChainContextService`] Which handles keeping the current state of the blockchain.
|
||||
//! - [`BlockVerifierService`] Which handles block verification.
|
||||
//! - [`TxVerifierService`] Which handles transaction verification.
|
||||
//!
|
||||
//! This crate is generic over the database which is implemented as a [`tower::Service`]. To
|
||||
//! implement a database you need to have a service which accepts [`DatabaseRequest`] and responds
|
||||
//! with [`DatabaseResponse`].
|
||||
//!
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
future::Future,
|
||||
};
|
||||
|
||||
use monero_consensus::{transactions::OutputOnChain, ConsensusError, HardFork};
|
||||
use cuprate_consensus_rules::{transactions::OutputOnChain, ConsensusError, HardFork};
|
||||
|
||||
mod batch_verifier;
|
||||
pub mod block;
|
||||
pub mod context;
|
||||
pub mod randomx;
|
||||
#[cfg(feature = "binaries")]
|
||||
pub mod rpc;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
pub mod transactions;
|
||||
|
||||
pub use block::{
|
||||
PrePreparedBlock, VerifiedBlockInformation, VerifyBlockRequest, VerifyBlockResponse,
|
||||
BlockVerifierService, PrePreparedBlock, VerifiedBlockInformation, VerifyBlockRequest,
|
||||
VerifyBlockResponse,
|
||||
};
|
||||
pub use context::{
|
||||
initialize_blockchain_context, BlockChainContext, BlockChainContextRequest,
|
||||
BlockChainContextResponse, ContextConfig,
|
||||
BlockChainContextResponse, BlockChainContextService, ContextConfig,
|
||||
};
|
||||
pub use transactions::{VerifyTxRequest, VerifyTxResponse};
|
||||
pub use transactions::{TxVerifierService, VerifyTxRequest, VerifyTxResponse};
|
||||
|
||||
/// An Error returned from one of the consensus services.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum ExtendedConsensusError {
|
||||
/// A consensus error.
|
||||
#[error("{0}")]
|
||||
ConErr(#[from] monero_consensus::ConsensusError),
|
||||
ConErr(#[from] ConsensusError),
|
||||
/// A database error.
|
||||
#[error("Database error: {0}")]
|
||||
DBErr(#[from] tower::BoxError),
|
||||
/// The transactions passed in with this block were not the ones needed.
|
||||
#[error("The transactions passed in with the block are incorrect.")]
|
||||
TxsIncludedWithBlockIncorrect,
|
||||
/// One or more statements in the batch verifier was invalid.
|
||||
#[error("One or more statements in the batch verifier was invalid.")]
|
||||
OneOrMoreBatchVerificationStatementsInvalid,
|
||||
}
|
||||
|
||||
// TODO: instead of (ab)using generic returns return the acc type
|
||||
/// Initialize the 2 verifier [`tower::Service`]s (block and transaction).
|
||||
pub async fn initialize_verifier<D, Ctx>(
|
||||
database: D,
|
||||
ctx_svc: Ctx,
|
||||
) -> Result<
|
||||
(
|
||||
impl tower::Service<
|
||||
VerifyBlockRequest,
|
||||
Response = VerifyBlockResponse,
|
||||
Error = ExtendedConsensusError,
|
||||
Future = impl Future<Output = Result<VerifyBlockResponse, ExtendedConsensusError>>
|
||||
+ Send
|
||||
+ 'static,
|
||||
> + Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
impl tower::Service<
|
||||
VerifyTxRequest,
|
||||
Response = VerifyTxResponse,
|
||||
Error = ExtendedConsensusError,
|
||||
Future = impl Future<Output = Result<VerifyTxResponse, ExtendedConsensusError>>
|
||||
+ Send
|
||||
+ 'static,
|
||||
> + Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
BlockVerifierService<Ctx, TxVerifierService<D>, D>,
|
||||
TxVerifierService<D>,
|
||||
),
|
||||
ConsensusError,
|
||||
>
|
||||
|
@ -76,73 +75,120 @@ where
|
|||
+ 'static,
|
||||
Ctx::Future: Send + 'static,
|
||||
{
|
||||
let tx_svc = transactions::TxVerifierService::new(database.clone());
|
||||
let block_svc = block::BlockVerifierService::new(ctx_svc, tx_svc.clone(), database);
|
||||
let tx_svc = TxVerifierService::new(database.clone());
|
||||
let block_svc = BlockVerifierService::new(ctx_svc, tx_svc.clone(), database);
|
||||
Ok((block_svc, tx_svc))
|
||||
}
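A hedged wiring sketch: the context service is built first, then handed to `initialize_verifier`. `ContextConfig::main_net()` is assumed to exist, mirroring the per-cache `main_net()` constructors shown earlier, and `database` is any service implementing the `Database` trait below.

let ctx_svc =
    initialize_blockchain_context(ContextConfig::main_net(), database.clone()).await?;

let (block_verifier, tx_verifier) = initialize_verifier(database, ctx_svc).await?;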
|
||||
|
||||
/// An internal trait used to represent a database so we don't have to write [`tower::Service`] bounds
|
||||
/// everywhere.
|
||||
pub trait Database:
|
||||
tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>
|
||||
tower::Service<
|
||||
DatabaseRequest,
|
||||
Response = DatabaseResponse,
|
||||
Error = tower::BoxError,
|
||||
Future = Self::Future2,
|
||||
>
|
||||
{
|
||||
type Future2: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static;
|
||||
}
|
||||
|
||||
impl<T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>>
|
||||
Database for T
|
||||
where
|
||||
T::Future: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static,
|
||||
{
|
||||
type Future2 = T::Future;
|
||||
}
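In other words, any `tower::Service` with the right request, response and error types implements `Database` automatically. A minimal sketch with a hypothetical in-memory stub (only `ChainHeight` handled):

use std::task::{Context, Poll};

#[derive(Clone)]
struct StubDb;

impl tower::Service<DatabaseRequest> for StubDb {
    type Response = DatabaseResponse;
    type Error = tower::BoxError;
    type Future = futures::future::Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: DatabaseRequest) -> Self::Future {
        futures::future::ready(match req {
            DatabaseRequest::ChainHeight => Ok(DatabaseResponse::ChainHeight(1, [0; 32])),
            _ => Err("not implemented in this stub".into()),
        })
    }
}

// The blanket impl above means `StubDb` now satisfies any `D: Database` bound.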
|
||||
|
||||
/// An extended block header.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct ExtendedBlockHeader {
|
||||
/// The block's major version.
|
||||
pub version: HardFork,
|
||||
/// The block's vote.
|
||||
pub vote: HardFork,
|
||||
|
||||
/// The block's timestamp.
|
||||
pub timestamp: u64,
|
||||
/// The block's cumulative difficulty.
|
||||
pub cumulative_difficulty: u128,
|
||||
|
||||
/// The block's weight.
|
||||
pub block_weight: usize,
|
||||
/// The block's long term weight.
|
||||
pub long_term_weight: usize,
|
||||
}
|
||||
|
||||
/// A database request to the database [`tower::Service`]
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum DatabaseRequest {
|
||||
/// A block extended header request.
|
||||
/// Must return: [`DatabaseResponse::BlockExtendedHeader`]
|
||||
BlockExtendedHeader(u64),
|
||||
/// A block hash request.
|
||||
/// Must return: [`DatabaseResponse::BlockHash`]
|
||||
BlockHash(u64),
|
||||
|
||||
/// Removes the block hashes that are not in the _main_ chain.
|
||||
///
|
||||
/// This should filter (remove) hashes in alt-blocks as well.
|
||||
FilterUnknownHashes(HashSet<[u8; 32]>),
|
||||
|
||||
/// A request for multiple block extended headers.
|
||||
/// Must return: [`DatabaseResponse::BlockExtendedHeaderInRange`]
|
||||
BlockExtendedHeaderInRange(std::ops::Range<u64>),
|
||||
|
||||
/// A request for the chains height.
|
||||
/// Must return: [`DatabaseResponse::ChainHeight`]
|
||||
ChainHeight,
|
||||
/// A request for the total amount of generated coins.
|
||||
/// Must return: [`DatabaseResponse::GeneratedCoins`]
|
||||
GeneratedCoins,
|
||||
|
||||
/// A request for transaction outputs, this contains a map of amounts to amount indexes.
|
||||
/// Must return: [`DatabaseResponse::Outputs`]
|
||||
Outputs(HashMap<u64, HashSet<u64>>),
|
||||
/// A request for the number of outputs with these amounts.
|
||||
/// Must return: [`DatabaseResponse::NumberOutputsWithAmount`]
|
||||
NumberOutputsWithAmount(Vec<u64>),
|
||||
|
||||
CheckKIsNotSpent(HashSet<[u8; 32]>),
|
||||
|
||||
#[cfg(feature = "binaries")]
|
||||
BlockBatchInRange(std::ops::Range<u64>),
|
||||
/// A request to check if these key images are in the database.
|
||||
/// Must return: [`DatabaseResponse::KeyImagesSpent`]
|
||||
KeyImagesSpent(HashSet<[u8; 32]>),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum DatabaseResponse {
|
||||
/// A block extended header response.
|
||||
BlockExtendedHeader(ExtendedBlockHeader),
|
||||
/// A block hash response.
|
||||
BlockHash([u8; 32]),
|
||||
|
||||
FilteredHashes(HashSet<[u8; 32]>),
|
||||
|
||||
/// A batch block extended header response.
|
||||
BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>),
|
||||
|
||||
/// A chain height response.
|
||||
/// Should contain the chain's height and top block hash.
|
||||
ChainHeight(u64, [u8; 32]),
|
||||
/// Generated coins response.
|
||||
/// Should contain the total amount of coins emitted in all block rewards.
|
||||
GeneratedCoins(u64),
|
||||
|
||||
/// Outputs response.
|
||||
/// Should contain a map of (amounts, amount_idx) -> Output.
|
||||
/// If an output requested does not exist this should *not* be an error; the output
|
||||
/// should just be omitted from the map.
|
||||
Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>),
|
||||
/// Number of outputs response.
|
||||
/// Should contain a map of amounts -> numb outs.
|
||||
/// If there are no outputs with that amount then the number of outs should be zero; *no* amounts
|
||||
/// requested should be omitted.
|
||||
NumberOutputsWithAmount(HashMap<u64, usize>),
|
||||
|
||||
/// Key images spent response.
|
||||
/// Returns `true` if the key images are spent.
|
||||
CheckKIsNotSpent(bool),
|
||||
|
||||
#[cfg(feature = "binaries")]
|
||||
BlockBatchInRange(
|
||||
Vec<(
|
||||
monero_serai::block::Block,
|
||||
Vec<monero_serai::transaction::Transaction>,
|
||||
)>,
|
||||
),
|
||||
KeyImagesSpent(bool),
|
||||
}
|
||||
|
|
|
@ -1,35 +0,0 @@
|
|||
use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner};
|
||||
use thread_local::ThreadLocal;
|
||||
|
||||
use monero_consensus::blocks::RandomX;
|
||||
|
||||
pub struct RandomXVM {
|
||||
vms: ThreadLocal<VMInner>,
|
||||
cache: RandomXCache,
|
||||
flags: RandomXFlag,
|
||||
}
|
||||
|
||||
impl RandomXVM {
|
||||
pub fn new(seed: [u8; 32]) -> Result<Self, RandomXError> {
|
||||
let flags = RandomXFlag::get_recommended_flags();
|
||||
|
||||
let cache = RandomXCache::new(flags, &seed)?;
|
||||
|
||||
Ok(RandomXVM {
|
||||
vms: ThreadLocal::new(),
|
||||
cache,
|
||||
flags,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RandomX for RandomXVM {
|
||||
type Error = RandomXError;
|
||||
|
||||
fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> {
|
||||
self.vms
|
||||
.get_or_try(|| VMInner::new(self.flags, Some(self.cache.clone()), None))?
|
||||
.calculate_hash(buf)
|
||||
.map(|out| out.try_into().unwrap())
|
||||
}
|
||||
}
|
|
@ -1,288 +0,0 @@
|
|||
use std::{
|
||||
cmp::min,
|
||||
collections::{HashMap, HashSet},
|
||||
future::Future,
|
||||
ops::Range,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::{
|
||||
stream::{FuturesOrdered, FuturesUnordered},
|
||||
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tower::{balance::p2c::Balance, ServiceExt};
|
||||
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
|
||||
use crate::{DatabaseRequest, DatabaseResponse};
|
||||
|
||||
pub mod cache;
|
||||
mod connection;
|
||||
mod discover;
|
||||
|
||||
use cache::ScanningCache;
|
||||
|
||||
const MAX_OUTS_PER_RPC: usize = 5000; // the cap for monerod is 5000
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct RpcConfig {
|
||||
pub max_blocks_per_node: u64,
|
||||
pub max_block_headers_per_node: u64,
|
||||
}
|
||||
|
||||
impl RpcConfig {
|
||||
pub fn block_batch_size(&self) -> u64 {
|
||||
self.max_blocks_per_node * 3
|
||||
}
|
||||
|
||||
pub fn new(max_blocks_per_node: u64, max_block_headers_per_node: u64) -> RpcConfig {
|
||||
RpcConfig {
|
||||
max_block_headers_per_node,
|
||||
max_blocks_per_node,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Attempts(u64);
|
||||
|
||||
impl<Req: Clone, Res, E> tower::retry::Policy<Req, Res, E> for Attempts {
|
||||
type Future = futures::future::Ready<Self>;
|
||||
fn retry(&self, _: &Req, result: Result<&Res, &E>) -> Option<Self::Future> {
|
||||
if result.is_err() {
|
||||
if self.0 == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(futures::future::ready(Attempts(self.0 - 1)))
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn clone_request(&self, req: &Req) -> Option<Req> {
|
||||
Some(req.clone())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_rpc_load_balancer(
|
||||
addresses: Vec<String>,
|
||||
cache: Arc<RwLock<ScanningCache>>,
|
||||
config: Arc<std::sync::RwLock<RpcConfig>>,
|
||||
) -> impl tower::Service<
|
||||
DatabaseRequest,
|
||||
Response = DatabaseResponse,
|
||||
Error = tower::BoxError,
|
||||
Future = Pin<
|
||||
Box<dyn Future<Output = Result<DatabaseResponse, tower::BoxError>> + Send + 'static>,
|
||||
>,
|
||||
> + Clone {
|
||||
let (rpc_discoverer_tx, rpc_discoverer_rx) = futures::channel::mpsc::channel(0);
|
||||
|
||||
let rpc_balance = Balance::new(Box::pin(
|
||||
rpc_discoverer_rx.map(Result::<_, tower::BoxError>::Ok),
|
||||
));
|
||||
let rpc_buffer = tower::buffer::Buffer::new(rpc_balance, 50);
|
||||
let rpcs = tower::retry::Retry::new(Attempts(10), rpc_buffer);
|
||||
|
||||
let discover = discover::RPCDiscover {
|
||||
initial_list: addresses,
|
||||
ok_channel: rpc_discoverer_tx,
|
||||
already_connected: Default::default(),
|
||||
cache: cache.clone(),
|
||||
};
|
||||
|
||||
tokio::spawn(discover.run());
|
||||
|
||||
RpcBalancer {
|
||||
rpcs,
|
||||
config,
|
||||
cache,
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RpcBalancer<T: Clone> {
|
||||
rpcs: T,
|
||||
config: Arc<std::sync::RwLock<RpcConfig>>,
|
||||
cache: Arc<RwLock<ScanningCache>>,
|
||||
}
|
||||
|
||||
impl<T> tower::Service<DatabaseRequest> for RpcBalancer<T>
|
||||
where
|
||||
T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static,
|
||||
T::Future: Send + 'static,
|
||||
{
|
||||
type Response = DatabaseResponse;
|
||||
type Error = tower::BoxError;
|
||||
type Future =
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
|
||||
|
||||
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: DatabaseRequest) -> Self::Future {
|
||||
let this = self.rpcs.clone();
|
||||
let config_mutex = self.config.clone();
|
||||
let config = config_mutex.clone();
|
||||
|
||||
let cache = self.cache.clone();
|
||||
|
||||
match req {
|
||||
DatabaseRequest::CheckKIsNotSpent(kis) => async move {
|
||||
Ok(DatabaseResponse::CheckKIsNotSpent(
|
||||
cache.read().await.are_kis_spent(kis),
|
||||
))
|
||||
}
|
||||
.boxed(),
|
||||
DatabaseRequest::GeneratedCoins => async move {
|
||||
Ok(DatabaseResponse::GeneratedCoins(
|
||||
cache.read().await.already_generated_coins,
|
||||
))
|
||||
}
|
||||
.boxed(),
|
||||
DatabaseRequest::NumberOutputsWithAmount(amt) => async move {
|
||||
Ok(DatabaseResponse::NumberOutputsWithAmount(
|
||||
cache.read().await.numb_outs(&amt),
|
||||
))
|
||||
}
|
||||
.boxed(),
|
||||
DatabaseRequest::BlockBatchInRange(range) => {
|
||||
let resp_to_ret = |resp: DatabaseResponse| {
|
||||
let DatabaseResponse::BlockBatchInRange(pow_info) = resp else {
|
||||
panic!("Database sent incorrect response");
|
||||
};
|
||||
pow_info
|
||||
};
|
||||
split_range_request(
|
||||
this,
|
||||
range,
|
||||
DatabaseRequest::BlockBatchInRange,
|
||||
DatabaseResponse::BlockBatchInRange,
|
||||
resp_to_ret,
|
||||
config.read().unwrap().max_blocks_per_node,
|
||||
)
|
||||
.boxed()
|
||||
}
|
||||
DatabaseRequest::BlockExtendedHeaderInRange(range) => {
|
||||
let resp_to_ret = |resp: DatabaseResponse| {
|
||||
let DatabaseResponse::BlockExtendedHeaderInRange(pow_info) = resp else {
|
||||
panic!("Database sent incorrect response");
|
||||
};
|
||||
pow_info
|
||||
};
|
||||
split_range_request(
|
||||
this,
|
||||
range,
|
||||
DatabaseRequest::BlockExtendedHeaderInRange,
|
||||
DatabaseResponse::BlockExtendedHeaderInRange,
|
||||
resp_to_ret,
|
||||
config.read().unwrap().max_block_headers_per_node,
|
||||
)
|
||||
.boxed()
|
||||
}
|
||||
DatabaseRequest::Outputs(outs) => async move {
|
||||
let split_outs = rayon_spawn_async(|| {
|
||||
let mut split_outs: Vec<HashMap<u64, HashSet<u64>>> = Vec::new();
|
||||
let mut i: usize = 0;
|
||||
for (amount, ixs) in outs {
|
||||
if ixs.len() > MAX_OUTS_PER_RPC {
|
||||
for ii in (0..ixs.len()).step_by(MAX_OUTS_PER_RPC) {
|
||||
let mut amt_map = HashSet::with_capacity(MAX_OUTS_PER_RPC);
|
||||
amt_map.extend(ixs.iter().skip(ii).copied().take(MAX_OUTS_PER_RPC));
|
||||
|
||||
let mut map = HashMap::new();
|
||||
map.insert(amount, amt_map);
|
||||
split_outs.push(map);
|
||||
i += 1;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(map) = split_outs.get_mut(i.saturating_sub(1)) {
|
||||
if map.iter().map(|(_, amt_map)| amt_map.len()).sum::<usize>()
|
||||
+ ixs.len()
|
||||
< MAX_OUTS_PER_RPC
|
||||
{
|
||||
assert!(map.insert(amount, ixs).is_none());
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let mut map = HashMap::new();
|
||||
map.insert(amount, ixs);
|
||||
split_outs.push(map);
|
||||
i += 1;
|
||||
}
|
||||
split_outs
|
||||
})
|
||||
.await;
|
||||
|
||||
let mut futs = FuturesUnordered::from_iter(
|
||||
split_outs
|
||||
.into_iter()
|
||||
.map(|map| this.clone().oneshot(DatabaseRequest::Outputs(map))),
|
||||
);
|
||||
|
||||
let mut outs = HashMap::new();
|
||||
|
||||
while let Some(out_response) = futs.next().await {
|
||||
let DatabaseResponse::Outputs(out_response) = out_response? else {
|
||||
panic!("RPC sent incorrect response!");
|
||||
};
|
||||
out_response.into_iter().for_each(|(amt, amt_map)| {
|
||||
outs.entry(amt).or_insert_with(HashMap::new).extend(amt_map)
|
||||
});
|
||||
}
|
||||
Ok(DatabaseResponse::Outputs(outs))
|
||||
}
|
||||
.boxed(),
|
||||
req => this.oneshot(req).boxed(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn split_range_request<T, Ret>(
|
||||
rpc: T,
|
||||
range: Range<u64>,
|
||||
req: impl Fn(Range<u64>) -> DatabaseRequest + Send + 'static,
|
||||
resp: impl FnOnce(Vec<Ret>) -> DatabaseResponse + Send + 'static,
|
||||
resp_to_ret: impl Fn(DatabaseResponse) -> Vec<Ret> + Copy + Send + 'static,
|
||||
max_request_per_rpc: u64,
|
||||
) -> impl Future<Output = Result<DatabaseResponse, tower::BoxError>> + Send + 'static
|
||||
where
|
||||
T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static,
|
||||
T::Future: Send + 'static,
|
||||
Ret: Send + 'static,
|
||||
{
|
||||
let iter = (0..range.clone().count() as u64)
|
||||
.step_by(max_request_per_rpc as usize)
|
||||
.map(|i| {
|
||||
let new_range =
|
||||
(range.start + i)..(min(range.start + i + max_request_per_rpc, range.end));
|
||||
rpc.clone().oneshot(req(new_range)).map_ok(resp_to_ret)
|
||||
});
|
||||
|
||||
let fut = FuturesOrdered::from_iter(iter);
|
||||
|
||||
let mut res = Vec::with_capacity(range.count());
|
||||
|
||||
async move {
|
||||
for mut rpc_res in fut.try_collect::<Vec<Vec<_>>>().await?.into_iter() {
|
||||
res.append(&mut rpc_res)
|
||||
}
|
||||
|
||||
Ok(resp(res))
|
||||
}
|
||||
}
|
|
@ -1,146 +0,0 @@
|
|||
#![cfg(feature = "binaries")]
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
collections::HashSet,
|
||||
fmt::{Display, Formatter},
|
||||
io::{BufWriter, Write},
|
||||
path::Path,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use borsh::{BorshDeserialize, BorshSerialize};
|
||||
use monero_serai::transaction::{Input, Timelock, Transaction};
|
||||
use tracing_subscriber::fmt::MakeWriter;
|
||||
|
||||
use crate::transactions::TransactionVerificationData;
|
||||
|
||||
/// A cache which can keep chain state while scanning.
|
||||
///
|
||||
/// Because we are using an RPC interface with a node, we need to keep track
/// of certain data that the node doesn't hold or give us, like the number
/// of outputs at a certain time.
|
||||
#[derive(Debug, Default, Clone, BorshSerialize, BorshDeserialize)]
|
||||
pub struct ScanningCache {
|
||||
// network: u8,
|
||||
numb_outs: HashMap<u64, usize>,
|
||||
time_locked_out: HashMap<[u8; 32], u64>,
|
||||
kis: HashSet<[u8; 32]>,
|
||||
pub already_generated_coins: u64,
|
||||
/// The height of the *next* block to scan.
|
||||
pub height: u64,
|
||||
}
|
||||
|
||||
impl ScanningCache {
|
||||
pub fn save(&self, file: &Path) -> Result<(), tower::BoxError> {
|
||||
let file = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.create(true)
|
||||
.open(file)?;
|
||||
let mut writer = BufWriter::new(file.make_writer());
|
||||
borsh::to_writer(&mut writer, &self)?;
|
||||
writer.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load(file: &Path) -> Result<ScanningCache, tower::BoxError> {
|
||||
let mut file = std::fs::OpenOptions::new().read(true).open(file)?;
|
||||
|
||||
let data: ScanningCache = borsh::from_reader(&mut file)?;
|
||||
Ok(data)
|
||||
}
|
||||
|
||||
pub fn add_new_block_data(
|
||||
&mut self,
|
||||
generated_coins: u64,
|
||||
miner_tx: &Transaction,
|
||||
txs: &[Arc<TransactionVerificationData>],
|
||||
) {
|
||||
self.add_tx_time_lock(miner_tx.hash(), miner_tx.prefix.timelock);
|
||||
miner_tx.prefix.outputs.iter().for_each(|out| {
|
||||
self.add_outs(miner_tx.prefix.version == 2, out.amount.unwrap_or(0), 1)
|
||||
});
|
||||
|
||||
txs.iter().for_each(|tx| {
|
||||
self.add_tx_time_lock(tx.tx_hash, tx.tx.prefix.timelock);
|
||||
tx.tx.prefix.outputs.iter().for_each(|out| {
|
||||
self.add_outs(tx.tx.prefix.version == 2, out.amount.unwrap_or(0), 1)
|
||||
});
|
||||
|
||||
tx.tx.prefix.inputs.iter().for_each(|inp| match inp {
|
||||
Input::ToKey { key_image, .. } => {
|
||||
assert!(self.kis.insert(key_image.compress().to_bytes()))
|
||||
}
|
||||
_ => unreachable!(),
|
||||
})
|
||||
});
|
||||
|
||||
self.already_generated_coins = self.already_generated_coins.saturating_add(generated_coins);
|
||||
self.height += 1;
|
||||
}
|
||||
|
||||
/// Returns `true` if any of the key images are included in our spent set.
|
||||
pub fn are_kis_spent(&self, kis: HashSet<[u8; 32]>) -> bool {
|
||||
!self.kis.is_disjoint(&kis)
|
||||
}
|
||||
|
||||
pub fn outputs_time_lock(&self, tx: &[u8; 32]) -> Timelock {
|
||||
let time_lock = self.time_locked_out.get(tx).copied().unwrap_or(0);
|
||||
match time_lock {
|
||||
0 => Timelock::None,
|
||||
block if block < 500_000_000 => Timelock::Block(block as usize),
|
||||
time => Timelock::Time(time),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_tx_time_lock(&mut self, tx: [u8; 32], time_lock: Timelock) {
|
||||
match time_lock {
|
||||
Timelock::None => (),
|
||||
lock => {
|
||||
self.time_locked_out.insert(
|
||||
tx,
|
||||
match lock {
|
||||
Timelock::None => unreachable!(),
|
||||
Timelock::Block(x) => x as u64,
|
||||
Timelock::Time(x) => x,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn total_outs(&self) -> usize {
|
||||
self.numb_outs.values().sum()
|
||||
}
|
||||
|
||||
pub fn numb_outs(&self, amounts: &[u64]) -> HashMap<u64, usize> {
|
||||
amounts
|
||||
.iter()
|
||||
.map(|amount| (*amount, *self.numb_outs.get(amount).unwrap_or(&0)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn add_outs(&mut self, is_v2: bool, amount: u64, count: usize) {
|
||||
let amount = if is_v2 { 0 } else { amount };
|
||||
|
||||
if let Some(numb_outs) = self.numb_outs.get_mut(&amount) {
|
||||
*numb_outs += count;
|
||||
} else {
|
||||
self.numb_outs.insert(amount, count);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ScanningCache {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
let rct_outs = *self.numb_outs(&[0]).get(&0).unwrap();
|
||||
let total_outs = self.total_outs();
|
||||
|
||||
f.debug_struct("Cache")
|
||||
.field("next_block", &self.height)
|
||||
.field("rct_outs", &rct_outs)
|
||||
.field("total_outs", &total_outs)
|
||||
.finish()
|
||||
}
|
||||
}
|
|
@ -1,476 +0,0 @@
|
|||
use std::ops::Deref;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
ops::Range,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use curve25519_dalek::edwards::CompressedEdwardsY;
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
StreamExt,
|
||||
};
|
||||
use monero_serai::{
|
||||
block::Block,
|
||||
rpc::{HttpRpc, Rpc},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use monero_wire::common::TransactionBlobs;
|
||||
use rayon::prelude::*;
|
||||
use serde::Deserialize;
|
||||
use serde_json::json;
|
||||
use tokio::{
|
||||
sync::RwLock,
|
||||
task::JoinHandle,
|
||||
time::{timeout, Duration},
|
||||
};
|
||||
use tower::Service;
|
||||
use tracing::{instrument, Instrument};
|
||||
|
||||
use cuprate_helper::asynch::{rayon_spawn_async, InfallibleOneshotReceiver};
|
||||
|
||||
use super::ScanningCache;
|
||||
use crate::{DatabaseRequest, DatabaseResponse, ExtendedBlockHeader, HardFork, OutputOnChain};
|
||||
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(300);
|
||||
const OUTPUTS_TIMEOUT: Duration = Duration::from_secs(50);
|
||||
|
||||
pub struct RpcConnectionSvc {
|
||||
pub(crate) address: String,
|
||||
|
||||
pub(crate) rpc_task_handle: JoinHandle<()>,
|
||||
pub(crate) rpc_task_chan: mpsc::Sender<RpcReq>,
|
||||
}
|
||||
|
||||
impl Service<DatabaseRequest> for RpcConnectionSvc {
|
||||
type Response = DatabaseResponse;
|
||||
type Error = tower::BoxError;
|
||||
type Future = InfallibleOneshotReceiver<Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
if self.rpc_task_handle.is_finished() {
|
||||
return Poll::Ready(Err("RPC task has exited!".into()));
|
||||
}
|
||||
self.rpc_task_chan.poll_ready(cx).map_err(Into::into)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: DatabaseRequest) -> Self::Future {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let req = RpcReq {
|
||||
req,
|
||||
res_chan: tx,
|
||||
span: tracing::info_span!(parent: &tracing::Span::current(), "rpc", addr = &self.address),
|
||||
};
|
||||
|
||||
self.rpc_task_chan
|
||||
.try_send(req)
|
||||
.expect("poll_ready should be called first!");
|
||||
|
||||
rx.into()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct RpcReq {
|
||||
req: DatabaseRequest,
|
||||
res_chan: oneshot::Sender<Result<DatabaseResponse, tower::BoxError>>,
|
||||
span: tracing::Span,
|
||||
}
|
||||
|
||||
pub struct RpcConnection {
|
||||
pub(crate) address: String,
|
||||
|
||||
pub(crate) con: Rpc<HttpRpc>,
|
||||
pub(crate) cache: Arc<RwLock<ScanningCache>>,
|
||||
|
||||
pub(crate) req_chan: mpsc::Receiver<RpcReq>,
|
||||
}
|
||||
|
||||
impl RpcConnection {
|
||||
async fn get_block_hash(&self, height: u64) -> Result<[u8; 32], tower::BoxError> {
|
||||
self.con
|
||||
.get_block_hash(height.try_into().unwrap())
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
async fn get_extended_block_header(
|
||||
&self,
|
||||
height: u64,
|
||||
) -> Result<ExtendedBlockHeader, tower::BoxError> {
|
||||
tracing::info!("Retrieving block info with height: {}", height);
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
struct Response {
|
||||
block_header: BlockInfo,
|
||||
}
|
||||
|
||||
let info = {
|
||||
let res = self
|
||||
.con
|
||||
.json_rpc_call::<Response>(
|
||||
"get_block_header_by_height",
|
||||
Some(json!({"height": height})),
|
||||
)
|
||||
.await?;
|
||||
res.block_header
|
||||
};
|
||||
|
||||
Ok(ExtendedBlockHeader {
|
||||
version: HardFork::from_version(info.major_version)
|
||||
.expect("previously checked block has incorrect version"),
|
||||
vote: HardFork::from_vote(info.minor_version),
|
||||
timestamp: info.timestamp,
|
||||
cumulative_difficulty: u128_from_low_high(
|
||||
info.cumulative_difficulty,
|
||||
info.cumulative_difficulty_top64,
|
||||
),
|
||||
block_weight: info.block_weight,
|
||||
long_term_weight: info.long_term_weight,
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_extended_block_header_in_range(
|
||||
&self,
|
||||
range: Range<u64>,
|
||||
) -> Result<Vec<ExtendedBlockHeader>, tower::BoxError> {
|
||||
#[derive(Deserialize, Debug)]
|
||||
struct Response {
|
||||
headers: Vec<BlockInfo>,
|
||||
}
|
||||
|
||||
let res = self
|
||||
.con
|
||||
.json_rpc_call::<Response>(
|
||||
"get_block_headers_range",
|
||||
Some(json!({"start_height": range.start, "end_height": range.end - 1})),
|
||||
)
|
||||
.await?;
|
||||
|
||||
tracing::info!("Retrieved block headers in range: {:?}", range);
|
||||
|
||||
Ok(rayon_spawn_async(|| {
|
||||
res.headers
|
||||
.into_iter()
|
||||
.map(|info| ExtendedBlockHeader {
|
||||
version: HardFork::from_version(info.major_version)
|
||||
.expect("previously checked block has incorrect version"),
|
||||
vote: HardFork::from_vote(info.minor_version),
|
||||
timestamp: info.timestamp,
|
||||
cumulative_difficulty: u128_from_low_high(
|
||||
info.cumulative_difficulty,
|
||||
info.cumulative_difficulty_top64,
|
||||
),
|
||||
block_weight: info.block_weight,
|
||||
long_term_weight: info.long_term_weight,
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
.await)
|
||||
}
|
||||
|
||||
async fn get_blocks_in_range(
|
||||
&self,
|
||||
range: Range<u64>,
|
||||
) -> Result<Vec<(Block, Vec<Transaction>)>, tower::BoxError> {
|
||||
tracing::info!("Getting blocks in range: {:?}", range);
|
||||
|
||||
mod items {
|
||||
use monero_wire::common::BlockCompleteEntry;
|
||||
|
||||
pub struct Request {
|
||||
pub heights: Vec<u64>,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
Request,
|
||||
heights: Vec<u64>,
|
||||
);
|
||||
|
||||
pub struct Response {
|
||||
pub blocks: Vec<BlockCompleteEntry>,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
Response,
|
||||
blocks: Vec<BlockCompleteEntry>,
|
||||
);
|
||||
}
|
||||
use items::*;
|
||||
|
||||
let res = self
|
||||
.con
|
||||
.bin_call(
|
||||
"get_blocks_by_height.bin",
|
||||
epee_encoding::to_bytes(Request {
|
||||
heights: range.collect(),
|
||||
})?
|
||||
.to_vec(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let address = self.address.clone();
|
||||
rayon_spawn_async(move || {
|
||||
let blocks: Response =
|
||||
epee_encoding::from_bytes(&mut epee_encoding::macros::bytes::Bytes::from(res))?;
|
||||
|
||||
blocks
|
||||
.blocks
|
||||
.into_par_iter()
|
||||
.map(|b| {
|
||||
let block = Block::read(&mut b.block.deref())?;
|
||||
|
||||
let txs = match b.txs {
|
||||
TransactionBlobs::Pruned(_) => return Err("node sent pruned txs!".into()),
|
||||
TransactionBlobs::Normal(txs) => txs
|
||||
.into_par_iter()
|
||||
.map(|tx| Transaction::read(&mut tx.deref()))
|
||||
.collect::<Result<_, _>>()?,
|
||||
TransactionBlobs::None => vec![],
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
block.txs.len(),
|
||||
txs.len(),
|
||||
"node: {}, height: {}, node is pruned, which is not supported!",
|
||||
address,
|
||||
block.number().unwrap(),
|
||||
);
|
||||
|
||||
Ok((block, txs))
|
||||
})
|
||||
.collect::<Result<_, tower::BoxError>>()
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_outputs(
|
||||
&self,
|
||||
out_ids: HashMap<u64, HashSet<u64>>,
|
||||
) -> Result<HashMap<u64, HashMap<u64, OutputOnChain>>, tower::BoxError> {
|
||||
tracing::info!(
|
||||
"Getting outputs len: {}",
|
||||
out_ids.values().map(|amt_map| amt_map.len()).sum::<usize>()
|
||||
);
|
||||
|
||||
mod items {
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct OutputID {
|
||||
pub amount: u64,
|
||||
pub index: u64,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
OutputID,
|
||||
amount: u64,
|
||||
index: u64,
|
||||
);
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Request {
|
||||
pub outputs: Vec<OutputID>,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
Request,
|
||||
outputs: Vec<OutputID>,
|
||||
);
|
||||
|
||||
pub struct OutputRes {
|
||||
pub height: u64,
|
||||
pub key: [u8; 32],
|
||||
pub mask: [u8; 32],
|
||||
pub txid: [u8; 32],
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
OutputRes,
|
||||
height: u64,
|
||||
key: [u8; 32],
|
||||
mask: [u8; 32],
|
||||
txid: [u8; 32],
|
||||
);
|
||||
|
||||
pub struct Response {
|
||||
pub outs: Vec<OutputRes>,
|
||||
}
|
||||
|
||||
epee_encoding::epee_object!(
|
||||
Response,
|
||||
outs: Vec<OutputRes>,
|
||||
);
|
||||
}
|
||||
|
||||
use items::*;
|
||||
|
||||
let outputs = rayon_spawn_async(|| {
|
||||
out_ids
|
||||
.into_iter()
|
||||
.flat_map(|(amt, amt_map)| {
|
||||
amt_map
|
||||
.into_iter()
|
||||
.map(|amt_idx| OutputID {
|
||||
amount: amt,
|
||||
index: amt_idx,
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.await;
|
||||
|
||||
let res = self
|
||||
.con
|
||||
.bin_call(
|
||||
"get_outs.bin",
|
||||
epee_encoding::to_bytes(Request {
|
||||
outputs: outputs.clone(),
|
||||
})?
|
||||
.to_vec(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let cache = self.cache.clone().read_owned().await;
|
||||
|
||||
let span = tracing::Span::current();
|
||||
rayon_spawn_async(move || {
|
||||
let outs: Response =
|
||||
epee_encoding::from_bytes(&mut epee_encoding::macros::bytes::Bytes::from(res))?;
|
||||
|
||||
tracing::info!(parent: &span, "Got outputs len: {}", outs.outs.len());
|
||||
|
||||
let mut ret = HashMap::new();
|
||||
|
||||
for (out, idx) in outs.outs.into_iter().zip(outputs) {
|
||||
ret.entry(idx.amount).or_insert_with(HashMap::new).insert(
|
||||
idx.index,
|
||||
OutputOnChain {
|
||||
height: out.height,
|
||||
time_lock: cache.outputs_time_lock(&out.txid),
|
||||
// we unwrap these as we are checking already approved rings so if these points are bad
|
||||
// then a bad proof has been approved.
|
||||
key: CompressedEdwardsY::from_slice(&out.key)
|
||||
.unwrap()
|
||||
.decompress(),
|
||||
commitment: CompressedEdwardsY::from_slice(&out.mask)
|
||||
.unwrap()
|
||||
.decompress()
|
||||
.unwrap(),
|
||||
},
|
||||
);
|
||||
}
|
||||
Ok(ret)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn handle_request(
|
||||
&mut self,
|
||||
req: DatabaseRequest,
|
||||
) -> Result<DatabaseResponse, tower::BoxError> {
|
||||
match req {
|
||||
DatabaseRequest::BlockHash(height) => {
|
||||
timeout(DEFAULT_TIMEOUT, self.get_block_hash(height))
|
||||
.await?
|
||||
.map(DatabaseResponse::BlockHash)
|
||||
}
|
||||
DatabaseRequest::ChainHeight => {
|
||||
let height = self.cache.read().await.height;
|
||||
|
||||
let hash = timeout(DEFAULT_TIMEOUT, self.get_block_hash(height - 1)).await??;
|
||||
|
||||
Ok(DatabaseResponse::ChainHeight(height, hash))
|
||||
}
|
||||
DatabaseRequest::BlockExtendedHeader(id) => {
|
||||
timeout(DEFAULT_TIMEOUT, self.get_extended_block_header(id))
|
||||
.await?
|
||||
.map(DatabaseResponse::BlockExtendedHeader)
|
||||
}
|
||||
DatabaseRequest::BlockExtendedHeaderInRange(range) => timeout(
|
||||
DEFAULT_TIMEOUT,
|
||||
self.get_extended_block_header_in_range(range),
|
||||
)
|
||||
.await?
|
||||
.map(DatabaseResponse::BlockExtendedHeaderInRange),
|
||||
DatabaseRequest::BlockBatchInRange(range) => {
|
||||
timeout(DEFAULT_TIMEOUT, self.get_blocks_in_range(range))
|
||||
.await?
|
||||
.map(DatabaseResponse::BlockBatchInRange)
|
||||
}
|
||||
DatabaseRequest::Outputs(out_ids) => {
|
||||
timeout(OUTPUTS_TIMEOUT, self.get_outputs(out_ids))
|
||||
.await?
|
||||
.map(DatabaseResponse::Outputs)
|
||||
}
|
||||
DatabaseRequest::NumberOutputsWithAmount(_)
|
||||
| DatabaseRequest::GeneratedCoins
|
||||
| DatabaseRequest::CheckKIsNotSpent(_) => {
|
||||
panic!("Request does not need RPC connection!")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip(self), fields(addr = self.address))]
|
||||
pub async fn check_rpc_alive(&self) -> Result<(), tower::BoxError> {
|
||||
tracing::debug!("Checking RPC connection");
|
||||
|
||||
let res = timeout(Duration::from_secs(10), self.con.get_height()).await;
|
||||
let ok = matches!(res, Ok(Ok(_)));
|
||||
|
||||
if !ok {
|
||||
tracing::warn!("RPC connection test failed");
|
||||
return Err("RPC connection test failed".into());
|
||||
}
|
||||
tracing::info!("RPC connection Ok");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    pub async fn run(mut self) {
        while let Some(req) = self.req_chan.next().await {
            let RpcReq {
                req,
                span,
                res_chan,
            } = req;

            let res = self.handle_request(req).instrument(span.clone()).await;

            let is_err = res.is_err();
            if is_err {
                tracing::warn!(parent: &span, "Error from RPC: {:?}", res)
            }

            let _ = res_chan.send(res);

            if is_err && self.check_rpc_alive().await.is_err() {
                break;
            }
        }

        tracing::warn!("Shutting down RPC connection: {}", self.address);

        self.req_chan.close();
        while let Some(req) = self.req_chan.try_next().unwrap() {
            let _ = req.res_chan.send(Err("RPC connection closed!".into()));
        }
    }
}

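For context on the loop above, the service half pairs each request with a response channel before it ever reaches `run`. A rough sketch of that caller side (not part of this diff; it assumes `res_chan` is a `futures::channel::oneshot::Sender` and that `RpcConnectionSvc` holds the matching `mpsc::Sender<RpcReq>`):

use futures::{channel::oneshot, SinkExt};

async fn send_one_request(
    mut rpc_task_chan: futures::channel::mpsc::Sender<RpcReq>,
    req: DatabaseRequest,
) -> Result<DatabaseResponse, tower::BoxError> {
    // One oneshot per request: `run` answers on it after `handle_request` finishes.
    let (res_chan, res_rx) = oneshot::channel();

    rpc_task_chan
        .send(RpcReq {
            req,
            span: tracing::Span::current(),
            res_chan,
        })
        .await?;

    // A dropped sender means the connection task shut down before answering.
    res_rx.await.map_err(|_| "RPC connection closed!")?
}
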
#[derive(Deserialize, Debug)]
struct BlockInfo {
    cumulative_difficulty: u64,
    cumulative_difficulty_top64: u64,
    timestamp: u64,
    block_weight: usize,
    long_term_weight: usize,

    major_version: u8,
    minor_version: u8,
}

fn u128_from_low_high(low: u64, high: u64) -> u128 {
    let res: u128 = high as u128;
    res << 64 | low as u128
}

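For reference, `u128_from_low_high` is how the split `cumulative_difficulty` / `cumulative_difficulty_top64` fields of `BlockInfo` are stitched back into one 128-bit value; a quick worked check (the example values here are illustrative, not from the codebase):

#[test]
fn u128_from_low_high_combines_halves() {
    // `high` occupies the upper 64 bits, `low` the lower 64 bits.
    assert_eq!(u128_from_low_high(5, 1), (1u128 << 64) + 5);
    // A `BlockInfo`'s full difficulty would be recovered as:
    // u128_from_low_high(info.cumulative_difficulty, info.cumulative_difficulty_top64)
}
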
@@ -1,87 +0,0 @@
use std::{sync::Arc, time::Duration};

use futures::{
    channel::mpsc::{self, SendError},
    stream::FuturesUnordered,
    SinkExt, StreamExt,
};
use monero_serai::rpc::HttpRpc;
use tokio::sync::RwLock;
use tower::{discover::Change, load::PeakEwma};
use tracing::instrument;

use super::{
    cache::ScanningCache,
    connection::{RpcConnection, RpcConnectionSvc},
};

#[instrument(skip(cache))]
async fn check_rpc(addr: String, cache: Arc<RwLock<ScanningCache>>) -> Option<RpcConnectionSvc> {
    tracing::debug!("Sending request to node.");

    let con = HttpRpc::with_custom_timeout(addr.clone(), Duration::from_secs(u64::MAX))
        .await
        .ok()?;
    let (tx, rx) = mpsc::channel(0);
    let rpc = RpcConnection {
        address: addr.clone(),
        con,
        cache,
        req_chan: rx,
    };

    rpc.check_rpc_alive().await.ok()?;
    let handle = tokio::spawn(rpc.run());

    Some(RpcConnectionSvc {
        address: addr,
        rpc_task_chan: tx,
        rpc_task_handle: handle,
    })
}

pub(crate) struct RPCDiscover {
    pub initial_list: Vec<String>,
    pub ok_channel: mpsc::Sender<Change<usize, PeakEwma<RpcConnectionSvc>>>,
    pub already_connected: usize,
    pub cache: Arc<RwLock<ScanningCache>>,
}

impl RPCDiscover {
    async fn found_rpc(&mut self, rpc: RpcConnectionSvc) -> Result<(), SendError> {
        self.already_connected += 1;

        self.ok_channel
            .send(Change::Insert(
                self.already_connected,
                PeakEwma::new(
                    rpc,
                    Duration::from_secs(5000),
                    3000.0,
                    tower::load::CompleteOnResponse::default(),
                ),
            ))
            .await?;

        Ok(())
    }

    pub async fn run(mut self) {
        if !self.initial_list.is_empty() {
            let mut fut = FuturesUnordered::from_iter(
                self.initial_list
                    .drain(..)
                    .map(|addr| check_rpc(addr, self.cache.clone())),
            );

            while let Some(res) = fut.next().await {
                if let Some(rpc) = res {
                    if self.found_rpc(rpc).await.is_err() {
                        tracing::info!("Stopping RPC discover channel closed!");
                        return;
                    }
                }
            }
        }
    }
}

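A rough sketch of how this (now removed) discoverer was driven, under the assumptions that `ScanningCache` implements `Default` and that the receiving half of `ok_channel` is consumed elsewhere as a `tower::discover` stream feeding a load-balanced service set; the node address is a placeholder:

async fn spawn_discoverer(cache: Arc<RwLock<ScanningCache>>) {
    // `_ok_rx` would be handed to the service balancer in real usage.
    let (ok_tx, _ok_rx) = mpsc::channel(16);

    let discover = RPCDiscover {
        initial_list: vec!["http://node.example:18081".to_string()],
        ok_channel: ok_tx,
        already_connected: 0,
        cache,
    };

    tokio::spawn(discover.run());
}
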
@@ -5,7 +5,7 @@ use tower::ServiceExt;
use crate::{
    context::{
        initialize_blockchain_context, BlockChainContextRequest, BlockChainContextResponse,
        ContextConfig, UpdateBlockchainCacheData,
        ContextConfig, NewBlockData,
    },
    tests::mock_db::*,
    HardFork,
@@ -52,9 +52,8 @@ async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> {
    assert!(context.is_still_valid());

    ctx_svc
        .oneshot(BlockChainContextRequest::Update(
            UpdateBlockchainCacheData {
                new_top_hash: [0; 32],
        .oneshot(BlockChainContextRequest::Update(NewBlockData {
            block_hash: [0; 32],
            height: BLOCKCHAIN_HEIGHT,
            timestamp: 0,
            weight: 0,
@@ -62,8 +61,7 @@ async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> {
            generated_coins: 0,
            vote: HardFork::V1,
            cumulative_difficulty: 0,
            },
        ))
        }))
        .await?;

    assert!(!context.is_still_valid());

@@ -1,4 +1,4 @@
use monero_consensus::HardFork;
use cuprate_consensus_rules::HardFork;

pub static HFS_2688888_2689608: [(HardFork, HardFork); 720] =
    include!("./data/hfs_2688888_2689608");

@@ -181,7 +181,7 @@ proptest! {

    #[test]
    fn claculating_multiple_diffs_does_not_change_state(
        mut diff_cache in random_difficulty_cache(),
        diff_cache in random_difficulty_cache(),
        timestamps in any_with::<Vec<u64>>(size_range(0..1000).lift()),
        hf in any::<HardFork>(),
    ) {
@@ -189,7 +189,7 @@ proptest! {

        diff_cache.next_difficulties(timestamps.into_iter().zip([hf].into_iter().cycle()).collect(), &hf);

        assert_eq!(diff_cache, cache);
        prop_assert_eq!(diff_cache, cache);
    }

    #[test]
@@ -203,7 +203,7 @@ proptest! {
        let diffs = diff_cache.next_difficulties(timestamps.clone(), &hf);

        for (timestamp, diff) in timestamps.into_iter().zip(diffs.into_iter()) {
            assert_eq!(diff_cache.next_difficulty(&timestamp.1), diff);
            prop_assert_eq!(diff_cache.next_difficulty(&timestamp.1), diff);
            diff_cache.new_block(diff_cache.last_accounted_height +1, timestamp.0, diff + diff_cache.cumulative_difficulty());
        }

@@ -1,5 +1,4 @@
use monero_consensus::hard_forks::{HFInfo, HardFork, NUMB_OF_HARD_FORKS};
use monero_consensus::HFsInfo;
use cuprate_consensus_rules::hard_forks::{HFInfo, HFsInfo, HardFork, NUMB_OF_HARD_FORKS};

use crate::{
    context::{hardforks::HardForkState, HardForkConfig},

@@ -3,7 +3,7 @@ use std::collections::VecDeque;
use proptest::prelude::*;
use tokio::runtime::Builder;

use monero_consensus::{
use cuprate_consensus_rules::{
    blocks::{is_randomx_seed_height, randomx_seed_height},
    HardFork,
};

@ -1,94 +1,104 @@
|
|||
//! # Transaction Verifier Service.
|
||||
//!
|
||||
//! This module contains the [`TxVerifierService`] which handles consensus validation of transactions.
|
||||
//!
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
future::Future,
|
||||
ops::Deref,
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
sync::{Arc, Mutex as StdMutex},
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::FutureExt;
|
||||
use monero_serai::ringct::RctType;
|
||||
use monero_serai::transaction::Transaction;
|
||||
use monero_serai::{
|
||||
ringct::RctType,
|
||||
transaction::{Input, Timelock, Transaction},
|
||||
};
|
||||
use rayon::prelude::*;
|
||||
use tower::{Service, ServiceExt};
|
||||
use tracing::instrument;
|
||||
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
use monero_consensus::{
|
||||
use cuprate_consensus_rules::{
|
||||
transactions::{
|
||||
check_transaction_contextual, check_transaction_semantic, RingCTError, TransactionError,
|
||||
TxRingMembersInfo,
|
||||
check_decoy_info, check_transaction_contextual, check_transaction_semantic,
|
||||
output_unlocked, TransactionError,
|
||||
},
|
||||
ConsensusError, HardFork, TxVersion,
|
||||
};
|
||||
use cuprate_helper::asynch::rayon_spawn_async;
|
||||
|
||||
use crate::{
|
||||
batch_verifier::MultiThreadedBatchVerifier, context::ReOrgToken, Database, DatabaseRequest,
|
||||
DatabaseResponse, ExtendedConsensusError,
|
||||
batch_verifier::MultiThreadedBatchVerifier,
|
||||
transactions::contextual_data::{batch_get_decoy_info, batch_get_ring_member_info},
|
||||
Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError,
|
||||
};
|
||||
|
||||
pub mod contextual_data;
|
||||
mod output_cache;
|
||||
|
||||
pub use output_cache::OutputCache;
|
||||
|
||||
pub async fn batch_setup_txs(
|
||||
txs: Vec<(Vec<Transaction>, HardFork)>,
|
||||
) -> Result<Vec<Vec<Arc<TransactionVerificationData>>>, ExtendedConsensusError> {
|
||||
let batch_verifier = Arc::new(MultiThreadedBatchVerifier::new(rayon::current_num_threads()));
|
||||
|
||||
// Move out of the async runtime and use rayon to parallelize the serialisation and hashing of the txs.
|
||||
let txs = rayon_spawn_async(move || {
|
||||
let txs = txs
|
||||
.into_par_iter()
|
||||
.map(|(txs, hf)| {
|
||||
txs.into_par_iter()
|
||||
.map(|tx| {
|
||||
Ok(Arc::new(TransactionVerificationData::new(
|
||||
tx,
|
||||
&hf,
|
||||
batch_verifier.clone(),
|
||||
)?))
|
||||
})
|
||||
.collect::<Result<Vec<_>, ConsensusError>>()
|
||||
})
|
||||
.collect::<Result<Vec<_>, ConsensusError>>()?;
|
||||
|
||||
if !Arc::into_inner(batch_verifier).unwrap().verify() {
|
||||
Err(ConsensusError::Transaction(TransactionError::RingCTError(
|
||||
RingCTError::BulletproofsRangeInvalid,
|
||||
)))?
|
||||
/// A struct representing the type of validation that needs to be completed for this transaction.
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
enum VerificationNeeded {
|
||||
/// Both semantic validation and contextual validation are needed.
|
||||
SemanticAndContextual,
|
||||
/// Only contextual validation is needed.
|
||||
Contextual,
|
||||
}
|
||||
|
||||
Ok::<_, ConsensusError>(txs)
|
||||
})
|
||||
.await?;
|
||||
/// Represents if a transaction has been fully validated and under what conditions
|
||||
/// the transaction is valid in the future.
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
pub enum CachedVerificationState {
|
||||
/// The transaction has not been validated.
|
||||
NotVerified,
|
||||
/// The transaction is valid* if the block represented by this hash is in the blockchain and the [`HardFork`]
|
||||
/// is the same.
|
||||
///
|
||||
/// *V1 transactions require checks on their ring-length even if this hash is in the blockchain.
|
||||
ValidAtHashAndHF([u8; 32], HardFork),
|
||||
/// The transaction is valid* if the block represented by this hash is in the blockchain _and_ this
|
||||
/// given time lock is unlocked. The time lock here will represent the youngest used time based lock
|
||||
/// (If the transaction uses any time based time locks). This is because time locks are not monotonic
|
||||
/// so unlocked outputs could become re-locked.
|
||||
///
|
||||
/// *V1 transactions require checks on their ring-length even if this hash is in the blockchain.
|
||||
ValidAtHashAndHFWithTimeBasedLock([u8; 32], HardFork, Timelock),
|
||||
}
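The time lock stored in `ValidAtHashAndHFWithTimeBasedLock` is picked as the smallest time-based lock across the ring members, mirroring the cache update further down in this file; a standalone sketch of that selection (block-height locks are ignored, as in the update code):

fn youngest_time_based_lock(time_locked_outs: &[Timelock]) -> Option<Timelock> {
    time_locked_outs
        .iter()
        .filter_map(|lock| match lock {
            // Only time-based locks matter here; height-based locks are monotonic.
            Timelock::Time(time) => Some(*time),
            _ => None,
        })
        .min()
        .map(Timelock::Time)
}
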
|
||||
|
||||
Ok(txs)
|
||||
impl CachedVerificationState {
|
||||
/// Returns the block hash this is valid for if in state [`CachedVerificationState::ValidAtHashAndHF`] or [`CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock`].
|
||||
fn verified_at_block_hash(&self) -> Option<[u8; 32]> {
|
||||
match self {
|
||||
CachedVerificationState::NotVerified => None,
|
||||
CachedVerificationState::ValidAtHashAndHF(hash, _)
|
||||
| CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, _, _) => Some(*hash),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Data needed to verify a transaction.
|
||||
///
|
||||
#[derive(Debug)]
|
||||
pub struct TransactionVerificationData {
|
||||
/// The transaction we are verifying
|
||||
pub tx: Transaction,
|
||||
/// The [`TxVersion`] of this tx.
|
||||
pub version: TxVersion,
|
||||
/// The serialised transaction.
|
||||
pub tx_blob: Vec<u8>,
|
||||
/// The weight of the transaction.
|
||||
pub tx_weight: usize,
|
||||
/// The fee this transaction has paid.
|
||||
pub fee: u64,
|
||||
/// The hash of this transaction.
|
||||
pub tx_hash: [u8; 32],
|
||||
    /// We put this behind a mutex as the information is not constant and is based on past output idxs
    /// which could change on re-orgs.
|
||||
rings_member_info: std::sync::Mutex<Option<(TxRingMembersInfo, ReOrgToken)>>,
|
||||
/// The verification state of this transaction.
|
||||
pub cached_verification_state: StdMutex<CachedVerificationState>,
|
||||
}
|
||||
|
||||
impl TransactionVerificationData {
|
||||
pub fn new(
|
||||
tx: Transaction,
|
||||
hf: &HardFork,
|
||||
verifier: Arc<MultiThreadedBatchVerifier>,
|
||||
) -> Result<TransactionVerificationData, ConsensusError> {
|
||||
/// Creates a new [`TransactionVerificationData`] from the given [`Transaction`].
|
||||
pub fn new(tx: Transaction) -> Result<TransactionVerificationData, ConsensusError> {
|
||||
let tx_hash = tx.hash();
|
||||
let tx_blob = tx.serialize();
|
||||
|
||||
|
@ -101,17 +111,12 @@ impl TransactionVerificationData {
|
|||
_ => tx_blob.len(),
|
||||
};
|
||||
|
||||
let fee = verifier.queue_statement(|verifier| {
|
||||
check_transaction_semantic(&tx, tx_blob.len(), tx_weight, &tx_hash, hf, verifier)
|
||||
.map_err(ConsensusError::Transaction)
|
||||
})?;
|
||||
|
||||
Ok(TransactionVerificationData {
|
||||
tx_hash,
|
||||
tx_blob,
|
||||
tx_weight,
|
||||
fee,
|
||||
rings_member_info: std::sync::Mutex::new(None),
|
||||
fee: tx.rct_signatures.base.fee,
|
||||
cached_verification_state: StdMutex::new(CachedVerificationState::NotVerified),
|
||||
version: TxVersion::from_raw(tx.prefix.version)
|
||||
.ok_or(TransactionError::TransactionVersionInvalid)?,
|
||||
tx,
|
||||
|
@ -119,24 +124,49 @@ impl TransactionVerificationData {
|
|||
}
|
||||
}
|
||||
|
||||
/// A request to verify a transaction.
|
||||
pub enum VerifyTxRequest {
|
||||
/// Verifies transactions in the context of a block.
|
||||
Block {
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
/// Verifies a batch of prepared txs.
|
||||
Prepped {
|
||||
/// The transactions to verify.
|
||||
        // TODO: Can we use references to remove the outer `Arc`? probably won't play nicely with rayon_spawn_async though
|
||||
txs: Arc<[Arc<TransactionVerificationData>]>,
|
||||
/// The current chain height.
|
||||
current_chain_height: u64,
|
||||
/// The top block hash.
|
||||
top_hash: [u8; 32],
|
||||
/// The value for time to use to check time locked outputs.
|
||||
time_for_time_lock: u64,
|
||||
/// The current [`HardFork`]
|
||||
hf: HardFork,
|
||||
},
|
||||
/// Verifies a batch of new txs.
|
||||
/// Returning [`VerifyTxResponse::OkPrepped`]
|
||||
New {
|
||||
/// The transactions to verify.
|
||||
txs: Vec<Transaction>,
|
||||
/// The current chain height.
|
||||
current_chain_height: u64,
|
||||
/// The top block hash.
|
||||
top_hash: [u8; 32],
|
||||
/// The value for time to use to check time locked outputs.
|
||||
time_for_time_lock: u64,
|
||||
/// The current [`HardFork`]
|
||||
hf: HardFork,
|
||||
re_org_token: ReOrgToken,
|
||||
},
|
||||
}
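For a concrete call site of the `New` variant, the integration test added at the bottom of this PR (consensus/tests/verify_correct_txs.rs) does essentially the following (condensed; `database` and `tx` come from that test):

let mut tx_verifier = TxVerifierService::new(database);

let response = tx_verifier
    .ready()
    .await
    .unwrap()
    .call(VerifyTxRequest::New {
        txs: vec![tx],
        current_chain_height: 10,
        top_hash: [0; 32],
        time_for_time_lock: u64::MAX,
        hf: HardFork::V9,
    })
    .await
    .unwrap();

// A `New` request hands back the prepared transactions for reuse in later block verification.
assert!(matches!(response, VerifyTxResponse::OkPrepped(_)));
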
|
||||
|
||||
/// A response from a verify transaction request.
|
||||
#[derive(Debug)]
|
||||
pub enum VerifyTxResponse {
|
||||
BatchSetupOk(Vec<Arc<TransactionVerificationData>>),
|
||||
OkPrepped(Arc<[Arc<TransactionVerificationData>]>),
|
||||
Ok,
|
||||
}
|
||||
|
||||
/// The transaction verifier service.
|
||||
#[derive(Clone)]
|
||||
pub struct TxVerifierService<D: Clone> {
|
||||
pub struct TxVerifierService<D> {
|
||||
/// The database.
|
||||
database: D,
|
||||
}
|
||||
|
||||
|
@ -145,6 +175,7 @@ where
|
|||
D: Database + Clone + Send + 'static,
|
||||
D::Future: Send + 'static,
|
||||
{
|
||||
/// Creates a new [`TxVerifierService`].
|
||||
pub fn new(database: D) -> TxVerifierService<D> {
|
||||
TxVerifierService { database }
|
||||
}
|
||||
|
@ -169,20 +200,38 @@ where
|
|||
|
||||
async move {
|
||||
match req {
|
||||
VerifyTxRequest::Block {
|
||||
VerifyTxRequest::New {
|
||||
txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
re_org_token,
|
||||
} => {
|
||||
verify_transactions_for_block(
|
||||
prep_and_verify_transactions(
|
||||
database,
|
||||
txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
VerifyTxRequest::Prepped {
|
||||
txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
} => {
|
||||
verify_prepped_transactions(
|
||||
database,
|
||||
txs,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
re_org_token,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
@ -192,88 +241,320 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
#[instrument(name = "verify_txs", skip_all, level = "info")]
|
||||
async fn verify_transactions_for_block<D>(
|
||||
/// Prepares transactions for verification, then verifies them.
|
||||
async fn prep_and_verify_transactions<D>(
|
||||
database: D,
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
txs: Vec<Transaction>,
|
||||
current_chain_height: u64,
|
||||
top_hash: [u8; 32],
|
||||
time_for_time_lock: u64,
|
||||
hf: HardFork,
|
||||
re_org_token: ReOrgToken,
|
||||
) -> Result<VerifyTxResponse, ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Sync + Send + 'static,
|
||||
{
|
||||
tracing::debug!("Verifying transactions for block, amount: {}", txs.len());
|
||||
let span = tracing::info_span!("prep_txs", amt = txs.len());
|
||||
|
||||
contextual_data::batch_refresh_ring_member_info(
|
||||
&txs,
|
||||
&hf,
|
||||
re_org_token,
|
||||
database.clone(),
|
||||
None,
|
||||
)
|
||||
tracing::debug!(parent: &span, "prepping transactions for verification.");
|
||||
let txs = rayon_spawn_async(|| {
|
||||
txs.into_par_iter()
|
||||
.map(|tx| TransactionVerificationData::new(tx).map(Arc::new))
|
||||
.collect::<Result<Arc<_>, _>>()
|
||||
})
|
||||
.await?;
|
||||
|
||||
let spent_kis = Arc::new(std::sync::Mutex::new(HashSet::new()));
|
||||
|
||||
let cloned_spent_kis = spent_kis.clone();
|
||||
|
||||
rayon_spawn_async(move || {
|
||||
txs.par_iter().try_for_each(|tx| {
|
||||
verify_transaction_for_block(
|
||||
tx,
|
||||
verify_prepped_transactions(
|
||||
database,
|
||||
txs.clone(),
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
cloned_spent_kis.clone(),
|
||||
)
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
|
||||
let DatabaseResponse::CheckKIsNotSpent(kis_spent) = database
|
||||
.oneshot(DatabaseRequest::CheckKIsNotSpent(
|
||||
Arc::into_inner(spent_kis).unwrap().into_inner().unwrap(),
|
||||
))
|
||||
Ok(VerifyTxResponse::OkPrepped(txs))
|
||||
}
|
||||
|
||||
#[instrument(name = "verify_txs", skip_all, fields(amt = txs.len()) level = "info")]
|
||||
async fn verify_prepped_transactions<D>(
|
||||
mut database: D,
|
||||
txs: Arc<[Arc<TransactionVerificationData>]>,
|
||||
current_chain_height: u64,
|
||||
top_hash: [u8; 32],
|
||||
time_for_time_lock: u64,
|
||||
hf: HardFork,
|
||||
) -> Result<VerifyTxResponse, ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Sync + Send + 'static,
|
||||
{
|
||||
tracing::debug!("Verifying transactions");
|
||||
|
||||
tracing::trace!("Checking for duplicate key images");
|
||||
|
||||
let mut spent_kis = HashSet::with_capacity(txs.len());
|
||||
|
||||
txs.iter().try_for_each(|tx| {
|
||||
tx.tx.prefix.inputs.iter().try_for_each(|input| {
|
||||
if let Input::ToKey { key_image, .. } = input {
|
||||
if !spent_kis.insert(key_image.compress().0) {
|
||||
tracing::debug!("Duplicate key image found in batch.");
|
||||
return Err(ConsensusError::Transaction(TransactionError::KeyImageSpent));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})
|
||||
})?;
|
||||
|
||||
let DatabaseResponse::KeyImagesSpent(kis_spent) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::KeyImagesSpent(spent_kis))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
if kis_spent {
|
||||
tracing::debug!("One or more key images in batch already spent.");
|
||||
Err(ConsensusError::Transaction(TransactionError::KeyImageSpent))?;
|
||||
}
|
||||
|
||||
let mut verified_at_block_hashes = txs
|
||||
.iter()
|
||||
.filter_map(|txs| {
|
||||
txs.cached_verification_state
|
||||
.lock()
|
||||
.unwrap()
|
||||
.verified_at_block_hash()
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
tracing::trace!(
|
||||
"Verified at hashes len: {}.",
|
||||
verified_at_block_hashes.len()
|
||||
);
|
||||
|
||||
if !verified_at_block_hashes.is_empty() {
|
||||
tracing::trace!("Filtering block hashes not in the main chain.");
|
||||
|
||||
let DatabaseResponse::FilteredHashes(known_hashes) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::FilterUnknownHashes(
|
||||
verified_at_block_hashes,
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database returned wrong response!");
|
||||
};
|
||||
verified_at_block_hashes = known_hashes;
|
||||
}
|
||||
|
||||
let (txs_needing_full_verification, txs_needing_partial_verification) =
|
||||
transactions_needing_verification(
|
||||
txs,
|
||||
verified_at_block_hashes,
|
||||
&hf,
|
||||
current_chain_height,
|
||||
time_for_time_lock,
|
||||
)?;
|
||||
|
||||
futures::try_join!(
|
||||
verify_transactions_decoy_info(txs_needing_partial_verification, hf, database.clone()),
|
||||
verify_transactions(
|
||||
txs_needing_full_verification,
|
||||
current_chain_height,
|
||||
top_hash,
|
||||
time_for_time_lock,
|
||||
hf,
|
||||
database
|
||||
)
|
||||
)?;
|
||||
|
||||
Ok(VerifyTxResponse::Ok)
|
||||
}
|
||||
|
||||
fn verify_transaction_for_block(
|
||||
tx_verification_data: &TransactionVerificationData,
|
||||
#[allow(clippy::type_complexity)] // I don't think the return is too complex
|
||||
fn transactions_needing_verification(
|
||||
txs: Arc<[Arc<TransactionVerificationData>]>,
|
||||
hashes_in_main_chain: HashSet<[u8; 32]>,
|
||||
current_hf: &HardFork,
|
||||
current_chain_height: u64,
|
||||
time_for_time_lock: u64,
|
||||
) -> Result<
|
||||
(
|
||||
Vec<(Arc<TransactionVerificationData>, VerificationNeeded)>,
|
||||
Vec<Arc<TransactionVerificationData>>,
|
||||
),
|
||||
ConsensusError,
|
||||
> {
|
||||
// txs needing full validation: semantic and/or contextual
|
||||
let mut full_validation_transactions = Vec::new();
|
||||
// txs needing partial _contextual_ validation, not semantic.
|
||||
let mut partial_validation_transactions = Vec::new();
|
||||
|
||||
for tx in txs.iter() {
|
||||
let guard = tx.cached_verification_state.lock().unwrap();
|
||||
|
||||
match guard.deref() {
|
||||
CachedVerificationState::NotVerified => {
|
||||
drop(guard);
|
||||
full_validation_transactions
|
||||
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
|
||||
continue;
|
||||
}
|
||||
CachedVerificationState::ValidAtHashAndHF(hash, hf) => {
|
||||
if current_hf != hf {
|
||||
drop(guard);
|
||||
full_validation_transactions
|
||||
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
|
||||
continue;
|
||||
}
|
||||
|
||||
if !hashes_in_main_chain.contains(hash) {
|
||||
drop(guard);
|
||||
full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, hf, lock) => {
|
||||
if current_hf != hf {
|
||||
drop(guard);
|
||||
full_validation_transactions
|
||||
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
|
||||
continue;
|
||||
}
|
||||
|
||||
if !hashes_in_main_chain.contains(hash) {
|
||||
drop(guard);
|
||||
full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
|
||||
continue;
|
||||
}
|
||||
|
||||
// If the time lock is still locked then the transaction is invalid.
|
||||
if !output_unlocked(lock, current_chain_height, time_for_time_lock, hf) {
|
||||
return Err(ConsensusError::Transaction(
|
||||
TransactionError::OneOrMoreRingMembersLocked,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if tx.version == TxVersion::RingSignatures {
|
||||
drop(guard);
|
||||
partial_validation_transactions.push(tx.clone());
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
Ok((
|
||||
full_validation_transactions,
|
||||
partial_validation_transactions,
|
||||
))
|
||||
}
|
||||
|
||||
async fn verify_transactions_decoy_info<D>(
|
||||
txs: Vec<Arc<TransactionVerificationData>>,
|
||||
hf: HardFork,
|
||||
spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>,
|
||||
) -> Result<(), ConsensusError> {
|
||||
tracing::debug!(
|
||||
"Verifying transaction: {}",
|
||||
hex::encode(tx_verification_data.tx_hash)
|
||||
);
|
||||
|
||||
let rings_member_info_lock = tx_verification_data.rings_member_info.lock().unwrap();
|
||||
let rings_member_info = match rings_member_info_lock.deref() {
|
||||
Some(rings_member_info) => rings_member_info,
|
||||
None => panic!("rings_member_info needs to be set to be able to verify!"),
|
||||
};
|
||||
|
||||
check_transaction_contextual(
|
||||
&tx_verification_data.tx,
|
||||
&rings_member_info.0,
|
||||
current_chain_height,
|
||||
time_for_time_lock,
|
||||
&hf,
|
||||
spent_kis,
|
||||
)?;
|
||||
database: D,
|
||||
) -> Result<(), ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Sync + Send + 'static,
|
||||
{
|
||||
batch_get_decoy_info(&txs, hf, database)
|
||||
.await?
|
||||
.try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, &hf)?)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn verify_transactions<D>(
|
||||
txs: Vec<(Arc<TransactionVerificationData>, VerificationNeeded)>,
|
||||
current_chain_height: u64,
|
||||
top_hash: [u8; 32],
|
||||
current_time_lock_timestamp: u64,
|
||||
hf: HardFork,
|
||||
database: D,
|
||||
) -> Result<(), ExtendedConsensusError>
|
||||
where
|
||||
D: Database + Clone + Sync + Send + 'static,
|
||||
{
|
||||
let txs_ring_member_info =
|
||||
batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;
|
||||
|
||||
rayon_spawn_async(move || {
|
||||
let batch_veriifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads());
|
||||
|
||||
txs.par_iter()
|
||||
.zip(txs_ring_member_info.par_iter())
|
||||
.try_for_each(|((tx, verification_needed), ring)| {
|
||||
// do semantic validation if needed.
|
||||
if *verification_needed == VerificationNeeded::SemanticAndContextual {
|
||||
batch_veriifier.queue_statement(|verifier| {
|
||||
let fee = check_transaction_semantic(
|
||||
&tx.tx,
|
||||
tx.tx_blob.len(),
|
||||
tx.tx_weight,
|
||||
&tx.tx_hash,
|
||||
&hf,
|
||||
verifier,
|
||||
)?;
|
||||
// make sure monero-serai calculated the same fee.
|
||||
assert_eq!(fee, tx.fee);
|
||||
Ok(())
|
||||
})?;
|
||||
}
|
||||
|
||||
// Both variants of `VerificationNeeded` require contextual validation.
|
||||
check_transaction_contextual(
|
||||
&tx.tx,
|
||||
ring,
|
||||
current_chain_height,
|
||||
current_time_lock_timestamp,
|
||||
&hf,
|
||||
)?;
|
||||
|
||||
Ok::<_, ConsensusError>(())
|
||||
})?;
|
||||
|
||||
if !batch_veriifier.verify() {
|
||||
return Err(ExtendedConsensusError::OneOrMoreBatchVerificationStatementsInvalid);
|
||||
}
|
||||
|
||||
txs.iter()
|
||||
.zip(txs_ring_member_info)
|
||||
.for_each(|((tx, _), ring)| {
|
||||
if ring.time_locked_outs.is_empty() {
|
||||
*tx.cached_verification_state.lock().unwrap() =
|
||||
CachedVerificationState::ValidAtHashAndHF(top_hash, hf);
|
||||
} else {
|
||||
let youngest_timebased_lock = ring
|
||||
.time_locked_outs
|
||||
.iter()
|
||||
.filter_map(|lock| match lock {
|
||||
Timelock::Time(time) => Some(*time),
|
||||
_ => None,
|
||||
})
|
||||
.min();
|
||||
|
||||
*tx.cached_verification_state.lock().unwrap() =
|
||||
if let Some(time) = youngest_timebased_lock {
|
||||
CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(
|
||||
top_hash,
|
||||
hf,
|
||||
Timelock::Time(time),
|
||||
)
|
||||
} else {
|
||||
CachedVerificationState::ValidAtHashAndHF(top_hash, hf)
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
//! # Contextual Data
|
||||
//!
|
||||
//! This module contains [`TxRingMembersInfo`] which is a struct made up from blockchain information about the
|
||||
//! This module fills [`TxRingMembersInfo`] which is a struct made up from blockchain information about the
|
||||
//! ring members of inputs. This module does minimal consensus checks, only when needed, and should not be relied
|
||||
//! upon to do any.
|
||||
//!
|
||||
|
@ -10,17 +10,16 @@
|
|||
//!
|
||||
//! Because this data is unique for *every* transaction and the context service is just for blockchain state data.
|
||||
//!
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
ops::Deref,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use monero_serai::transaction::Input;
|
||||
use tower::ServiceExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use monero_consensus::{
|
||||
use cuprate_consensus_rules::{
|
||||
transactions::{
|
||||
get_ring_members_for_inputs, insert_ring_member_ids, DecoyInfo, TxRingMembersInfo,
|
||||
},
|
||||
|
@ -28,148 +27,19 @@ use monero_consensus::{
|
|||
};
|
||||
|
||||
use crate::{
|
||||
context::ReOrgToken,
|
||||
transactions::{output_cache::OutputCache, TransactionVerificationData},
|
||||
Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError,
|
||||
transactions::TransactionVerificationData, Database, DatabaseRequest, DatabaseResponse,
|
||||
ExtendedConsensusError,
|
||||
};
|
||||
|
||||
pub async fn batch_refresh_ring_member_info<'a, D: Database + Clone + Send + Sync + 'static>(
|
||||
txs_verification_data: &'a [Arc<TransactionVerificationData>],
|
||||
hf: &HardFork,
|
||||
re_org_token: ReOrgToken,
|
||||
mut database: D,
|
||||
out_cache: Option<&OutputCache<'a>>,
|
||||
) -> Result<(), ExtendedConsensusError> {
|
||||
let (txs_needing_full_refresh, txs_needing_partial_refresh) =
|
||||
ring_member_info_needing_refresh(txs_verification_data, hf);
|
||||
|
||||
if !txs_needing_full_refresh.is_empty() {
|
||||
batch_fill_ring_member_info(
|
||||
txs_needing_full_refresh.iter(),
|
||||
hf,
|
||||
re_org_token,
|
||||
database.clone(),
|
||||
out_cache,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let unique_input_amounts = txs_needing_partial_refresh
|
||||
.iter()
|
||||
.flat_map(|tx_info| {
|
||||
tx_info
|
||||
.tx
|
||||
.prefix
|
||||
.inputs
|
||||
.iter()
|
||||
.map(|input| match input {
|
||||
Input::ToKey { amount, .. } => amount.unwrap_or(0),
|
||||
_ => 0,
|
||||
})
|
||||
.collect::<HashSet<_>>()
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let DatabaseResponse::NumberOutputsWithAmount(outputs_with_amount) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::NumberOutputsWithAmount(
|
||||
unique_input_amounts.into_iter().collect(),
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!")
|
||||
};
|
||||
|
||||
for tx_v_data in txs_needing_partial_refresh {
|
||||
let decoy_info = if hf != &HardFork::V1 {
|
||||
// this data is only needed after hard-fork 1.
|
||||
Some(
|
||||
DecoyInfo::new(&tx_v_data.tx.prefix.inputs, &outputs_with_amount, hf)
|
||||
.map_err(ConsensusError::Transaction)?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
        // Temporarily acquire the mutex lock to add the ring member info.
|
||||
tx_v_data
|
||||
.rings_member_info
|
||||
.lock()
|
||||
.unwrap()
|
||||
.as_mut()
|
||||
// this unwrap is safe as otherwise this would require a full refresh not a partial one.
|
||||
.unwrap()
|
||||
.0
|
||||
.decoy_info = decoy_info;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// This function returns the transaction verification data that need refreshing.
|
||||
///
|
||||
/// The first returned vec needs a full refresh.
|
||||
/// The second returned vec only needs a partial refresh.
|
||||
///
|
||||
/// A full refresh is a refresh of all the ring members and the decoy info.
|
||||
/// A partial refresh is just a refresh of the decoy info.
|
||||
fn ring_member_info_needing_refresh(
|
||||
txs_verification_data: &[Arc<TransactionVerificationData>],
|
||||
hf: &HardFork,
|
||||
) -> (
|
||||
Vec<Arc<TransactionVerificationData>>,
|
||||
Vec<Arc<TransactionVerificationData>>,
|
||||
) {
|
||||
let mut txs_needing_full_refresh = Vec::new();
|
||||
let mut txs_needing_partial_refresh = Vec::new();
|
||||
|
||||
for tx in txs_verification_data {
|
||||
let tx_ring_member_info = tx.rings_member_info.lock().unwrap();
|
||||
|
||||
// if we don't have ring members or if a re-org has happened do a full refresh.
|
||||
if let Some(tx_ring_member_info) = tx_ring_member_info.deref() {
|
||||
if tx_ring_member_info.1.reorg_happened() {
|
||||
txs_needing_full_refresh.push(tx.clone());
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
txs_needing_full_refresh.push(tx.clone());
|
||||
continue;
|
||||
}
|
||||
|
||||
// if any input does not have a 0 amount do a partial refresh, this is because some decoy info
|
||||
// data is based on the amount of non-ringCT outputs at a certain point.
|
||||
// Or if a hf has happened as this will change the default minimum decoys.
|
||||
if &tx_ring_member_info
|
||||
.as_ref()
|
||||
.expect("We just checked if this was None")
|
||||
.0
|
||||
.hf
|
||||
!= hf
|
||||
|| tx.tx.prefix.inputs.iter().any(|inp| match inp {
|
||||
Input::Gen(_) => false,
|
||||
Input::ToKey { amount, .. } => amount.is_some(),
|
||||
})
|
||||
{
|
||||
txs_needing_partial_refresh.push(tx.clone());
|
||||
}
|
||||
}
|
||||
|
||||
(txs_needing_full_refresh, txs_needing_partial_refresh)
|
||||
}
|
||||
|
||||
/// Fills the `rings_member_info` field on the inputted [`TransactionVerificationData`].
|
||||
/// Retrieves the [`TxRingMembersInfo`] for the inputted [`TransactionVerificationData`].
|
||||
///
|
||||
/// This function batch gets all the ring members for the inputted transactions and fills in data about
|
||||
/// them.
|
||||
pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync + 'static>(
|
||||
pub async fn batch_get_ring_member_info<D: Database>(
|
||||
txs_verification_data: impl Iterator<Item = &Arc<TransactionVerificationData>> + Clone,
|
||||
hf: &HardFork,
|
||||
re_org_token: ReOrgToken,
|
||||
mut database: D,
|
||||
out_cache: Option<&OutputCache<'a>>,
|
||||
) -> Result<(), ExtendedConsensusError> {
|
||||
) -> Result<Vec<TxRingMembersInfo>, ExtendedConsensusError> {
|
||||
let mut output_ids = HashMap::new();
|
||||
|
||||
for tx_v_data in txs_verification_data.clone() {
|
||||
|
@ -197,17 +67,12 @@ pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync +
|
|||
panic!("Database sent incorrect response!")
|
||||
};
|
||||
|
||||
for tx_v_data in txs_verification_data {
|
||||
let ring_members_for_tx = get_ring_members_for_inputs(
|
||||
|amt, idx| {
|
||||
if let Some(cached_outs) = out_cache {
|
||||
if let Some(out) = cached_outs.get_out(amt, idx) {
|
||||
return Some(out);
|
||||
}
|
||||
}
|
||||
Ok(txs_verification_data
|
||||
.map(move |tx_v_data| {
|
||||
let numb_outputs = |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0);
|
||||
|
||||
outputs.get(&amt)?.get(&idx)
|
||||
},
|
||||
let ring_members_for_tx = get_ring_members_for_inputs(
|
||||
|amt, idx| outputs.get(&amt)?.get(&idx).copied(),
|
||||
&tx_v_data.tx.prefix.inputs,
|
||||
)
|
||||
.map_err(ConsensusError::Transaction)?;
|
||||
|
@ -215,20 +80,71 @@ pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync +
|
|||
let decoy_info = if hf != &HardFork::V1 {
|
||||
// this data is only needed after hard-fork 1.
|
||||
Some(
|
||||
DecoyInfo::new(&tx_v_data.tx.prefix.inputs, &outputs_with_amount, hf)
|
||||
DecoyInfo::new(&tx_v_data.tx.prefix.inputs, numb_outputs, hf)
|
||||
.map_err(ConsensusError::Transaction)?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
        // Temporarily acquire the mutex lock to add the ring member info.
|
||||
let _ = tx_v_data.rings_member_info.lock().unwrap().insert((
|
||||
TxRingMembersInfo::new(ring_members_for_tx, decoy_info, tx_v_data.version, *hf)
|
||||
.map_err(ConsensusError::Transaction)?,
|
||||
re_org_token.clone(),
|
||||
));
|
||||
TxRingMembersInfo::new(ring_members_for_tx, decoy_info, tx_v_data.version)
|
||||
.map_err(ConsensusError::Transaction)
|
||||
})
|
||||
.collect::<Result<_, _>>()?)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
/// Refreshes the transactions [`TxRingMembersInfo`], if needed.
|
||||
///
|
||||
/// # Panics
|
||||
/// This functions panics if `hf == HardFork::V1` as decoy info
|
||||
/// should not be needed for V1.
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
|
||||
txs_verification_data: &'a [Arc<TransactionVerificationData>],
|
||||
hf: HardFork,
|
||||
mut database: D,
|
||||
) -> Result<impl Iterator<Item = Result<DecoyInfo, ConsensusError>> + 'a, ExtendedConsensusError> {
|
||||
// decoy info is not needed for V1.
|
||||
assert_ne!(hf, HardFork::V1);
|
||||
|
||||
tracing::debug!(
|
||||
"Retrieving decoy info for {} txs.",
|
||||
txs_verification_data.len()
|
||||
);
|
||||
|
||||
// Get all the different input amounts.
|
||||
let unique_input_amounts = txs_verification_data
|
||||
.iter()
|
||||
.flat_map(|tx_info| {
|
||||
tx_info.tx.prefix.inputs.iter().map(|input| match input {
|
||||
Input::ToKey { amount, .. } => amount.unwrap_or(0),
|
||||
_ => 0,
|
||||
})
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
tracing::debug!(
|
||||
"Getting the amount of outputs with certain amounts for {} amounts",
|
||||
unique_input_amounts.len()
|
||||
);
|
||||
|
||||
let DatabaseResponse::NumberOutputsWithAmount(outputs_with_amount) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::NumberOutputsWithAmount(
|
||||
unique_input_amounts.into_iter().collect(),
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!")
|
||||
};
|
||||
|
||||
Ok(txs_verification_data.iter().map(move |tx_v_data| {
|
||||
DecoyInfo::new(
|
||||
&tx_v_data.tx.prefix.inputs,
|
||||
|amt| outputs_with_amount.get(&amt).copied().unwrap_or(0),
|
||||
&hf,
|
||||
)
|
||||
.map_err(ConsensusError::Transaction)
|
||||
}))
|
||||
}
|
||||
|
|
|
@ -1,153 +0,0 @@
|
|||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
iter::once,
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
use curve25519_dalek::{
|
||||
constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, EdwardsPoint, Scalar,
|
||||
};
|
||||
use monero_consensus::{
|
||||
blocks::BlockError,
|
||||
miner_tx::MinerTxError,
|
||||
transactions::{OutputOnChain, TransactionError},
|
||||
ConsensusError,
|
||||
};
|
||||
use monero_serai::{
|
||||
block::Block,
|
||||
transaction::{Input, Timelock},
|
||||
H,
|
||||
};
|
||||
use tower::ServiceExt;
|
||||
|
||||
use crate::{
|
||||
transactions::TransactionVerificationData, Database, DatabaseRequest, DatabaseResponse,
|
||||
ExtendedConsensusError,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
enum CachedAmount<'a> {
|
||||
Clear(u64),
|
||||
Commitment(&'a EdwardsPoint),
|
||||
}
|
||||
|
||||
impl<'a> CachedAmount<'a> {
|
||||
fn get_commitment(&self) -> EdwardsPoint {
|
||||
match self {
|
||||
CachedAmount::Commitment(commitment) => **commitment,
|
||||
// TODO: Setup a table with common amounts.
|
||||
CachedAmount::Clear(amt) => ED25519_BASEPOINT_POINT + H() * Scalar::from(*amt),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct CachedOutput<'a> {
|
||||
height: u64,
|
||||
time_lock: &'a Timelock,
|
||||
key: &'a CompressedEdwardsY,
|
||||
amount: CachedAmount<'a>,
|
||||
|
||||
cached_created: OnceLock<OutputOnChain>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct OutputCache<'a>(HashMap<u64, BTreeMap<u64, CachedOutput<'a>>>);
|
||||
|
||||
impl<'a> OutputCache<'a> {
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Self {
|
||||
OutputCache(HashMap::new())
|
||||
}
|
||||
|
||||
pub fn get_out(&self, amt: u64, idx: u64) -> Option<&OutputOnChain> {
|
||||
let cached_out = self.0.get(&amt)?.get(&idx)?;
|
||||
|
||||
Some(cached_out.cached_created.get_or_init(|| OutputOnChain {
|
||||
height: cached_out.height,
|
||||
time_lock: *cached_out.time_lock,
|
||||
key: cached_out.key.decompress(),
|
||||
commitment: cached_out.amount.get_commitment(),
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn extend_from_block<'b: 'a, D: Database>(
|
||||
&mut self,
|
||||
blocks: impl Iterator<Item = (&'b Block, &'b [Arc<TransactionVerificationData>])> + 'b,
|
||||
database: &mut D,
|
||||
) -> Result<(), ExtendedConsensusError> {
|
||||
let mut idx_needed = HashMap::new();
|
||||
|
||||
for (block, txs) in blocks {
|
||||
for tx in once(&block.miner_tx).chain(txs.iter().map(|tx| &tx.tx)) {
|
||||
let is_rct = tx.prefix.version == 2;
|
||||
let is_miner = matches!(tx.prefix.inputs.as_slice(), &[Input::Gen(_)]);
|
||||
|
||||
for (i, out) in tx.prefix.outputs.iter().enumerate() {
|
||||
let amt = out.amount.unwrap_or(0);
|
||||
// The amt this output will be stored under.
|
||||
let amt_table_key = if is_rct { 0 } else { amt };
|
||||
|
||||
let amount_commitment = match (is_rct, is_miner) {
|
||||
(true, false) => CachedAmount::Commitment(
|
||||
tx.rct_signatures.base.commitments.get(i).ok_or(
|
||||
ConsensusError::Transaction(TransactionError::NonZeroOutputForV2),
|
||||
)?,
|
||||
),
|
||||
_ => CachedAmount::Clear(amt),
|
||||
};
|
||||
let output_to_cache = CachedOutput {
|
||||
height: block.number().ok_or(ConsensusError::Block(
|
||||
BlockError::MinerTxError(MinerTxError::InputNotOfTypeGen),
|
||||
))?,
|
||||
time_lock: &tx.prefix.timelock,
|
||||
key: &out.key,
|
||||
amount: amount_commitment,
|
||||
|
||||
cached_created: OnceLock::new(),
|
||||
};
|
||||
|
||||
let Some(amt_table) = self.0.get_mut(&amt_table_key) else {
|
||||
idx_needed
|
||||
.entry(amt_table_key)
|
||||
.or_insert_with(Vec::new)
|
||||
.push(output_to_cache);
|
||||
continue;
|
||||
};
|
||||
|
||||
let top_idx = *amt_table.last_key_value().unwrap().0;
|
||||
amt_table.insert(top_idx + 1, output_to_cache);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if idx_needed.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let DatabaseResponse::NumberOutputsWithAmount(numb_outs) = database
|
||||
.ready()
|
||||
.await?
|
||||
.call(DatabaseRequest::NumberOutputsWithAmount(
|
||||
idx_needed.keys().copied().collect(),
|
||||
))
|
||||
.await?
|
||||
else {
|
||||
panic!("Database sent incorrect response!");
|
||||
};
|
||||
|
||||
for (amt_table_key, out) in idx_needed {
|
||||
let numb_outs = *numb_outs
|
||||
.get(&amt_table_key)
|
||||
.expect("DB did not return all results!");
|
||||
|
||||
self.0.entry(amt_table_key).or_default().extend(
|
||||
out.into_iter()
|
||||
.enumerate()
|
||||
.map(|(i, out)| (u64::try_from(i + numb_outs).unwrap(), out)),
|
||||
)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
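One point worth spelling out from the removed cache: pre-RingCT outputs carry no stored commitment, so consensus treats them as a commitment to the clear amount with a blinding factor of one, which is what `CachedAmount::get_commitment` rebuilds above. A standalone sketch of the same formula:

use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, EdwardsPoint, Scalar};
use monero_serai::H;

// C = 1*G + amount*H, the effective commitment of a transparent (pre-RingCT) output.
fn clear_amount_commitment(amount: u64) -> EdwardsPoint {
    ED25519_BASEPOINT_POINT + H() * Scalar::from(amount)
}
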
141
consensus/tests/verify_correct_txs.rs
Normal file
@ -0,0 +1,141 @@
|
|||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
future::ready,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY};
|
||||
use monero_serai::transaction::{Timelock, Transaction};
|
||||
use tower::{service_fn, Service, ServiceExt};
|
||||
|
||||
use cuprate_consensus::{
|
||||
Database, DatabaseRequest, DatabaseResponse, TxVerifierService, VerifyTxRequest,
|
||||
VerifyTxResponse,
|
||||
};
|
||||
|
||||
use cuprate_consensus_rules::{transactions::OutputOnChain, HardFork};
|
||||
|
||||
use cuprate_test_utils::data::TX_E2D393;
|
||||
|
||||
fn dummy_database(outputs: BTreeMap<u64, OutputOnChain>) -> impl Database + Clone {
|
||||
let outputs = Arc::new(outputs);
|
||||
|
||||
service_fn(move |req: DatabaseRequest| {
|
||||
ready(Ok(match req {
|
||||
DatabaseRequest::NumberOutputsWithAmount(_) => {
|
||||
DatabaseResponse::NumberOutputsWithAmount(HashMap::new())
|
||||
}
|
||||
DatabaseRequest::Outputs(outs) => {
|
||||
let idxs = outs.get(&0).unwrap();
|
||||
|
||||
let mut ret = HashMap::new();
|
||||
|
||||
ret.insert(
|
||||
0_u64,
|
||||
idxs.iter()
|
||||
.map(|idx| (*idx, *outputs.get(idx).unwrap()))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
);
|
||||
|
||||
DatabaseResponse::Outputs(ret)
|
||||
}
|
||||
DatabaseRequest::KeyImagesSpent(_) => DatabaseResponse::KeyImagesSpent(false),
|
||||
_ => panic!("Database request not needed for this test"),
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
macro_rules! test_verify_valid_v2_tx {
|
||||
(
|
||||
$test_name: ident,
|
||||
$tx: ident,
|
||||
Rings: $([
|
||||
$($idx: literal: ($ring_member: literal, $commitment: literal),)+
|
||||
],)+
|
||||
$hf: ident
|
||||
) => {
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(const_item_mutation)]
|
||||
async fn $test_name() {
|
||||
let members = vec![
|
||||
$($(($idx,
|
||||
OutputOnChain {
|
||||
height: 0,
|
||||
time_lock: Timelock::None,
|
||||
commitment: CompressedEdwardsY::from_slice(&hex_literal::hex!($commitment))
|
||||
.unwrap()
|
||||
.decompress()
|
||||
.unwrap(),
|
||||
key: CompressedEdwardsY::from_slice(&hex_literal::hex!($ring_member))
|
||||
.unwrap()
|
||||
.decompress(),
|
||||
}),)+)+
|
||||
];
|
||||
|
||||
let map = BTreeMap::from_iter(members);
|
||||
let database = dummy_database(map);
|
||||
|
||||
let mut tx_verifier = TxVerifierService::new(database);
|
||||
|
||||
assert!(matches!(tx_verifier.ready().await.unwrap().call(
|
||||
VerifyTxRequest::New {
|
||||
txs: vec![Transaction::read(&mut $tx).unwrap()].into(),
|
||||
current_chain_height: 10,
|
||||
top_hash: [0; 32],
|
||||
hf: HardFork::$hf,
|
||||
time_for_time_lock: u64::MAX
|
||||
}
|
||||
).await.unwrap(), VerifyTxResponse::OkPrepped(_)));
|
||||
|
||||
// Check verification fails if we put random ring members
|
||||
|
||||
let members = vec![
|
||||
$($(($idx,
|
||||
OutputOnChain {
|
||||
height: 0,
|
||||
time_lock: Timelock::None,
|
||||
commitment: ED25519_BASEPOINT_POINT,
|
||||
key: CompressedEdwardsY::from_slice(&hex_literal::hex!($ring_member))
|
||||
.unwrap()
|
||||
.decompress(),
|
||||
}),)+)+
|
||||
];
|
||||
|
||||
let map = BTreeMap::from_iter(members);
|
||||
let database = dummy_database(map);
|
||||
|
||||
let mut tx_verifier = TxVerifierService::new(database);
|
||||
|
||||
assert!(tx_verifier.ready().await.unwrap().call(
|
||||
VerifyTxRequest::New {
|
||||
txs: vec![Transaction::read(&mut $tx).unwrap()].into(),
|
||||
current_chain_height: 10,
|
||||
top_hash: [0; 32],
|
||||
hf: HardFork::$hf,
|
||||
time_for_time_lock: u64::MAX
|
||||
}
|
||||
).await.is_err());
|
||||
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
test_verify_valid_v2_tx! {
|
||||
verify_tx_e2d393,
|
||||
TX_E2D393,
|
||||
Rings: [
|
||||
7567582: ("5fa4f8b160c0877476e78094d0ce4951b20f43088f6e3698fa4d3154069c7c1b", "9a41189729e8cf113cee0b126e22653f3f551227947f54fbbb16ae8d535d757d"),
|
||||
7958047: ("0febe3d139bf3db267c2efdc714ea9b42e437a5aa16e42848a835d009108fcdf", "ecca12345c02c6b0348cfa988a0d86d34e3a89cd8b53dd4ffdb860cee0eda487"),// miner amt: 3551239030364
|
||||
8224417: ("bdd1fb8a725ae15ce37bc8090925126396f87c2972d728814f2d622baa77ebf6", "24624e957c351727deadafda531f7bed433220e72dc85f8aa8d3d32cd7df42e1"),
|
||||
8225772: ("cddef0210ed3113f3362ecb7aa43003c6c3ed4bcac09dc4d9d8d015472c8a3d8", "f61b954879a0f3cc3540f0364ad108fe286162f993f4b435b42038c29d07b8c2"),
|
||||
8234785: ("4edf5a8448e133fcb7914ea161dbb8eb0057e44284d0315839d9fce4cdb063e8", "1cec1e2f88268d6f164f07f79c663bd1af09920a9254164f518faff45dd42138"),
|
||||
8247173: ("cbee0e5fa9c31689b174862a6eb0a164a2d807d2862ac0ad50c0030f0af6c5e7", "f229752b609d923cda89735ed2a42a9af6fc3e3219ac164f17d5eac4f85f391c"),
|
||||
8285361: ("f16dbd9542e7dd575c15e2c9217f5cecb6d134383e5e8416da4affab132f1ff8", "7e31ad658fff150b0ae3a9329e353522ed20dd3ac8df8cd965fa4369164857b4"),
|
||||
8308826: ("4ce2b333cc421237fc96f1a0719d4ac0892f0ff457f3a14f2e499fc045cd4714", "2f7f240e42cbd3a5f02b0b185465263b6a4c6df609dcf928314ea7ddbec3d3dc"),// miner amt: 3408911250482
|
||||
8312407: ("ead8dfb7423f5c3fa7f10663ce885d27d1b7eeb634ac05fd74d3b080440819bf", "236c3fde472978aff92aeb6e752eeb681dfdbb9a84d7e049238f7f544b85062a"),
|
||||
8314321: ("24d3dadeef6b0aff3ee7288cd391823b0020ba3fab42085f66765fc2a164f879", "bffce0393f1fc96e3d83a057208b506c9f7ad52e012e20b228918932c6c8287a"),
|
||||
8315222: ("a8b165589dffa4c31c27fb432cfdd4855b0d04102b79e439720bb80198d5b9c0", "c3febd29c1a3cc397639ff7fdb357d22a900821bef956af626651f2a916cf6f6"),
|
||||
],
|
||||
V9
|
||||
}
|