Merge branch 'main' into block-downloader

Boog900 2024-06-06 01:36:15 +01:00
commit 9edcb760f7
GPG key ID: 42AB1287CB0041C2
73 changed files with 2276 additions and 3428 deletions

.gitignore (vendored): 1 change

@@ -1,3 +1,4 @@
 target/
 .vscode
 monerod
+books/*/book


@@ -64,3 +64,8 @@ This section is primarily targeted at maintainers. Most contributors aren't able
 [I-]: https://github.com/Cuprate/cuprate/labels?q=I
 [O-]: https://github.com/Cuprate/cuprate/labels?q=O
 [P-]: https://github.com/Cuprate/cuprate/labels?q=P
+
+## Books
+Cuprate has various documentation books whose source files live in [`books/`](https://github.com/Cuprate/cuprate/tree/main/books).
+
+Please contribute if you find a mistake! The files are mostly [markdown](https://wikipedia.org/wiki/Markdown) files and can be easily edited. See the `books/` directory for more information.

Cargo.lock (generated): 312 changes

@@ -50,64 +50,6 @@ dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "anstream"
-version = "0.6.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb"
-dependencies = [
- "anstyle",
- "anstyle-parse",
- "anstyle-query",
- "anstyle-wincon",
- "colorchoice",
- "utf8parse",
-]
-
-[[package]]
-name = "anstyle"
-version = "1.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
-
-[[package]]
-name = "anstyle-parse"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
-dependencies = [
- "utf8parse",
-]
-
-[[package]]
-name = "anstyle-query"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
-dependencies = [
- "windows-sys 0.52.0",
-]
-
-[[package]]
-name = "anstyle-wincon"
-version = "3.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
-dependencies = [
- "anstyle",
- "windows-sys 0.52.0",
-]
-
-[[package]]
-name = "async-buffer"
-version = "0.1.0"
-dependencies = [
- "futures",
- "pin-project",
- "thiserror",
- "tokio",
-]
-
 [[package]]
 name = "async-lock"
 version = "3.3.0"
@@ -275,18 +217,18 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
 [[package]]
 name = "bytemuck"
-version = "1.15.0"
+version = "1.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15"
+checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5"
 dependencies = [
  "bytemuck_derive",
 ]
 
 [[package]]
 name = "bytemuck_derive"
-version = "1.6.0"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"
+checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -335,52 +277,6 @@ dependencies = [
  "windows-targets 0.52.5",
 ]
 
-[[package]]
-name = "clap"
-version = "4.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
-dependencies = [
- "clap_builder",
- "clap_derive",
-]
-
-[[package]]
-name = "clap_builder"
-version = "4.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
-dependencies = [
- "anstream",
- "anstyle",
- "clap_lex",
- "strsim",
-]
-
-[[package]]
-name = "clap_derive"
-version = "4.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
-dependencies = [
- "heck 0.5.0",
- "proc-macro2",
- "quote",
- "syn 2.0.60",
-]
-
-[[package]]
-name = "clap_lex"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
-
-[[package]]
-name = "colorchoice"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
-
 [[package]]
 name = "concurrent-queue"
 version = "2.5.0"
@@ -430,9 +326,9 @@ dependencies = [
 [[package]]
 name = "crossbeam-channel"
-version = "0.5.12"
+version = "0.5.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95"
+checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
 dependencies = [
  "crossbeam-utils",
 ]
@@ -542,33 +438,49 @@ dependencies = [
 name = "cuprate-consensus"
 version = "0.1.0"
 dependencies = [
- "borsh",
- "clap",
+ "cuprate-consensus-rules",
  "cuprate-helper",
+ "cuprate-test-utils",
+ "cuprate-types",
  "curve25519-dalek",
  "dalek-ff-group",
- "dirs",
- "epee-encoding",
  "futures",
  "hex",
- "monero-consensus",
+ "hex-literal",
  "monero-serai",
- "monero-wire",
  "multiexp",
  "proptest",
  "proptest-derive",
  "randomx-rs",
  "rayon",
- "serde",
- "serde_json",
- "syn 2.0.60",
  "thiserror",
  "thread_local",
  "tokio",
  "tokio-util",
  "tower",
  "tracing",
- "tracing-subscriber",
 ]
 
+[[package]]
+name = "cuprate-consensus-rules"
+version = "0.1.0"
+dependencies = [
+ "crypto-bigint",
+ "cryptonight-cuprate",
+ "cuprate-helper",
+ "curve25519-dalek",
+ "dalek-ff-group",
+ "hex",
+ "hex-literal",
+ "monero-serai",
+ "multiexp",
+ "proptest",
+ "proptest-derive",
+ "rand",
+ "rayon",
+ "thiserror",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
@@ -590,7 +502,6 @@ dependencies = [
 name = "cuprate-p2p"
 version = "0.1.0"
 dependencies = [
- "async-buffer",
  "bytes",
  "cuprate-helper",
  "cuprate-test-utils",
@@ -614,9 +525,12 @@ dependencies = [
  "tokio-util",
  "tower",
  "tracing",
- "tracing-subscriber",
 ]
 
+[[package]]
+name = "cuprate-rpc-interface"
+version = "0.0.0"
+
 [[package]]
 name = "cuprate-test-utils"
 version = "0.1.0"
@@ -648,11 +562,8 @@ version = "0.0.0"
 name = "cuprate-types"
 version = "0.0.0"
 dependencies = [
- "borsh",
- "cfg-if",
  "curve25519-dalek",
  "monero-serai",
- "serde",
 ]
[[package]] [[package]]
@ -688,7 +599,7 @@ dependencies = [
[[package]] [[package]]
name = "dalek-ff-group" name = "dalek-ff-group"
version = "0.4.1" version = "0.4.1"
source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629" source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
dependencies = [ dependencies = [
"crypto-bigint", "crypto-bigint",
"curve25519-dalek", "curve25519-dalek",
@@ -702,7 +613,7 @@ dependencies = [
 ]
 
 [[package]]
-name = "dandelion-tower"
+name = "dandelion_tower"
 version = "0.1.0"
 dependencies = [
  "futures",
@@ -784,21 +695,6 @@ dependencies = [
  "windows-sys 0.48.0",
 ]
 
-[[package]]
-name = "dleq"
-version = "0.4.1"
-source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
-dependencies = [
- "digest",
- "ff",
- "flexible-transcript",
- "group",
- "multiexp",
- "rand_core",
- "rustversion",
- "zeroize",
-]
-
 [[package]]
 name = "doxygen-rs"
 version = "0.4.2"
@@ -898,7 +794,7 @@ dependencies = [
 [[package]]
 name = "flexible-transcript"
 version = "0.3.2"
-source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
+source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
 dependencies = [
  "blake2",
  "digest",
@@ -1076,17 +972,11 @@ version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
 
-[[package]]
-name = "heck"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
-
 [[package]]
 name = "heed"
-version = "0.20.0"
+version = "0.20.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7a300b0deeb2957162d7752b0f063b3be1c88333af5bb4e7a57d8fb3716f50b"
+checksum = "f60d7cff16094be9627830b399c087a25017e93fb3768b87cd656a68ccb1ebe8"
 dependencies = [
  "bitflags 2.5.0",
  "byteorder",
@@ -1208,9 +1098,9 @@ dependencies = [
 [[package]]
 name = "hyper-rustls"
-version = "0.26.0"
+version = "0.27.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c"
+checksum = "908bb38696d7a037a01ebcc68a00634112ac2bbf8ca74e30a2c3d2f4f021302b"
 dependencies = [
  "futures-util",
  "http",
@@ -1312,6 +1202,10 @@ dependencies = [
  "wasm-bindgen",
 ]
 
+[[package]]
+name = "json-rpc"
+version = "0.0.0"
+
 [[package]]
 name = "keccak"
 version = "0.1.5"
@@ -1372,9 +1266,9 @@ checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
 [[package]]
 name = "lmdb-master-sys"
-version = "0.2.0"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc9048db3a58c0732d7236abc4909058f9d2708cfb6d7d047eb895fddec6419a"
+checksum = "a5142795c220effa4c8f4813537bd4c88113a07e45e93100ccb2adc5cec6c7f3"
 dependencies = [
  "cc",
  "doxygen-rs",
@@ -1464,32 +1358,10 @@ dependencies = [
  "tracing",
 ]
 
-[[package]]
-name = "monero-consensus"
-version = "0.1.0"
-dependencies = [
- "crypto-bigint",
- "cryptonight-cuprate",
- "cuprate-helper",
- "curve25519-dalek",
- "dalek-ff-group",
- "hex",
- "hex-literal",
- "monero-serai",
- "multiexp",
- "proptest",
- "proptest-derive",
- "rand",
- "rayon",
- "thiserror",
- "tokio",
- "tracing",
-]
-
 [[package]]
 name = "monero-generators"
 version = "0.4.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
+source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
 dependencies = [
  "curve25519-dalek",
  "dalek-ff-group",
@@ -1528,10 +1400,14 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "monero-rpc-types"
+version = "0.0.0"
+
 [[package]]
 name = "monero-serai"
 version = "0.1.4-alpha"
-source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
+source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
 dependencies = [
  "async-lock",
  "async-trait",
@@ -1539,7 +1415,6 @@ dependencies = [
  "curve25519-dalek",
  "dalek-ff-group",
  "digest_auth",
- "dleq",
  "flexible-transcript",
  "group",
  "hex",
@@ -1578,7 +1453,7 @@ dependencies = [
 [[package]]
 name = "multiexp"
 version = "0.4.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
+source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
 dependencies = [
  "ff",
  "group",
@@ -1588,16 +1463,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "nu-ansi-term"
-version = "0.46.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
-dependencies = [
- "overload",
- "winapi",
-]
-
 [[package]]
 name = "num-traits"
 version = "0.2.18"
@@ -1645,12 +1510,6 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
 
-[[package]]
-name = "overload"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
-
 [[package]]
 name = "page_size"
 version = "0.6.0"
@@ -2093,10 +1952,11 @@ dependencies = [
 [[package]]
 name = "rustls"
-version = "0.22.4"
+version = "0.23.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
+checksum = "afabcee0551bd1aa3e18e5adbf2c0544722014b899adb31bd186ec638d3da97e"
 dependencies = [
+ "once_cell",
  "ring",
  "rustls-pki-types",
  "rustls-webpki",
@@ -2189,7 +2049,7 @@ version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d"
 dependencies = [
- "heck 0.4.1",
+ "heck",
  "proc-macro2",
  "quote",
  "syn 2.0.60",
@@ -2276,15 +2136,6 @@ dependencies = [
  "keccak",
 ]
 
-[[package]]
-name = "sharded-slab"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
-dependencies = [
- "lazy_static",
-]
-
 [[package]]
 name = "signal-hook-registry"
 version = "1.4.2"
@@ -2297,7 +2148,7 @@ dependencies = [
 [[package]]
 name = "simple-request"
 version = "0.1.0"
-source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
+source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
 dependencies = [
  "http-body-util",
  "hyper",
@@ -2347,18 +2198,12 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
 [[package]]
 name = "std-shims"
 version = "0.1.1"
-source = "git+https://github.com/Cuprate/serai.git?rev=347d4cf#347d4cf4135c92bc5b0a3e3cb66fa3ff51b1c629"
+source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1"
 dependencies = [
  "hashbrown 0.14.5",
  "spin",
 ]
 
-[[package]]
-name = "strsim"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
-
 [[package]]
 name = "subtle"
 version = "2.5.0"
@@ -2512,9 +2357,9 @@ dependencies = [
 [[package]]
 name = "tokio-rustls"
-version = "0.25.0"
+version = "0.26.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f"
+checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4"
 dependencies = [
  "rustls",
  "rustls-pki-types",
@@ -2631,18 +2476,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
 dependencies = [
  "once_cell",
- "valuable",
-]
-
-[[package]]
-name = "tracing-log"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
-dependencies = [
- "log",
- "once_cell",
- "tracing-core",
 ]
 
 [[package]]
@@ -2651,12 +2484,7 @@ version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
 dependencies = [
- "nu-ansi-term",
- "sharded-slab",
- "smallvec",
- "thread_local",
  "tracing-core",
- "tracing-log",
 ]
 
 [[package]]
@@ -2715,18 +2543,6 @@ dependencies = [
  "percent-encoding",
 ]
 
-[[package]]
-name = "utf8parse"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
-
-[[package]]
-name = "valuable"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
-
 [[package]]
 name = "version_check"
 version = "0.9.4"


@@ -21,6 +21,9 @@ members = [
     "pruning",
     "test-utils",
     "types",
+    "rpc/json-rpc",
+    "rpc/monero-rpc-types",
+    "rpc/cuprate-rpc-interface",
 ]
 
 [profile.release]
@@ -53,15 +56,15 @@ chrono = { version = "0.4.31", default-features = false }
 crypto-bigint = { version = "0.5.5", default-features = false }
 crossbeam = { version = "0.8.4", default-features = false }
 curve25519-dalek = { version = "4.1.1", default-features = false }
-dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
+dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
 dashmap = { version = "5.5.3", default-features = false }
 dirs = { version = "5.0.1", default-features = false }
 futures = { version = "0.3.29", default-features = false }
 hex = { version = "0.4.3", default-features = false }
 hex-literal = { version = "0.4", default-features = false }
 indexmap = { version = "2.2.5", default-features = false }
-monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
-multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "347d4cf", default-features = false }
+monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
+multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false }
 paste = { version = "1.0.14", default-features = false }
 pin-project = { version = "1.1.3", default-features = false }
 randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false }
@@ -86,7 +89,6 @@ pretty_assertions = { version = "1.4.0" }
 proptest = { version = "1" }
 proptest-derive = { version = "0.4.0" }
 
-
 ## TODO:
 ## Potential dependencies.
 # arc-swap  = { version = "1.6.0" }  # Atomically swappable Arc<T>      | https://github.com/vorner/arc-swap


@@ -3,3 +3,6 @@ depending on the crate in question. Each crate declares their license in their
 `Cargo.toml`. Additionally, a full copy of both licenses are included in the
 root of this repository for reference. These copies should be provided with
 any distribution of a crate, as per the respective license's terms.
+
+All documentation, including the books in the `books/` directory, is licensed
+under the MIT license.


@@ -1 +1,28 @@
-# TODO
+## Books
+This directory contains the source files for Cuprate's various books.
+
+The source files are edited here, and published in other repositories, see:
+- [Cuprate's architecture book](https://github.com/Cuprate/architecture-book)
+- [Cuprate's protocol book](https://github.com/Cuprate/monero-book)
+
+## Build tools
+Building the book(s) requires [Rust's cargo tool](https://doc.rust-lang.org/cargo/getting-started/installation.html) and [mdBook](https://github.com/rust-lang/mdBook).
+
+After installing `cargo`, install `mdbook` with:
+
+```bash
+cargo install mdbook
+```
+
+## Building
+To build a book, go into a book's directory and build:
+
+```bash
+# This builds Cuprate's architecture book.
+cd architecture/
+mdbook build
+```
+
+The output will be in the `book` subdirectory (`architecture/book` for the above example). To build the book and open it in your web browser, run:
+
+```bash
+mdbook build --open
+```


@@ -1 +1,6 @@
-# TODO
+## Cuprate's architecture (implementation) book
+This book documents Cuprate's architecture and implementation.
+
+See:
+- <https://architecture.cuprate.org>
+- <https://github.com/Cuprate/architecture-book>


@@ -0,0 +1,19 @@
+[book]
+authors = ["hinto-janai"]
+language = "en"
+multilingual = false
+src = "src"
+title = "Cuprate Architecture"
+git-repository-url = "https://github.com/Cuprate/architecture-book"
+
+# TODO: fix after importing real files.
+#
+# [preprocessor.last-changed]
+# command = "mdbook-last-changed"
+# renderer = ["html"]
+#
+# [output.html]
+# default-theme = "ayu"
+# preferred-dark-theme = "ayu"
+# git-repository-url = "https://github.com/hinto-janai/cuprate-architecture"
+# additional-css = ["last-changed.css"]


@@ -0,0 +1,3 @@
+# Summary
+
+- [TODO](todo.md)


@@ -0,0 +1 @@
+# TODO


@@ -1 +1,6 @@
-# TODO
+## Cuprate's protocol book
+This book documents the Monero protocol.
+
+See:
+- <https://monero-book.cuprate.org>
+- <https://github.com/Cuprate/monero-book>


@@ -7,34 +7,15 @@ license = "MIT"
 authors = ["Boog900"]
 repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"
 
-[features]
-default = []
-binaries = [
-    "tokio/rt-multi-thread",
-    "tokio/macros",
-    "tower/retry",
-    "tower/balance",
-    "tower/buffer",
-    "tower/timeout",
-    "monero-serai/http-rpc",
-    "dep:tracing-subscriber",
-    "dep:serde_json",
-    "dep:serde",
-    "dep:epee-encoding",
-    "dep:monero-wire",
-    "dep:borsh",
-    "dep:dirs",
-    "dep:clap"
-]
-
 [dependencies]
 cuprate-helper = { path = "../helper", default-features = false, features = ["std", "asynch", "num"] }
-monero-consensus = {path = "./rules", features = ["rayon"]}
+cuprate-consensus-rules = { path = "./rules", features = ["rayon"] }
+cuprate-types = { path = "../types" }
 
 thiserror = { workspace = true }
 tower = { workspace = true, features = ["util"] }
 tracing = { workspace = true, features = ["std", "attributes"] }
-futures = { workspace = true, features = ["std"] }
+futures = { workspace = true, features = ["std", "async-await"] }
 
 randomx-rs = { workspace = true }
 monero-serai = { workspace = true, features = ["std"] }
@@ -47,23 +28,13 @@ thread_local = { workspace = true }
 tokio = { workspace = true, features = ["rt"] }
 tokio-util = { workspace = true }
 
-hex = "0.4"
-
-# used in binaries
-monero-wire = {path="../net/monero-wire", optional = true}
-epee-encoding = { path="../net/epee-encoding" , optional = true}
-serde_json = {version = "1", optional = true}
-serde = {version = "1", optional = true, features = ["derive"]}
-tracing-subscriber = {version = "0.3", optional = true}
-borsh = { workspace = true, optional = true}
-dirs = {version="5.0", optional = true}
-clap = { version = "4.4.8", optional = true, features = ["derive"] }
-# here to help cargo to pick a version - remove me
-syn = "2.0.37"
+hex = { workspace = true }
 
 [dev-dependencies]
-monero-consensus = {path = "./rules", features = ["proptest"]}
+cuprate-test-utils = { path = "../test-utils" }
+cuprate-consensus-rules = {path = "./rules", features = ["proptest"]}
+hex-literal = { workspace = true }
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"]}
 proptest = { workspace = true }


@@ -1,37 +1,14 @@
 # Consensus Rules
 
-This folder contains 2 crates: `monero-consensus` (rules) and `cuprate-consensus`. `monero-consensus` contains the raw-rules
-and is built to be a more flexible library which requires the user to give the correct data and do minimal calculations, `cuprate-consensus`
-on the other hand contains multiple tower::Services that handle tx/block verification as a whole with a `context` service that
-keeps track of blockchain state. `cuprate-consensus` uses `monero-consensus` internally.
+This folder contains 2 crates:
+- `cuprate-consensus-rules` (`rules/` directory)
+- `cuprate-consensus`
 
-If you are looking to use monero consensus rules it's recommended you try to integrate `cuprate-consensus` and fall back to
-`monero-consensus` if you need more flexibility.
+`cuprate-consensus-rules` contains the raw-rules and is built to be a more flexible library which requires the user
+to give the correct data and do minimal calculations.
 
-## scan_chain
+`cuprate-consensus` on the other hand contains multiple `tower::Service`s that handle transaction/block verification as a
+whole with a `context` service that keeps track of blockchain state. `cuprate-consensus` uses `cuprate-consensus-rules` internally.
 
-`cuprate-consensus` contains a binary, `scan_chain`, which uses multiple RPC connections to scan the blockchain and verify it against the
-consensus rules. It keeps track of minimal data and uses the RPC connection to get blocks/transactions/outputs.
-
-`scan_chain` was not built for wide usage, so you may find issues; if you do, open an issue in Cuprate's issue tracker and/or join our matrix
-room for help. `scan_chain` has only been verified on `x86_64-unknown-linux-gnu`.
-
-`scan_chain` will take at least a day for stagenet and testnet and 6 for mainnet, but expect it to be longer. If you are just looking to verify
-previous transactions it may be worth using `monerod` with `--fast-block-sync 0`; this will probably be faster to complete and you will have a
-usable node at the end!
-
-### How to run
-
-First you will need to install Rust/Cargo: https://www.rust-lang.org/tools/install
-
-Next you need to clone Cuprate's git repo, enter the root of Cuprate, then run:
-```
-cargo run --bin scan_chain -r
-```
-If you want to pass in options you need to add `--` then the option(s), so to list the options do:
-```
-cargo run --bin scan_chain -r -- --help
-```
+If you are looking to use Monero consensus rules, it's recommended you try to integrate `cuprate-consensus` and fall back
+to `cuprate-consensus-rules` if you need more flexibility.


@@ -1,5 +1,5 @@
 [package]
-name = "monero-consensus"
+name = "cuprate-consensus-rules"
 version = "0.1.0"
 edition = "2021"
 license = "MIT"


@@ -1,3 +1,5 @@
+use std::collections::HashSet;
+
 use crypto_bigint::{CheckedMul, U256};
 use monero_serai::block::Block;

@@ -196,12 +198,13 @@ fn check_timestamp(block: &Block, median_timestamp: u64) -> Result<(), BlockErro
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#no-duplicate-transactions>
 fn check_txs_unique(txs: &[[u8; 32]]) -> Result<(), BlockError> {
-    txs.windows(2).try_for_each(|window| {
-        if window[0] == window[1] {
-            Err(BlockError::DuplicateTransaction)?;
-        }
-
-        Ok(())
-    })
+    let set = txs.iter().collect::<HashSet<_>>();
+
+    if set.len() == txs.len() {
+        Ok(())
+    } else {
+        Err(BlockError::DuplicateTransaction)
+    }
 }
 
 /// This struct contains the data needed to verify a block, implementers MUST make sure
@@ -275,3 +278,28 @@ pub fn check_block(
 
     Ok((vote, generated_coins))
 }
+
+#[cfg(test)]
+mod tests {
+    use proptest::{collection::vec, prelude::*};
+
+    use super::*;
+
+    proptest! {
+        #[test]
+        fn test_check_unique_txs(
+            mut txs in vec(any::<[u8; 32]>(), 2..3000),
+            duplicate in any::<[u8; 32]>(),
+            dup_idx_1 in any::<usize>(),
+            dup_idx_2 in any::<usize>(),
+        ) {
+            prop_assert!(check_txs_unique(&txs).is_ok());
+
+            txs.insert(dup_idx_1 % txs.len(), duplicate);
+            txs.insert(dup_idx_2 % txs.len(), duplicate);
+
+            prop_assert!(check_txs_unique(&txs).is_err());
+        }
+    }
+}
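A note on the `check_txs_unique` rewrite above: it changes behavior, not just style. `windows(2)` only compares adjacent hashes, so it could only catch duplicates in an already-sorted list, while the `HashSet` length comparison catches duplicates anywhere (which is exactly what the new property test exercises). A minimal standalone sketch of the difference, with hypothetical helper names that are not part of the crate:

```rust
use std::collections::HashSet;

/// Old approach: only flags *adjacent* duplicates.
fn has_adjacent_dupes(txs: &[[u8; 32]]) -> bool {
    txs.windows(2).any(|w| w[0] == w[1])
}

/// New approach: flags duplicates anywhere in the slice.
fn has_any_dupes(txs: &[[u8; 32]]) -> bool {
    let set: HashSet<_> = txs.iter().collect();
    set.len() != txs.len()
}

fn main() {
    // A duplicate pair separated by a different hash.
    let txs = [[1u8; 32], [2u8; 32], [1u8; 32]];
    assert!(!has_adjacent_dupes(&txs)); // missed by the old check
    assert!(has_any_dupes(&txs)); // caught by the new check
}
```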


@@ -163,6 +163,7 @@ impl HardFork {
     /// Returns the hard-fork for a blocks `major_version` field.
     ///
     /// <https://monero-book.cuprate.org/consensus_rules/hardforks.html#blocks-version-and-vote>
+    #[inline]
     pub fn from_version(version: u8) -> Result<HardFork, HardForkError> {
         Ok(match version {
             1 => HardFork::V1,
@@ -188,6 +189,7 @@ impl HardFork {
     /// Returns the hard-fork for a blocks `minor_version` (vote) field.
     ///
     /// <https://monero-book.cuprate.org/consensus_rules/hardforks.html#blocks-version-and-vote>
+    #[inline]
    pub fn from_vote(vote: u8) -> HardFork {
         if vote == 0 {
             // A vote of 0 is interpreted as 1 as that's what Monero used to default to.
@@ -197,6 +199,7 @@ impl HardFork {
         Self::from_version(vote).unwrap_or(HardFork::V16)
     }
 
+    #[inline]
     pub fn from_block_header(header: &BlockHeader) -> Result<(HardFork, HardFork), HardForkError> {
         Ok((
             HardFork::from_version(header.major_version)?,
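A note on the vote semantics in `from_vote` above: a `minor_version` of 0 is read as a vote for v1, and any unknown higher version falls back to the newest hard-fork (V16 in this diff). A hedged standalone sketch of that clamping, using a hypothetical function with plain integers standing in for the enum:

```rust
/// Mirrors `HardFork::from_vote`'s clamping with plain integers:
/// 0 is treated as a vote for 1, and anything above the newest known
/// hard-fork (16 here) counts as a vote for the newest.
fn clamp_vote(vote: u8) -> u8 {
    if vote == 0 {
        return 1;
    }
    vote.min(16)
}

fn main() {
    assert_eq!(clamp_vote(0), 1);
    assert_eq!(clamp_vote(7), 7);
    assert_eq!(clamp_vote(200), 16);
}
```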


@@ -6,6 +6,12 @@ use crate::hard_forks::{HFVotes, HardFork, NUMB_OF_HARD_FORKS};
 
 const TEST_WINDOW_SIZE: u64 = 25;
 
+#[test]
+fn target_block_time() {
+    assert_eq!(HardFork::V1.block_time().as_secs(), 60);
+    assert_eq!(HardFork::V2.block_time().as_secs(), 120);
+}
+
 #[test]
 fn next_hard_forks() {
     let mut prev = HardFork::V1;


@@ -207,3 +207,17 @@ pub fn check_miner_tx(
 
     check_total_output_amt(total_outs, reward, total_fees, hf)
 }
+
+#[cfg(test)]
+mod tests {
+    use proptest::prelude::*;
+
+    use super::*;
+
+    proptest! {
+        #[test]
+        fn tail_emission(generated_coins in any::<u64>(), hf in any::<HardFork>()) {
+            prop_assert!(calculate_base_reward(generated_coins, &hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60)
+        }
+    }
+}
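The property tested above is Monero's tail emission: the base reward never drops below `MINIMUM_REWARD_PER_MIN` scaled by the target block time. A sketch of the floor arithmetic, assuming the commonly cited value of 0.3 XMR per minute in piconero; the test itself only relies on whatever constant the crate defines:

```rust
/// Assumed value: 0.3 XMR per minute, in piconero.
const MINIMUM_REWARD_PER_MIN: u64 = 300_000_000_000;

/// Tail-emission floor for a given target block time.
fn tail_emission_floor(block_time_secs: u64) -> u64 {
    MINIMUM_REWARD_PER_MIN * block_time_secs / 60
}

fn main() {
    assert_eq!(tail_emission_floor(60), 300_000_000_000); // v1: 60s blocks -> 0.3 XMR
    assert_eq!(tail_emission_floor(120), 600_000_000_000); // v2+: 120s blocks -> 0.6 XMR
}
```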


@@ -1,5 +1,6 @@
+use std::cmp::Ordering;
+
 use monero_serai::ringct::RctType;
-use std::{cmp::Ordering, collections::HashSet, sync::Arc};
 
 use monero_serai::transaction::{Input, Output, Timelock, Transaction};
 use multiexp::BatchVerifier;
@@ -11,6 +12,8 @@ use crate::{
 mod contextual_data;
 mod ring_ct;
 mod ring_signatures;
+#[cfg(test)]
+mod tests;
 
 pub use contextual_data::*;
 pub use ring_ct::RingCTError;
@@ -57,7 +60,7 @@ pub enum TransactionError {
     #[error("The transaction inputs are not ordered.")]
     InputsAreNotOrdered,
     #[error("The transaction spends a decoy which is too young.")]
-    OneOrMoreDecoysLocked,
+    OneOrMoreRingMembersLocked,
     #[error("The transaction inputs overflow.")]
     InputsOverflow,
     #[error("The transaction has no inputs.")]
@@ -124,7 +127,7 @@ pub(crate) fn check_output_types(
 ) -> Result<(), TransactionError> {
     if hf == &HardFork::V15 {
         for outs in outputs.windows(2) {
-            if outs[0].view_tag.is_some() != outs[0].view_tag.is_some() {
+            if outs[0].view_tag.is_some() != outs[1].view_tag.is_some() {
                 return Err(TransactionError::OutputTypeInvalid);
             }
         }
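The old condition above compared `outs[0]` with itself, so it could never fail; the fixed line enforces what the v15 rule is after: during the transition fork, every output in a transaction must agree on whether it carries a view tag. A standalone sketch of that invariant, with hypothetical `bool` flags standing in for `view_tag.is_some()`:

```rust
/// Each bool stands for "this output has a view tag".
fn view_tags_consistent(has_view_tag: &[bool]) -> bool {
    // Mirrors the fixed windows(2) comparison: every adjacent pair
    // must agree, which forces the whole slice to be uniform.
    has_view_tag.windows(2).all(|w| w[0] == w[1])
}

fn main() {
    assert!(view_tags_consistent(&[true, true, true]));
    assert!(view_tags_consistent(&[false, false]));
    assert!(!view_tags_consistent(&[true, false, true]));
}
```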
@@ -213,7 +216,10 @@ fn check_number_of_outputs(
     }
 
     match rct_type {
-        RctType::Bulletproofs | RctType::BulletproofsCompactAmount | RctType::BulletproofsPlus => {
+        RctType::Bulletproofs
+        | RctType::BulletproofsCompactAmount
+        | RctType::Clsag
+        | RctType::BulletproofsPlus => {
             if outputs <= MAX_BULLETPROOFS_OUTPUTS {
                 Ok(())
             } else {
@@ -247,7 +253,7 @@ fn check_outputs_semantics(
 /// Checks if an outputs unlock time has passed.
 ///
 /// <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html>
-fn output_unlocked(
+pub fn output_unlocked(
     time_lock: &Timelock,
     current_chain_height: u64,
     current_time_lock_timestamp: u64,
@@ -272,7 +278,7 @@ fn check_block_time_lock(unlock_height: u64, current_chain_height: u64) -> bool
     unlock_height <= current_chain_height
 }
 
-/// Returns if a locked output, which uses a block height, can be spend.
+/// Returns if a locked output, which uses a block height, can be spent.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#timestamp>
 fn check_timestamp_time_lock(
@@ -303,7 +309,7 @@ fn check_all_time_locks(
             hf,
         ) {
             tracing::debug!("Transaction invalid: one or more inputs locked, lock: {time_lock:?}.");
-            Err(TransactionError::OneOrMoreDecoysLocked)
+            Err(TransactionError::OneOrMoreRingMembersLocked)
         } else {
             Ok(())
         }
@@ -316,7 +322,7 @@ fn check_all_time_locks(
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#minimum-decoys>
 /// && <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#equal-number-of-decoys>
-fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
+pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
     if hf == &HardFork::V15 {
         // Hard-fork 15 allows both v14 and v16 rules
         return check_decoy_info(decoy_info, &HardFork::V14)
@@ -347,26 +353,16 @@ fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), Transac
     Ok(())
 }
 
-/// Checks the inputs key images for torsion and for duplicates in the spent_kis list.
+/// Checks the inputs key images for torsion.
 ///
-/// The `spent_kis` parameter is not meant to be a complete list of key images, just a list of related transactions
-/// key images, for example transactions in a block. The chain will be checked for duplicates later.
-///
-/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#unique-key-image>
-/// && <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#torsion-free-key-image>
-fn check_key_images(
-    input: &Input,
-    spent_kis: &mut HashSet<[u8; 32]>,
-) -> Result<(), TransactionError> {
+/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#torsion-free-key-image>
+fn check_key_images(input: &Input) -> Result<(), TransactionError> {
     match input {
         Input::ToKey { key_image, .. } => {
             // this happens in monero-serai but we may as well duplicate the check.
             if !key_image.is_torsion_free() {
                 return Err(TransactionError::KeyImageIsNotInPrimeSubGroup);
             }
-            if !spent_kis.insert(key_image.compress().to_bytes()) {
-                return Err(TransactionError::KeyImageSpent);
-            }
         }
         _ => Err(TransactionError::IncorrectInputType)?,
     }
@@ -455,7 +451,7 @@ fn check_10_block_lock(
         tracing::debug!(
             "Transaction invalid: One or more ring members younger than 10 blocks."
         );
-        Err(TransactionError::OneOrMoreDecoysLocked)
+        Err(TransactionError::OneOrMoreRingMembersLocked)
     } else {
         Ok(())
     }
@@ -510,23 +506,19 @@ fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result<u64, Transa
 ///
 /// Contextual rules are rules that require blockchain context to check.
 ///
-/// This function does not check signatures.
-///
-/// The `spent_kis` parameter is not meant to be a complete list of key images, just a list of related transactions
-/// key images, for example transactions in a block. The chain should be checked for duplicates later.
+/// This function does not check signatures or for duplicate key-images.
 fn check_inputs_contextual(
     inputs: &[Input],
     tx_ring_members_info: &TxRingMembersInfo,
     current_chain_height: u64,
     hf: &HardFork,
-    spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), TransactionError> {
     // This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members.
     // When picking ring members monerod will only look in the DB at past blocks so an output has to be younger
     // than this transaction to be used in this tx.
     if tx_ring_members_info.youngest_used_out_height >= current_chain_height {
         tracing::debug!("Transaction invalid: One or more ring members too young.");
-        Err(TransactionError::OneOrMoreDecoysLocked)?;
+        Err(TransactionError::OneOrMoreRingMembersLocked)?;
     }
 
     check_10_block_lock(
@@ -541,11 +533,9 @@ fn check_inputs_contextual(
         assert_eq!(hf, &HardFork::V1);
     }
 
-    let mut spent_kis_lock = spent_kis.lock().unwrap();
     for input in inputs {
-        check_key_images(input, &mut spent_kis_lock)?;
+        check_key_images(input)?;
     }
-    drop(spent_kis_lock);
 
     Ok(())
 }
@@ -608,7 +598,7 @@ fn transaction_weight_limit(hf: &HardFork) -> usize {
 /// - The tx-pool will use the current hard-fork
 /// - When syncing the hard-fork is in the block header.
 ///
-/// To fully verify a transaction this must be accompanied with [`check_transaction_contextual`]
+/// To fully verify a transaction this must be accompanied by [`check_transaction_contextual`]
 ///
 pub fn check_transaction_semantic(
     tx: &Transaction,
@@ -655,9 +645,11 @@ pub fn check_transaction_semantic(
 
 /// Checks the transaction is contextually valid.
 ///
-/// To fully verify a transaction this must be accompanied with [`check_transaction_semantic`]
+/// To fully verify a transaction this must be accompanied by [`check_transaction_semantic`].
 ///
-/// `current_time_lock_timestamp` must be: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#getting-the-current-time>
+/// This function also does _not_ check for duplicate key-images: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#unique-key-image>.
+///
+/// `current_time_lock_timestamp` must be: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#getting-the-current-time>.
 pub fn check_transaction_contextual(
     tx: &Transaction,
@@ -665,7 +657,6 @@ pub fn check_transaction_contextual(
     current_chain_height: u64,
     current_time_lock_timestamp: u64,
     hf: &HardFork,
-    spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), TransactionError> {
     let tx_version = TxVersion::from_raw(tx.prefix.version)
         .ok_or(TransactionError::TransactionVersionInvalid)?;
@@ -675,7 +666,6 @@
         tx_ring_members_info,
         current_chain_height,
         hf,
-        spent_kis,
     )?;
 
     check_tx_version(&tx_ring_members_info.decoy_info, &tx_version, hf)?;
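With `spent_kis` gone from the signature above, duplicate key-image detection is now wholly the caller's job (as the updated doc comment notes). A hedged sketch of what a caller verifying a batch of transactions might do, using a hypothetical helper that is not part of this diff:

```rust
use std::collections::HashSet;

/// Returns true if no key image repeats across the batch.
/// A real caller would also check the chain for previously spent images.
fn no_duplicate_key_images(key_images: &[[u8; 32]]) -> bool {
    let mut seen = HashSet::new();
    key_images.iter().all(|ki| seen.insert(*ki))
}

fn main() {
    assert!(no_duplicate_key_images(&[[1u8; 32], [2u8; 32]]));
    assert!(!no_duplicate_key_images(&[[1u8; 32], [1u8; 32]]));
}
```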


@@ -6,22 +6,13 @@ use std::{
 
 use curve25519_dalek::EdwardsPoint;
 use monero_serai::transaction::{Input, Timelock};
 
-use crate::{transactions::TransactionError, HardFork, TxVersion};
-
-/// An already approved previous transaction output.
-#[derive(Debug)]
-pub struct OutputOnChain {
-    pub height: u64,
-    pub time_lock: Timelock,
-    pub key: Option<EdwardsPoint>,
-    pub commitment: EdwardsPoint,
-}
+use crate::{transactions::TransactionError, HardFork};
 
 /// Gets the absolute offsets from the relative offsets.
 ///
 /// This function will return an error if the relative offsets are empty.
 /// <https://cuprate.github.io/monero-book/consensus_rules/transactions.html#inputs-must-have-decoys>
-fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, TransactionError> {
+pub fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, TransactionError> {
     if relative_offsets.is_empty() {
         return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
     }
@@ -64,35 +55,6 @@ pub fn insert_ring_member_ids(
     Ok(())
 }
 
-/// Get the ring members for the inputs from the outputs on the chain.
-///
-/// Will error if `outputs` does not contain the outputs needed.
-pub fn get_ring_members_for_inputs<'a>(
-    get_outputs: impl Fn(u64, u64) -> Option<&'a OutputOnChain>,
-    inputs: &[Input],
-) -> Result<Vec<Vec<&'a OutputOnChain>>, TransactionError> {
-    inputs
-        .iter()
-        .map(|inp| match inp {
-            Input::ToKey {
-                amount,
-                key_offsets,
-                ..
-            } => {
-                let offsets = get_absolute_offsets(key_offsets)?;
-                Ok(offsets
-                    .iter()
-                    .map(|offset| {
-                        get_outputs(amount.unwrap_or(0), *offset)
-                            .ok_or(TransactionError::RingMemberNotFoundOrInvalid)
-                    })
-                    .collect::<Result<_, TransactionError>>()?)
-            }
-            _ => Err(TransactionError::IncorrectInputType),
-        })
-        .collect::<Result<_, TransactionError>>()
-}
-
 /// Represents the ring members of all the inputs.
 #[derive(Debug)]
 pub enum Rings {
@@ -102,46 +64,7 @@ pub enum Rings {
     RingCT(Vec<Vec<[EdwardsPoint; 2]>>),
 }
 
-impl Rings {
-    /// Builds the rings for the transaction inputs, from the given outputs.
-    fn new(
-        outputs: Vec<Vec<&OutputOnChain>>,
-        tx_version: TxVersion,
-    ) -> Result<Rings, TransactionError> {
-        Ok(match tx_version {
-            TxVersion::RingSignatures => Rings::Legacy(
-                outputs
-                    .into_iter()
-                    .map(|inp_outs| {
-                        inp_outs
-                            .into_iter()
-                            .map(|out| out.key.ok_or(TransactionError::RingMemberNotFoundOrInvalid))
-                            .collect::<Result<Vec<_>, TransactionError>>()
-                    })
-                    .collect::<Result<Vec<_>, TransactionError>>()?,
-            ),
-            TxVersion::RingCT => Rings::RingCT(
-                outputs
-                    .into_iter()
-                    .map(|inp_outs| {
-                        inp_outs
-                            .into_iter()
-                            .map(|out| {
-                                Ok([
-                                    out.key
-                                        .ok_or(TransactionError::RingMemberNotFoundOrInvalid)?,
-                                    out.commitment,
-                                ])
-                            })
-                            .collect::<Result<_, TransactionError>>()
-                    })
-                    .collect::<Result<_, _>>()?,
-            ),
-        })
-    }
-}
-
-/// Information on the outputs the transaction is is referencing for inputs (ring members).
+/// Information on the outputs the transaction is referencing for inputs (ring members).
 #[derive(Debug)]
 pub struct TxRingMembersInfo {
     pub rings: Rings,
@@ -149,49 +72,6 @@ pub struct TxRingMembersInfo {
     pub decoy_info: Option<DecoyInfo>,
     pub youngest_used_out_height: u64,
     pub time_locked_outs: Vec<Timelock>,
-    pub hf: HardFork,
-}
-
-impl TxRingMembersInfo {
-    /// Construct a [`TxRingMembersInfo`] struct.
-    ///
-    /// The used outs must be all the ring members used in the transactions inputs.
-    pub fn new(
-        used_outs: Vec<Vec<&OutputOnChain>>,
-        decoy_info: Option<DecoyInfo>,
-        tx_version: TxVersion,
-        hf: HardFork,
-    ) -> Result<TxRingMembersInfo, TransactionError> {
-        Ok(TxRingMembersInfo {
-            youngest_used_out_height: used_outs
-                .iter()
-                .map(|inp_outs| {
-                    inp_outs
-                        .iter()
-                        // the output with the highest height is the youngest
-                        .map(|out| out.height)
-                        .max()
-                        .expect("Input must have ring members")
-                })
-                .max()
-                .expect("Tx must have inputs"),
-            time_locked_outs: used_outs
-                .iter()
-                .flat_map(|inp_outs| {
-                    inp_outs
-                        .iter()
-                        .filter_map(|out| match out.time_lock {
-                            Timelock::None => None,
-                            lock => Some(lock),
-                        })
-                        .collect::<Vec<_>>()
-                })
-                .collect(),
-            hf,
-            rings: Rings::new(used_outs, tx_version)?,
-            decoy_info,
-        })
-    }
 }
 
 /// A struct holding information about the inputs and their decoys. This data can vary by block so
@@ -202,7 +82,7 @@ impl TxRingMembersInfo {
 /// - The top block hash is the same as when this data was retrieved (the blockchain state is unchanged).
 ///
 /// <https://cuprate.github.io/monero-book/consensus_rules/transactions/decoys.html>
-#[derive(Debug)]
+#[derive(Debug, Copy, Clone)]
 pub struct DecoyInfo {
     /// The number of inputs that have enough outputs on the chain to mix with.
     pub mixable: usize,
@@ -229,7 +109,7 @@ impl DecoyInfo {
     ///
     pub fn new(
         inputs: &[Input],
-        outputs_with_amount: &HashMap<u64, usize>,
+        outputs_with_amount: impl Fn(u64) -> usize,
         hf: &HardFork,
     ) -> Result<DecoyInfo, TransactionError> {
         let mut min_decoys = usize::MAX;
@@ -247,9 +127,7 @@ impl DecoyInfo {
                     ..
                 } => {
                     if let Some(amount) = amount {
-                        let outs_with_amt = *outputs_with_amount
-                            .get(amount)
-                            .expect("outputs_with_amount does not include needed amount.");
+                        let outs_with_amt = outputs_with_amount(*amount);
 
                         // <https://cuprate.github.io/monero-book/consensus_rules/transactions/decoys.html#mixable-and-unmixable-inputs>
                         if outs_with_amt <= minimum_decoys {
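Since `outputs_with_amount` is now a closure instead of a pre-built `&HashMap<u64, usize>`, the caller decides how missing amounts behave; the old `.expect(...)` panicked when an amount was absent, whereas a closure can simply default to zero. A hedged usage sketch with hypothetical map contents:

```rust
use std::collections::HashMap;

fn main() {
    // Hypothetical: number of on-chain outputs per pre-RCT amount.
    let outs: HashMap<u64, usize> = HashMap::from([(10_000, 5), (20_000, 12)]);

    // The closure handed to `DecoyInfo::new` can default to 0
    // instead of panicking on an unknown amount.
    let outputs_with_amount = |amount: u64| outs.get(&amount).copied().unwrap_or(0);

    assert_eq!(outputs_with_amount(10_000), 5);
    assert_eq!(outputs_with_amount(99), 0);
}
```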


@@ -154,6 +154,13 @@ pub(crate) fn check_input_signatures(
         Err(RingCTError::RingInvalid)?;
     }
 
+    let pseudo_outs = match &rct_sig.prunable {
+        RctPrunable::MlsagBulletproofs { pseudo_outs, .. }
+        | RctPrunable::Clsag { pseudo_outs, .. } => pseudo_outs.as_slice(),
+        RctPrunable::MlsagBorromean { .. } => rct_sig.base.pseudo_outs.as_slice(),
+        RctPrunable::AggregateMlsagBorromean { .. } | RctPrunable::Null => &[],
+    };
+
     match &rct_sig.prunable {
         RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?,
         RctPrunable::AggregateMlsagBorromean { mlsag, .. } => {
@@ -174,7 +181,7 @@
         }
         RctPrunable::MlsagBorromean { mlsags, .. }
         | RctPrunable::MlsagBulletproofs { mlsags, .. } => try_par_iter(mlsags)
-            .zip(&rct_sig.base.pseudo_outs)
+            .zip(pseudo_outs)
             .zip(inputs)
             .zip(rings)
             .try_for_each(|(((mlsag, pseudo_out), input), ring)| {
@@ -189,7 +196,7 @@
             )?)
         }),
         RctPrunable::Clsag { clsags, .. } => try_par_iter(clsags)
-            .zip(&rct_sig.base.pseudo_outs)
+            .zip(pseudo_outs)
            .zip(inputs)
             .zip(rings)
             .try_for_each(|(((clsags, pseudo_out), input), ring)| {
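Context for the change above: where the pseudo-out commitments live depends on the RCT type. The Borromean-era MLSAG keeps them in the signature base, while the bulletproof-era MLSAG and CLSAG carry them in the prunable section, which is what the new `match` selects before the signature checks. A reduced sketch of that selection with hypothetical mirror types (the real ones come from monero-serai):

```rust
/// Hypothetical mirror of the prunable RCT variants.
enum Prunable {
    Null,
    AggregateMlsagBorromean,
    MlsagBorromean,
    MlsagBulletproofs { pseudo_outs: Vec<[u8; 32]> },
    Clsag { pseudo_outs: Vec<[u8; 32]> },
}

struct RctSig {
    base_pseudo_outs: Vec<[u8; 32]>,
    prunable: Prunable,
}

/// Select the pseudo-outs from wherever this RCT type stores them.
fn pseudo_outs(sig: &RctSig) -> &[[u8; 32]] {
    match &sig.prunable {
        Prunable::MlsagBulletproofs { pseudo_outs } | Prunable::Clsag { pseudo_outs } => {
            pseudo_outs.as_slice()
        }
        Prunable::MlsagBorromean => sig.base_pseudo_outs.as_slice(),
        Prunable::AggregateMlsagBorromean | Prunable::Null => &[],
    }
}

fn main() {
    let sig = RctSig {
        base_pseudo_outs: vec![[0; 32]],
        prunable: Prunable::MlsagBorromean,
    };
    assert_eq!(pseudo_outs(&sig).len(), 1);
}
```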


@ -0,0 +1,298 @@
use std::ops::Range;
use curve25519_dalek::{
constants::{ED25519_BASEPOINT_POINT, EIGHT_TORSION},
edwards::CompressedEdwardsY,
EdwardsPoint,
};
use proptest::{collection::vec, prelude::*};
use monero_serai::transaction::Output;
use super::*;
use crate::decomposed_amount::decomposed_amounts;
#[test]
fn test_check_output_amount_v1() {
for amount in decomposed_amounts() {
assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok())
}
proptest!(|(amount in any::<u64>().prop_filter("value_decomposed", |val| !is_decomposed_amount(val)))| {
prop_assert!(check_output_amount_v1(amount, &HardFork::V2).is_err());
prop_assert!(check_output_amount_v1(amount, &HardFork::V1).is_ok())
});
}
#[test]
fn test_sum_outputs() {
let mut output_10 = Output {
key: CompressedEdwardsY([0; 32]),
amount: None,
view_tag: None,
};
output_10.amount = Some(10);
let mut outputs_20 = output_10.clone();
outputs_20.amount = Some(20);
let outs = [output_10, outputs_20];
let sum = sum_outputs(&outs, &HardFork::V16, &TxVersion::RingSignatures).unwrap();
assert_eq!(sum, 30);
assert!(sum_outputs(&outs, &HardFork::V16, &TxVersion::RingCT).is_err())
}
#[test]
fn test_decoy_info() {
let decoy_info = DecoyInfo {
mixable: 0,
not_mixable: 0,
min_decoys: minimum_decoys(&HardFork::V8),
max_decoys: minimum_decoys(&HardFork::V8) + 1,
};
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
assert!(check_decoy_info(&decoy_info, &HardFork::V16).is_err());
let mut decoy_info = DecoyInfo {
mixable: 0,
not_mixable: 0,
min_decoys: minimum_decoys(&HardFork::V8) - 1,
max_decoys: minimum_decoys(&HardFork::V8) + 1,
};
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
decoy_info.not_mixable = 1;
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
decoy_info.mixable = 2;
assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
let mut decoy_info = DecoyInfo {
mixable: 0,
not_mixable: 0,
min_decoys: minimum_decoys(&HardFork::V12),
max_decoys: minimum_decoys(&HardFork::V12) + 1,
};
assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_err());
decoy_info.max_decoys = decoy_info.min_decoys;
assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_ok());
}
#[test]
fn test_torsion_ki() {
for &key_image in EIGHT_TORSION[1..].iter() {
assert!(check_key_images(&Input::ToKey {
key_image,
amount: None,
key_offsets: vec![],
})
.is_err())
}
}
/// Returns a strategy that resolves to a [`RctType`] that uses
/// BPs(+).
#[allow(unreachable_code)]
#[allow(clippy::diverging_sub_expression)]
fn bulletproof_rct_type() -> BoxedStrategy<RctType> {
return prop_oneof![
Just(RctType::Bulletproofs),
Just(RctType::BulletproofsCompactAmount),
Just(RctType::Clsag),
Just(RctType::BulletproofsPlus),
]
.boxed();
// Here to make sure this is updated when needed.
match unreachable!() {
RctType::Null => {}
RctType::MlsagAggregate => {}
RctType::MlsagIndividual => {}
RctType::Bulletproofs => {}
RctType::BulletproofsCompactAmount => {}
RctType::Clsag => {}
RctType::BulletproofsPlus => {}
};
}
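The early return plus `match unreachable!()` above is a deliberate compile-time guard: the match never runs, but because it is exhaustive over `RctType`, adding a new variant breaks the build until this strategy is revisited. A standalone sketch of the same pattern (hypothetical `Version` enum, not cuprate code):

#[derive(Clone, Copy)]
enum Version {
    A,
    B,
}

#[allow(unreachable_code)]
#[allow(clippy::diverging_sub_expression)]
fn supported(v: Version) -> bool {
    return matches!(v, Version::A | Version::B);

    // Never executed; purely here so that adding `Version::C` makes this match
    // non-exhaustive and forces whoever adds it to update `supported`.
    match unreachable!() {
        Version::A => {}
        Version::B => {}
    };
}

fn main() {
    assert!(supported(Version::A) && supported(Version::B));
}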
prop_compose! {
/// Returns a valid prime-order point.
fn random_point()(bytes in any::<[u8; 32]>()) -> EdwardsPoint {
EdwardsPoint::mul_base_clamped(bytes)
}
}
prop_compose! {
/// Returns a valid torsioned point.
fn random_torsioned_point()(point in random_point(), torsion in 1..8_usize) -> EdwardsPoint {
point + curve25519_dalek::constants::EIGHT_TORSION[torsion]
}
}
prop_compose! {
/// Returns a random [`Output`].
///
/// `key` is always valid.
fn random_out(rct: bool, view_tagged: bool)(
point in random_point(),
amount in any::<u64>(),
view_tag in any::<u8>(),
) -> Output {
Output {
amount: if rct { None } else { Some(amount) },
key: point.compress(),
view_tag: if view_tagged { Some(view_tag) } else { None },
}
}
}
prop_compose! {
/// Returns a random [`Output`].
///
/// `key` is always valid but torsioned.
fn random_torsioned_out(rct: bool, view_tagged: bool)(
point in random_torsioned_point(),
amount in any::<u64>(),
view_tag in any::<u8>(),
) -> Output {
Output {
amount: if rct { None } else { Some(amount) },
key: point.compress(),
view_tag: if view_tagged { Some(view_tag) } else { None },
}
}
}
prop_compose! {
/// Returns a [`HardFork`] in a specific range.
fn hf_in_range(range: Range<u8>)(
hf in range,
) -> HardFork {
HardFork::from_version(hf).unwrap()
}
}
prop_compose! {
/// Returns a [`Timelock`] that is locked given a height and time.
fn locked_timelock(height: u64, time_for_time_lock: u64)(
timebased in any::<bool>(),
lock_height in (height+1)..500_000_001,
time_for_time_lock in (time_for_time_lock+121)..,
) -> Timelock {
if timebased || lock_height > 500_000_000 {
Timelock::Time(time_for_time_lock)
} else {
Timelock::Block(usize::try_from(lock_height).unwrap())
}
}
}
prop_compose! {
/// Returns a [`Timelock`] that is unlocked given a height and time.
fn unlocked_timelock(height: u64, time_for_time_lock: u64)(
ty in 0..3,
lock_height in 0..(height+1),
time_for_time_lock in 0..(time_for_time_lock+121),
) -> Timelock {
match ty {
0 => Timelock::None,
1 => Timelock::Time(time_for_time_lock),
_ => Timelock::Block(usize::try_from(lock_height).unwrap())
}
}
}
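Both timelock strategies above lean on how Monero encodes `unlock_time` in a single field: values below 500,000,000 are block heights, and anything at or above that is a unix timestamp (hence the `500_000_001` exclusive upper bound on generated heights in `locked_timelock`). A standalone illustration of that split (not cuprate code):

#[derive(Debug)]
enum DecodedLock {
    /// The lock is a block height.
    Height(u64),
    /// The lock is a unix timestamp.
    Time(u64),
}

// Values below this threshold are block heights; everything else is a timestamp.
const TIMELOCK_HEIGHT_LIMIT: u64 = 500_000_000;

fn decode_unlock_time(unlock_time: u64) -> DecodedLock {
    if unlock_time < TIMELOCK_HEIGHT_LIMIT {
        DecodedLock::Height(unlock_time)
    } else {
        DecodedLock::Time(unlock_time)
    }
}

fn main() {
    assert!(matches!(decode_unlock_time(3_000_000), DecodedLock::Height(_)));
    assert!(matches!(decode_unlock_time(1_700_000_000), DecodedLock::Time(_)));
}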
proptest! {
#[test]
fn test_check_output_keys(
outs in vec(random_out(true, true), 0..16),
torsioned_outs in vec(random_torsioned_out(false, true), 0..16)
) {
prop_assert!(check_output_keys(&outs).is_ok());
prop_assert!(check_output_keys(&torsioned_outs).is_ok());
}
#[test]
fn output_types(
mut view_tag_outs in vec(random_out(true, true), 1..16),
mut non_view_tag_outs in vec(random_out(true, false), 1..16),
hf_no_view_tags in hf_in_range(1..14),
hf_view_tags in hf_in_range(16..17),
) {
prop_assert!(check_output_types(&view_tag_outs, &hf_view_tags).is_ok());
prop_assert!(check_output_types(&view_tag_outs, &hf_no_view_tags).is_err());
prop_assert!(check_output_types(&non_view_tag_outs, &hf_no_view_tags).is_ok());
prop_assert!(check_output_types(&non_view_tag_outs, &hf_view_tags).is_err());
prop_assert!(check_output_types(&non_view_tag_outs, &HardFork::V15).is_ok());
prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_ok());
view_tag_outs.append(&mut non_view_tag_outs);
prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_err());
}
#[test]
fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize, rct_type in bulletproof_rct_type()) {
prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_ok());
}
#[test]
fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX, rct_type in bulletproof_rct_type()) {
prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_err());
}
#[test]
fn test_check_output_amount_v2(amt in 1..u64::MAX) {
prop_assert!(check_output_amount_v2(amt).is_err());
prop_assert!(check_output_amount_v2(0).is_ok())
}
#[test]
fn test_block_unlock_time(height in 1..u64::MAX) {
prop_assert!(check_block_time_lock(height, height));
prop_assert!(!check_block_time_lock(height, height - 1));
prop_assert!(check_block_time_lock(height, height+1));
}
#[test]
fn test_timestamp_time_lock(timestamp in 500_000_001..u64::MAX) {
prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, &HardFork::V16));
prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, &HardFork::V16));
prop_assert!(check_timestamp_time_lock(timestamp, timestamp, &HardFork::V16));
}
#[test]
fn test_time_locks(
mut locked_locks in vec(locked_timelock(5_000, 100_000_000), 1..50),
mut unlocked_locks in vec(unlocked_timelock(5_000, 100_000_000), 1..50)
) {
assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_ok());
unlocked_locks.append(&mut locked_locks);
assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
}
#[test]
fn test_check_input_has_decoys(key_offsets in vec(any::<u64>(), 1..10_000)) {
assert!(check_input_has_decoys(&Input::ToKey {
key_image: ED25519_BASEPOINT_POINT,
amount: None,
key_offsets,
}).is_ok());
assert!(check_input_has_decoys(&Input::ToKey {
key_image: ED25519_BASEPOINT_POINT,
amount: None,
key_offsets: vec![],
}).is_err());
}
}


@ -1,4 +1,4 @@
use std::cell::UnsafeCell; use std::{cell::RefCell, ops::DerefMut};
use multiexp::BatchVerifier as InternalBatchVerifier; use multiexp::BatchVerifier as InternalBatchVerifier;
use rayon::prelude::*; use rayon::prelude::*;
@ -6,9 +6,9 @@ use thread_local::ThreadLocal;
use crate::ConsensusError; use crate::ConsensusError;
/// A multi threaded batch verifier. /// A multithreaded batch verifier.
pub struct MultiThreadedBatchVerifier { pub struct MultiThreadedBatchVerifier {
internal: ThreadLocal<UnsafeCell<InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>>>, internal: ThreadLocal<RefCell<InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>>>,
} }
impl MultiThreadedBatchVerifier { impl MultiThreadedBatchVerifier {
@ -27,19 +27,15 @@ impl MultiThreadedBatchVerifier {
) -> Result<R, ConsensusError> { ) -> Result<R, ConsensusError> {
let verifier_cell = self let verifier_cell = self
.internal .internal
.get_or(|| UnsafeCell::new(InternalBatchVerifier::new(0))); .get_or(|| RefCell::new(InternalBatchVerifier::new(8)));
// SAFETY: This is safe for 2 reasons: // TODO: this is not ok as a rayon par_iter could be called in stmt.
// 1. each thread gets a different batch verifier. stmt(verifier_cell.borrow_mut().deref_mut())
// 2. only this function `queue_statement` will get the inner batch verifier, it's private.
//
// TODO: it's probably ok to just use RefCell
stmt(unsafe { &mut *verifier_cell.get() })
} }
pub fn verify(self) -> bool { pub fn verify(self) -> bool {
self.internal self.internal
.into_iter() .into_iter()
.map(UnsafeCell::into_inner) .map(RefCell::into_inner)
.par_bridge() .par_bridge()
.find_any(|batch_verifier| !batch_verifier.verify_vartime()) .find_any(|batch_verifier| !batch_verifier.verify_vartime())
.is_none() .is_none()
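To make the `UnsafeCell` → `RefCell` swap above easier to follow, here is a minimal, self-contained sketch of the same thread-local batch-verification pattern (assumptions: the `thread_local` and `rayon` crates, with `ToyVerifier` as a stand-in for `multiexp`'s `BatchVerifier`):

use std::cell::RefCell;

use rayon::prelude::*;
use thread_local::ThreadLocal;

/// Stand-in for the real batch verifier: queues statements, verifies them in bulk.
struct ToyVerifier {
    statements: Vec<bool>,
}

impl ToyVerifier {
    fn new() -> Self {
        ToyVerifier { statements: Vec::new() }
    }
    fn queue(&mut self, ok: bool) {
        self.statements.push(ok);
    }
    fn verify(&self) -> bool {
        self.statements.iter().all(|&ok| ok)
    }
}

struct MultiThreadedVerifier {
    // One verifier per rayon worker thread, so queueing never crosses threads.
    internal: ThreadLocal<RefCell<ToyVerifier>>,
}

impl MultiThreadedVerifier {
    fn new() -> Self {
        MultiThreadedVerifier { internal: ThreadLocal::new() }
    }

    fn queue_statement(&self, ok: bool) {
        let cell = self.internal.get_or(|| RefCell::new(ToyVerifier::new()));
        // As the diff's TODO notes, `borrow_mut` panics on re-entry from the
        // same thread (e.g. a nested rayon `par_iter` inside the closure).
        cell.borrow_mut().queue(ok);
    }

    fn verify(self) -> bool {
        self.internal
            .into_iter()
            .map(RefCell::into_inner)
            .par_bridge()
            // Fail fast if any per-thread verifier rejects its batch.
            .find_any(|verifier| !verifier.verify())
            .is_none()
    }
}

fn main() {
    let verifier = MultiThreadedVerifier::new();
    (0..1_000).into_par_iter().for_each(|_| verifier.queue_statement(true));
    assert!(verifier.verify());
}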


@ -1,374 +0,0 @@
#[cfg(feature = "binaries")]
mod bin {
use std::{ops::Range, path::PathBuf, sync::Arc};
use clap::Parser;
use futures::{channel::mpsc, SinkExt, StreamExt};
use monero_serai::{block::Block, transaction::Transaction};
use tokio::sync::RwLock;
use tower::{Service, ServiceExt};
use tracing::level_filters::LevelFilter;
use cuprate_helper::network::Network;
use cuprate_consensus::{
context::{
BlockChainContextRequest, BlockChainContextResponse, ContextConfig,
UpdateBlockchainCacheData,
},
initialize_blockchain_context, initialize_verifier,
rpc::{cache::ScanningCache, init_rpc_load_balancer, RpcConfig},
Database, DatabaseRequest, DatabaseResponse, VerifiedBlockInformation, VerifyBlockRequest,
VerifyBlockResponse,
};
const MAX_BLOCKS_IN_RANGE: u64 = 500;
const BATCHES_IN_REQUEST: u64 = 3;
const MAX_BLOCKS_HEADERS_IN_RANGE: u64 = 1000;
/// Calls for a batch of blocks, returning the response.
async fn call_batch<D: Database>(
range: Range<u64>,
database: D,
) -> Result<DatabaseResponse, tower::BoxError> {
database
.oneshot(DatabaseRequest::BlockBatchInRange(range))
.await
}
async fn update_cache_and_context<Ctx>(
cache: &RwLock<ScanningCache>,
context_updater: &mut Ctx,
verified_block_info: VerifiedBlockInformation,
) -> Result<(), tower::BoxError>
where
Ctx: tower::Service<
BlockChainContextRequest,
Response = BlockChainContextResponse,
Error = tower::BoxError,
>,
{
// add the new block to the cache
cache.write().await.add_new_block_data(
verified_block_info.generated_coins,
&verified_block_info.block.miner_tx,
&verified_block_info.txs,
);
// update the chain context svc with the new block
context_updater
.ready()
.await?
.call(BlockChainContextRequest::Update(
UpdateBlockchainCacheData {
new_top_hash: verified_block_info.block_hash,
height: verified_block_info.height,
timestamp: verified_block_info.block.header.timestamp,
weight: verified_block_info.weight,
long_term_weight: verified_block_info.long_term_weight,
vote: verified_block_info.hf_vote,
generated_coins: verified_block_info.generated_coins,
cumulative_difficulty: verified_block_info.cumulative_difficulty,
},
))
.await?;
Ok(())
}
async fn call_blocks<D>(
mut block_chan: mpsc::Sender<Vec<(Block, Vec<Transaction>)>>,
start_height: u64,
chain_height: u64,
database: D,
) -> Result<(), tower::BoxError>
where
D: Database + Clone + Send + Sync + 'static,
D::Future: Send + 'static,
{
let mut next_fut = tokio::spawn(call_batch(
start_height
..(start_height + (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST)).min(chain_height),
database.clone(),
));
for next_batch_start in (start_height..chain_height)
.step_by((MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST) as usize)
.skip(1)
{
// Call the next batch while we handle this batch.
let current_fut = std::mem::replace(
&mut next_fut,
tokio::spawn(call_batch(
next_batch_start
..(next_batch_start + (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST))
.min(chain_height),
database.clone(),
)),
);
let DatabaseResponse::BlockBatchInRange(blocks) = current_fut.await?? else {
panic!("Database sent incorrect response!");
};
tracing::info!(
"Got batch: {:?}, chain height: {}",
(next_batch_start - (MAX_BLOCKS_IN_RANGE * BATCHES_IN_REQUEST))..(next_batch_start),
chain_height
);
block_chan.send(blocks).await?;
}
Ok(())
}
async fn scan_chain<D>(
cache: Arc<RwLock<ScanningCache>>,
save_file: PathBuf,
_rpc_config: Arc<std::sync::RwLock<RpcConfig>>,
database: D,
net: Network,
) -> Result<(), tower::BoxError>
where
D: Database + Clone + Send + Sync + 'static,
D::Future: Send + 'static,
{
tracing::info!("Beginning chain scan");
// TODO: when we implement all rules use the RPCs chain height, for now we don't check v2 txs.
let chain_height = 3_152_725;
tracing::info!("scanning to chain height: {}", chain_height);
let config = match net {
Network::Mainnet => ContextConfig::main_net(),
Network::Stagenet => ContextConfig::stage_net(),
Network::Testnet => ContextConfig::test_net(),
};
let mut ctx_svc = initialize_blockchain_context(config, database.clone()).await?;
let (mut block_verifier, _) =
initialize_verifier(database.clone(), ctx_svc.clone()).await?;
let start_height = cache.read().await.height;
let (block_tx, mut incoming_blocks) = mpsc::channel(3);
tokio::spawn(
async move { call_blocks(block_tx, start_height, chain_height, database).await },
);
while let Some(incoming_blocks) = incoming_blocks.next().await {
let VerifyBlockResponse::MainChainBatchPrep(blocks, txs) = block_verifier
.ready()
.await?
.call(VerifyBlockRequest::MainChainBatchPrep(incoming_blocks))
.await?
else {
panic!()
};
let mut height;
for (block, txs) in blocks.into_iter().zip(txs) {
let VerifyBlockResponse::MainChain(verified_block_info) = block_verifier
.ready()
.await?
.call(VerifyBlockRequest::MainChainPrepared(block, txs))
.await?
else {
panic!()
};
height = verified_block_info.height;
if verified_block_info.height % 5000 == 0 {
tracing::info!("saving cache to: {}", save_file.display());
cache.write().await.save(&save_file).unwrap();
}
update_cache_and_context(&cache, &mut ctx_svc, verified_block_info).await?;
if height % 200 == 0 {
tracing::info!(
"verified blocks: {:?}, chain height: {}",
0..height,
chain_height
);
}
}
}
Ok(())
}
#[derive(Parser)]
struct Args {
/// The log level, valid values:
/// "off", "error", "warn", "info", "debug", "trace", or a number 0-5.
#[arg(short, long, default_value = "info")]
log_level: LevelFilter,
/// The network we should scan, valid values:
/// "mainnet", "testnet", "stagenet".
#[arg(short, long, default_value = "mainnet")]
network: String,
/// A list of RPC nodes we should use.
/// Example: <http://xmr-node.cakewallet.com:18081>
#[arg(long)]
rpc_nodes: Vec<String>,
/// Stops the scanner from including the default list of nodes; this is not
/// recommended unless you have sufficient self-defined nodes with `rpc_nodes`.
#[arg(long)]
dont_use_default_nodes: bool,
/// The directory/folder to save the scanning cache in.
/// This will default to your user cache directory.
#[arg(long)]
cache_dir: Option<PathBuf>,
}
pub async fn run() {
let args = Args::parse();
if args.dont_use_default_nodes & args.rpc_nodes.is_empty() {
panic!("Can't run scanner with no RPC nodes, see `--help` ")
}
tracing_subscriber::fmt()
.with_max_level(args.log_level)
.init();
let network = match args.network.as_str() {
"mainnet" => Network::Mainnet,
"testnet" => Network::Testnet,
"stagenet" => Network::Stagenet,
_ => panic!("Invalid network, scanner currently only supports mainnet"),
};
let mut file_for_cache = match args.cache_dir {
Some(dir) => dir,
None => dirs::cache_dir().unwrap(),
};
match network {
Network::Mainnet => file_for_cache.push("cuprate_rpc_scanning_cache.bin"),
Network::Stagenet => file_for_cache.push("cuprate_rpc_scanning_cache_stage_net.bin"),
Network::Testnet => file_for_cache.push("cuprate_rpc_scanning_cache_test_net.bin"),
}
let mut urls = if args.dont_use_default_nodes {
vec![]
} else {
match network {
Network::Mainnet => vec![
"http://xmr-node.cakewallet.com:18081".to_string(),
"https://node.sethforprivacy.com".to_string(),
// "http://nodex.monerujo.io:18081".to_string(),
"http://nodes.hashvault.pro:18081".to_string(),
"http://node.c3pool.com:18081".to_string(),
"http://node.trocador.app:18089".to_string(),
"http://xmr.lukas.services:18089".to_string(),
"http://xmr-node-eu.cakewallet.com:18081".to_string(),
"http://68.118.241.70:18089".to_string(),
"http://145.239.97.211:18089".to_string(),
//
"http://xmr-node.cakewallet.com:18081".to_string(),
"https://node.sethforprivacy.com".to_string(),
// "http://nodex.monerujo.io:18081".to_string(),
"http://nodes.hashvault.pro:18081".to_string(),
"http://node.c3pool.com:18081".to_string(),
"http://node.trocador.app:18089".to_string(),
"http://xmr.lukas.services:18089".to_string(),
"http://xmr-node-eu.cakewallet.com:18081".to_string(),
"http://68.118.241.70:18089".to_string(),
"http://145.239.97.211:18089".to_string(),
],
Network::Testnet => vec![
"http://testnet.xmr-tw.org:28081".to_string(),
"http://node3.monerodevs.org:28089".to_string(),
"http://node.monerodevs.org:28089".to_string(),
"http://125.229.105.12:28081".to_string(),
"http://node2.monerodevs.org:28089".to_string(),
"https://testnet.xmr.ditatompel.com".to_string(),
"http://singapore.node.xmr.pm:28081".to_string(),
//
"http://testnet.xmr-tw.org:28081".to_string(),
"http://node3.monerodevs.org:28089".to_string(),
"http://node.monerodevs.org:28089".to_string(),
"http://125.229.105.12:28081".to_string(),
"http://node2.monerodevs.org:28089".to_string(),
"https://testnet.xmr.ditatompel.com".to_string(),
"http://singapore.node.xmr.pm:28081".to_string(),
],
Network::Stagenet => vec![
"http://125.229.105.12:38081".to_string(),
"http://90.189.159.23:38089".to_string(),
"http://stagenet.xmr-tw.org:38081".to_string(),
"http://node.monerodevs.org:38089".to_string(),
"http://stagenet.community.rino.io:38081".to_string(),
"http://node2.monerodevs.org:38089".to_string(),
"http://node3.monerodevs.org:38089".to_string(),
"http://singapore.node.xmr.pm:38081".to_string(),
"https://stagenet.xmr.ditatompel.com".to_string(),
"http://3.10.182.182:38081".to_string(),
//
"http://125.229.105.12:38081".to_string(),
"http://90.189.159.23:38089".to_string(),
"http://stagenet.xmr-tw.org:38081".to_string(),
"http://node.monerodevs.org:38089".to_string(),
"http://stagenet.community.rino.io:38081".to_string(),
"http://node2.monerodevs.org:38089".to_string(),
"http://node3.monerodevs.org:38089".to_string(),
"http://singapore.node.xmr.pm:38081".to_string(),
"https://stagenet.xmr.ditatompel.com".to_string(),
"http://3.10.182.182:38081".to_string(),
],
}
};
urls.extend(args.rpc_nodes.into_iter());
let rpc_config = RpcConfig::new(MAX_BLOCKS_IN_RANGE, MAX_BLOCKS_HEADERS_IN_RANGE);
let rpc_config = Arc::new(std::sync::RwLock::new(rpc_config));
tracing::info!("Attempting to open cache at: {}", file_for_cache.display());
let cache = match ScanningCache::load(&file_for_cache) {
Ok(cache) => {
tracing::info!("Reloaded from cache, chain height: {}", cache.height);
Arc::new(RwLock::new(cache))
}
Err(_) => {
tracing::warn!("Couldn't load from cache starting from scratch");
let mut cache = ScanningCache::default();
let genesis = monero_consensus::genesis::generate_genesis_block(&network);
let total_outs = genesis
.miner_tx
.prefix
.outputs
.iter()
.map(|out| out.amount.unwrap_or(0))
.sum::<u64>();
cache.add_new_block_data(total_outs, &genesis.miner_tx, &[]);
Arc::new(RwLock::new(cache))
}
};
let rpc = init_rpc_load_balancer(urls, cache.clone(), rpc_config.clone());
scan_chain(cache, file_for_cache, rpc_config, rpc, network)
.await
.unwrap();
}
}
#[cfg(feature = "binaries")]
#[tokio::main]
async fn main() {
bin::run().await
}
#[cfg(not(feature = "binaries"))]
fn main() {
panic!("must run with feature `binaries`")
}


@ -1,5 +1,6 @@
//! Block Verifier Service.
use std::{ use std::{
collections::HashSet, collections::HashMap,
future::Future, future::Future,
pin::Pin, pin::Pin,
sync::Arc, sync::Arc,
@ -8,98 +9,53 @@ use std::{
use cuprate_helper::asynch::rayon_spawn_async; use cuprate_helper::asynch::rayon_spawn_async;
use futures::FutureExt; use futures::FutureExt;
use monero_serai::{ use monero_serai::{block::Block, transaction::Input};
block::Block,
transaction::{Input, Transaction},
};
use rayon::prelude::*;
use tower::{Service, ServiceExt}; use tower::{Service, ServiceExt};
use monero_consensus::{ use cuprate_consensus_rules::{
blocks::{ blocks::{calculate_pow_hash, check_block, check_block_pow, BlockError, RandomX},
calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height,
randomx_seed_height, BlockError, RandomX,
},
miner_tx::MinerTxError, miner_tx::MinerTxError,
ConsensusError, HardFork, ConsensusError, HardFork,
}; };
use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use crate::{ use crate::{
context::{ context::{BlockChainContextRequest, BlockChainContextResponse},
rx_vms::RandomXVM, BlockChainContextRequest, BlockChainContextResponse, transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse},
RawBlockChainContext,
},
transactions::{
batch_setup_txs, contextual_data, OutputCache, TransactionVerificationData,
VerifyTxRequest, VerifyTxResponse,
},
Database, ExtendedConsensusError, Database, ExtendedConsensusError,
}; };
#[derive(Debug)] /// A pre-prepared block with all data needed to verify it.
pub struct PrePreparedBlockExPOW {
pub block: Block,
pub block_blob: Vec<u8>,
pub hf_vote: HardFork,
pub hf_version: HardFork,
pub block_hash: [u8; 32],
pub height: u64,
pub miner_tx_weight: usize,
}
impl PrePreparedBlockExPOW {
pub fn new(block: Block) -> Result<PrePreparedBlockExPOW, ConsensusError> {
let (hf_version, hf_vote) =
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputNotOfTypeGen,
)))?
};
Ok(PrePreparedBlockExPOW {
block_blob: block.serialize(),
hf_vote,
hf_version,
block_hash: block.hash(),
height: *height,
miner_tx_weight: block.miner_tx.weight(),
block,
})
}
}
#[derive(Debug)] #[derive(Debug)]
pub struct PrePreparedBlock { pub struct PrePreparedBlock {
/// The block.
pub block: Block, pub block: Block,
/// The serialised block's bytes.
pub block_blob: Vec<u8>, pub block_blob: Vec<u8>,
/// The block's hf vote.
pub hf_vote: HardFork, pub hf_vote: HardFork,
/// The block's hf version.
pub hf_version: HardFork, pub hf_version: HardFork,
/// The block's hash.
pub block_hash: [u8; 32], pub block_hash: [u8; 32],
/// The block's POW hash.
pub pow_hash: [u8; 32], pub pow_hash: [u8; 32],
/// The weight of the block's miner transaction.
pub miner_tx_weight: usize, pub miner_tx_weight: usize,
} }
impl PrePreparedBlock { impl PrePreparedBlock {
pub fn new(block: Block) -> Result<PrePreparedBlock, ConsensusError> { /// Creates a new [`PrePreparedBlock`].
struct DummyRX; ///
/// The randomX VM must be `Some` if RX is needed, or this will panic.
impl RandomX for DummyRX { /// The randomX VM must also be initialised with the correct seed.
type Error = (); fn new<R: RandomX>(
fn calculate_hash(&self, _: &[u8]) -> Result<[u8; 32], Self::Error> { block: Block,
panic!("DummyRX cant calculate hash") randomx_vm: Option<&R>,
} ) -> Result<PrePreparedBlock, ConsensusError> {
}
let (hf_version, hf_vote) = let (hf_version, hf_vote) =
HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?;
@ -115,86 +71,43 @@ impl PrePreparedBlock {
hf_version, hf_version,
block_hash: block.hash(), block_hash: block.hash(),
pow_hash: calculate_pow_hash(
pow_hash: calculate_pow_hash::<DummyRX>( randomx_vm,
None,
&block.serialize_hashable(), &block.serialize_hashable(),
*height, *height,
&hf_version, &hf_version,
)?, )?,
miner_tx_weight: block.miner_tx.weight(), miner_tx_weight: block.miner_tx.weight(),
block, block,
}) })
} }
pub fn new_rx<R: RandomX>(
block: PrePreparedBlockExPOW,
randomx_vm: Option<&R>,
) -> Result<PrePreparedBlock, ConsensusError> {
let Some(Input::Gen(height)) = block.block.miner_tx.prefix.inputs.first() else {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputNotOfTypeGen,
)))?
};
Ok(PrePreparedBlock {
block_blob: block.block_blob,
hf_vote: block.hf_vote,
hf_version: block.hf_version,
block_hash: block.block_hash,
pow_hash: calculate_pow_hash(
randomx_vm,
&block.block.serialize_hashable(),
*height,
&block.hf_version,
)?,
miner_tx_weight: block.block.miner_tx.weight(),
block: block.block,
})
}
}
#[derive(Debug)]
pub struct VerifiedBlockInformation {
pub block: Block,
pub hf_vote: HardFork,
pub txs: Vec<Arc<TransactionVerificationData>>,
pub block_hash: [u8; 32],
pub pow_hash: [u8; 32],
pub height: u64,
pub generated_coins: u64,
pub weight: usize,
pub long_term_weight: usize,
pub cumulative_difficulty: u128,
} }
/// A request to verify a block.
pub enum VerifyBlockRequest { pub enum VerifyBlockRequest {
MainChainBatchPrep(Vec<(Block, Vec<Transaction>)>), /// A request to verify a block.
MainChain { MainChain {
block: Block, block: Block,
prepared_txs: Vec<Arc<TransactionVerificationData>>, prepared_txs: HashMap<[u8; 32], TransactionVerificationData>,
txs: Vec<Transaction>,
}, },
MainChainPrepared(PrePreparedBlock, Vec<Arc<TransactionVerificationData>>),
} }
/// A response from a verify block request.
pub enum VerifyBlockResponse { pub enum VerifyBlockResponse {
/// This block is valid.
MainChain(VerifiedBlockInformation), MainChain(VerifiedBlockInformation),
MainChainBatchPrep(
Vec<PrePreparedBlock>,
Vec<Vec<Arc<TransactionVerificationData>>>,
),
} }
// TODO: it is probably a bad idea for this to derive clone, if 2 places (RPC, P2P) receive valid but different blocks /// The block verifier service.
// then they will both get approved but only one should go to main chain. pub struct BlockVerifierService<C, TxV, D> {
#[derive(Clone)] /// The context service.
pub struct BlockVerifierService<C: Clone, TxV: Clone, D> {
context_svc: C, context_svc: C,
/// The tx verifier service.
tx_verifier_svc: TxV, tx_verifier_svc: TxV,
database: D, /// The database.
// Not used yet but will be.
_database: D,
} }
impl<C, TxV, D> BlockVerifierService<C, TxV, D> impl<C, TxV, D> BlockVerifierService<C, TxV, D>
@ -210,7 +123,8 @@ where
D: Database + Clone + Send + Sync + 'static, D: Database + Clone + Send + Sync + 'static,
D::Future: Send + 'static, D::Future: Send + 'static,
{ {
pub fn new( /// Creates a new block verifier.
pub(crate) fn new(
context_svc: C, context_svc: C,
tx_verifier_svc: TxV, tx_verifier_svc: TxV,
database: D, database: D,
@ -218,7 +132,7 @@ where
BlockVerifierService { BlockVerifierService {
context_svc, context_svc,
tx_verifier_svc, tx_verifier_svc,
database, _database: database,
} }
} }
} }
@ -255,30 +169,14 @@ where
fn call(&mut self, req: VerifyBlockRequest) -> Self::Future { fn call(&mut self, req: VerifyBlockRequest) -> Self::Future {
let context_svc = self.context_svc.clone(); let context_svc = self.context_svc.clone();
let tx_verifier_svc = self.tx_verifier_svc.clone(); let tx_verifier_svc = self.tx_verifier_svc.clone();
let database = self.database.clone();
async move { async move {
match req { match req {
VerifyBlockRequest::MainChain { VerifyBlockRequest::MainChain {
block, block,
prepared_txs, prepared_txs,
txs,
} => { } => {
verify_main_chain_block(block, txs, prepared_txs, context_svc, tx_verifier_svc) verify_main_chain_block(block, prepared_txs, context_svc, tx_verifier_svc).await
.await
}
VerifyBlockRequest::MainChainPrepared(prepped_block, txs) => {
verify_main_chain_block_prepared(
prepped_block,
txs,
context_svc,
tx_verifier_svc,
None,
)
.await
}
VerifyBlockRequest::MainChainBatchPrep(blocks) => {
batch_verify_main_chain_block(blocks, context_svc, database).await
} }
} }
} }
@ -286,188 +184,12 @@ where
} }
} }
async fn batch_verify_main_chain_block<C, D>( /// Verifies a prepared block.
blocks: Vec<(Block, Vec<Transaction>)>, async fn verify_main_chain_block<C, TxV>(
mut context_svc: C, block: Block,
mut database: D, mut txs: HashMap<[u8; 32], TransactionVerificationData>,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where
C: Service<
BlockChainContextRequest,
Response = BlockChainContextResponse,
Error = tower::BoxError,
> + Send
+ 'static,
C::Future: Send + 'static,
D: Database + Clone + Send + Sync + 'static,
D::Future: Send + 'static,
{
let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
tracing::debug!("Calculating block hashes.");
let blocks: Vec<PrePreparedBlockExPOW> = rayon_spawn_async(|| {
blocks
.into_iter()
.map(PrePreparedBlockExPOW::new)
.collect::<Result<Vec<_>, _>>()
})
.await?;
let mut timestamps_hfs = Vec::with_capacity(blocks.len());
let mut new_rx_vm = None;
for window in blocks.windows(2) {
if window[0].block_hash != window[1].block.header.previous
|| window[0].height != window[1].height - 1
{
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
}
if is_randomx_seed_height(window[0].height) {
new_rx_vm = Some((window[0].height, window[0].block_hash));
}
timestamps_hfs.push((window[0].block.header.timestamp, window[0].hf_version))
}
tracing::debug!("getting blockchain context");
let BlockChainContextResponse::Context(checked_context) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::GetContext)
.await
.map_err(Into::<ExtendedConsensusError>::into)?
else {
panic!("Context service returned wrong response!");
};
let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::BatchGetDifficulties(
timestamps_hfs,
))
.await
.map_err(Into::<ExtendedConsensusError>::into)?
else {
panic!("Context service returned wrong response!");
};
let context = checked_context.unchecked_blockchain_context().clone();
if context.chain_height != blocks[0].height {
Err(ConsensusError::Block(BlockError::MinerTxError(
MinerTxError::InputsHeightIncorrect,
)))?;
}
if context.top_hash != blocks[0].block.header.previous {
Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
}
let mut rx_vms = context.rx_vms;
if let Some((new_vm_height, new_vm_seed)) = new_rx_vm {
let new_vm = rayon_spawn_async(move || {
Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
})
.await;
context_svc
.ready()
.await?
.call(BlockChainContextRequest::NewRXVM((
new_vm_seed,
new_vm.clone(),
)))
.await
.map_err(Into::<ExtendedConsensusError>::into)?;
rx_vms.insert(new_vm_height, new_vm);
}
let blocks = rayon_spawn_async(move || {
blocks
.into_par_iter()
.zip(difficulties)
.map(|(block, difficultly)| {
let height = block.height;
let block = PrePreparedBlock::new_rx(
block,
rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref),
)?;
check_block_pow(&block.pow_hash, difficultly)?;
Ok(block)
})
.collect::<Result<Vec<_>, ConsensusError>>()
})
.await?;
let txs = batch_setup_txs(
txs.into_iter()
.zip(blocks.iter().map(|block| block.hf_version))
.collect(),
)
.await?;
let mut complete_block_idx = 0;
let mut out_cache = OutputCache::new();
out_cache
.extend_from_block(
blocks
.iter()
.map(|block| &block.block)
.zip(txs.iter().map(Vec::as_slice)),
&mut database,
)
.await?;
for (idx, hf) in blocks
.windows(2)
.enumerate()
.filter(|(_, block)| block[0].hf_version != blocks[1].hf_version)
.map(|(i, block)| (i, &block[0].hf_version))
{
contextual_data::batch_fill_ring_member_info(
txs.iter()
.take(idx + 1)
.skip(complete_block_idx)
.flat_map(|txs| txs.iter()),
hf,
context.re_org_token.clone(),
database.clone(),
Some(&out_cache),
)
.await?;
complete_block_idx = idx + 1;
}
if complete_block_idx != blocks.len() {
contextual_data::batch_fill_ring_member_info(
txs.iter()
.skip(complete_block_idx)
.flat_map(|txs| txs.iter()),
&blocks.last().unwrap().hf_version,
context.re_org_token.clone(),
database.clone(),
Some(&out_cache),
)
.await?;
}
Ok(VerifyBlockResponse::MainChainBatchPrep(blocks, txs))
}
async fn verify_main_chain_block_prepared<C, TxV>(
prepped_block: PrePreparedBlock,
txs: Vec<Arc<TransactionVerificationData>>,
context_svc: C, context_svc: C,
tx_verifier_svc: TxV, tx_verifier_svc: TxV,
context: Option<RawBlockChainContext>,
) -> Result<VerifyBlockResponse, ExtendedConsensusError> ) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where where
C: Service< C: Service<
@ -479,56 +201,66 @@ where
C::Future: Send + 'static, C::Future: Send + 'static,
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>, TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
{ {
let context = match context { tracing::debug!("getting blockchain context");
Some(context) => context,
None => {
tracing::debug!("getting blockchain context");
let BlockChainContextResponse::Context(checked_context) = context_svc
.oneshot(BlockChainContextRequest::GetContext)
.await
.map_err(Into::<ExtendedConsensusError>::into)?
else {
panic!("Context service returned wrong response!");
};
let context = checked_context.unchecked_blockchain_context().clone(); let BlockChainContextResponse::Context(checked_context) = context_svc
.oneshot(BlockChainContextRequest::GetContext)
tracing::debug!("got blockchain context: {:?}", context); .await
context .map_err(Into::<ExtendedConsensusError>::into)?
} else {
panic!("Context service returned wrong response!");
}; };
let context = checked_context.unchecked_blockchain_context().clone();
tracing::debug!("got blockchain context: {:?}", context);
// Set up the block and just pass it to [`verify_main_chain_block_prepared`]
let rx_vms = context.rx_vms.clone();
let height = context.chain_height;
let prepped_block = rayon_spawn_async(move || {
PrePreparedBlock::new(block, rx_vms.get(&height).map(AsRef::as_ref))
})
.await?;
tracing::debug!("verifying block: {}", hex::encode(prepped_block.block_hash));
check_block_pow(&prepped_block.pow_hash, context.next_difficulty) check_block_pow(&prepped_block.pow_hash, context.next_difficulty)
.map_err(ConsensusError::Block)?; .map_err(ConsensusError::Block)?;
// Check that the txs included are what we need and that there are not any extra. // Check that the txs included are what we need and that there are not any extra.
// Collecting into a HashSet could hide duplicates but we check Key Images are unique so someone would have to find
// a hash collision to include duplicate txs here. let mut ordered_txs = Vec::with_capacity(txs.len());
let mut tx_hashes = txs.iter().map(|tx| &tx.tx_hash).collect::<HashSet<_>>();
tracing::debug!("Checking we have correct transactions for block.");
for tx_hash in &prepped_block.block.txs { for tx_hash in &prepped_block.block.txs {
if !tx_hashes.remove(tx_hash) { let tx = txs
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); .remove(tx_hash)
} .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?;
} ordered_txs.push(Arc::new(tx));
if !tx_hashes.is_empty() {
return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect);
} }
drop(txs);
tracing::debug!("Verifying transactions for block.");
tx_verifier_svc tx_verifier_svc
.oneshot(VerifyTxRequest::Block { .oneshot(VerifyTxRequest::Prepped {
txs: txs.clone(), txs: ordered_txs.clone(),
current_chain_height: context.chain_height, current_chain_height: context.chain_height,
top_hash: context.top_hash,
time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(), time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(),
hf: context.current_hf, hf: context.current_hf,
re_org_token: context.re_org_token.clone(),
}) })
.await?; .await?;
let block_weight = let block_weight =
prepped_block.miner_tx_weight + txs.iter().map(|tx| tx.tx_weight).sum::<usize>(); prepped_block.miner_tx_weight + ordered_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
let total_fees = txs.iter().map(|tx| tx.fee).sum::<u64>(); let total_fees = ordered_txs.iter().map(|tx| tx.fee).sum::<u64>();
let (hf_vote, generated_coins) = check_block( tracing::debug!("Verifying block header.");
let (_, generated_coins) = check_block(
&prepped_block.block, &prepped_block.block,
total_fees, total_fees,
block_weight, block_weight,
@ -540,68 +272,30 @@ where
Ok(VerifyBlockResponse::MainChain(VerifiedBlockInformation { Ok(VerifyBlockResponse::MainChain(VerifiedBlockInformation {
block_hash: prepped_block.block_hash, block_hash: prepped_block.block_hash,
block: prepped_block.block, block: prepped_block.block,
txs, block_blob: prepped_block.block_blob,
txs: ordered_txs
.into_iter()
.map(|tx| {
// Note: it would be possible for the transaction verification service to hold onto the tx after the call
// if one of the txs was invalid and the rest are still in rayon threads (see the illustration after this function).
let tx = Arc::into_inner(tx).expect(
"Transaction verification service should not hold onto valid transactions.",
);
VerifiedTransactionInformation {
tx_blob: tx.tx_blob,
tx_weight: tx.tx_weight,
fee: tx.fee,
tx_hash: tx.tx_hash,
tx: tx.tx,
}
})
.collect(),
pow_hash: prepped_block.pow_hash, pow_hash: prepped_block.pow_hash,
generated_coins, generated_coins,
weight: block_weight, weight: block_weight,
height: context.chain_height, height: context.chain_height,
long_term_weight: context.next_block_long_term_weight(block_weight), long_term_weight: context.next_block_long_term_weight(block_weight),
hf_vote,
cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty, cumulative_difficulty: context.cumulative_difficulty + context.next_difficulty,
})) }))
} }
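The `Arc::into_inner` expectation above relies on a handy property: it only yields the value for the last strong reference, so it simultaneously unwraps the tx and asserts that nothing else kept a handle to it. A standalone illustration (toy values, not cuprate code):

use std::sync::Arc;

fn main() {
    let a = Arc::new(String::from("tx"));
    let b = Arc::clone(&a);

    // Two strong references exist, so `into_inner` returns `None`
    // (it still consumes and drops `a`'s reference).
    assert!(Arc::into_inner(a).is_none());

    // `b` is now the last reference, so the value moves out.
    assert_eq!(Arc::into_inner(b), Some(String::from("tx")));
}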
async fn verify_main_chain_block<C, TxV>(
block: Block,
txs: Vec<Transaction>,
mut prepared_txs: Vec<Arc<TransactionVerificationData>>,
mut context_svc: C,
tx_verifier_svc: TxV,
) -> Result<VerifyBlockResponse, ExtendedConsensusError>
where
C: Service<
BlockChainContextRequest,
Response = BlockChainContextResponse,
Error = tower::BoxError,
> + Send
+ 'static,
C::Future: Send + 'static,
TxV: Service<VerifyTxRequest, Response = VerifyTxResponse, Error = ExtendedConsensusError>,
{
tracing::debug!("getting blockchain context");
let BlockChainContextResponse::Context(checked_context) = context_svc
.ready()
.await?
.call(BlockChainContextRequest::GetContext)
.await
.map_err(Into::<ExtendedConsensusError>::into)?
else {
panic!("Context service returned wrong response!");
};
let context = checked_context.unchecked_blockchain_context().clone();
tracing::debug!("got blockchain context: {:?}", context);
let rx_vms = context.rx_vms.clone();
let prepped_block = rayon_spawn_async(move || {
let prepped_block_ex_pow = PrePreparedBlockExPOW::new(block)?;
let height = prepped_block_ex_pow.height;
PrePreparedBlock::new_rx(prepped_block_ex_pow, rx_vms.get(&height).map(AsRef::as_ref))
})
.await?;
check_block_pow(&prepped_block.pow_hash, context.cumulative_difficulty)
.map_err(ConsensusError::Block)?;
prepared_txs.append(&mut batch_setup_txs(vec![(txs, context.current_hf)]).await?[0]);
verify_main_chain_block_prepared(
prepped_block,
prepared_txs,
context_svc,
tx_verifier_svc,
Some(context),
)
.await
}


@ -4,32 +4,30 @@
//! This is used during contextual validation; it does not have all the data for contextual validation //! This is used during contextual validation; it does not have all the data for contextual validation
//! (outputs); for that you will need a [`Database`]. //! (outputs); for that you will need a [`Database`].
//! //!
use std::{ use std::{
cmp::min, cmp::min,
collections::HashMap, collections::HashMap,
future::Future, future::Future,
ops::DerefMut,
pin::Pin, pin::Pin,
sync::Arc, sync::Arc,
task::{Context, Poll}, task::{Context, Poll},
}; };
use futures::{ use futures::{channel::oneshot, FutureExt};
lock::{Mutex, OwnedMutexGuard, OwnedMutexLockFuture}, use tokio::sync::mpsc;
FutureExt, use tokio_util::sync::PollSender;
}; use tower::Service;
use tower::{Service, ServiceExt};
use monero_consensus::{blocks::ContextToVerifyBlock, current_unix_timestamp, HardFork}; use cuprate_consensus_rules::{blocks::ContextToVerifyBlock, current_unix_timestamp, HardFork};
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError}; use crate::{Database, ExtendedConsensusError};
pub(crate) mod difficulty; pub(crate) mod difficulty;
pub(crate) mod hardforks; pub(crate) mod hardforks;
pub(crate) mod rx_vms; pub(crate) mod rx_vms;
pub(crate) mod weight; pub(crate) mod weight;
mod task;
mod tokens; mod tokens;
pub use difficulty::DifficultyCacheConfig; pub use difficulty::DifficultyCacheConfig;
@ -40,13 +38,18 @@ pub use weight::BlockWeightsCacheConfig;
const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60;
/// Config for the context service.
pub struct ContextConfig { pub struct ContextConfig {
/// Hard-forks config.
pub hard_fork_cfg: HardForkConfig, pub hard_fork_cfg: HardForkConfig,
/// Difficulty config.
pub difficulty_cfg: DifficultyCacheConfig, pub difficulty_cfg: DifficultyCacheConfig,
/// Block weight config.
pub weights_config: BlockWeightsCacheConfig, pub weights_config: BlockWeightsCacheConfig,
} }
impl ContextConfig { impl ContextConfig {
/// Get the config for main-net.
pub fn main_net() -> ContextConfig { pub fn main_net() -> ContextConfig {
ContextConfig { ContextConfig {
hard_fork_cfg: HardForkConfig::main_net(), hard_fork_cfg: HardForkConfig::main_net(),
@ -55,26 +58,33 @@ impl ContextConfig {
} }
} }
/// Get the config for stage-net.
pub fn stage_net() -> ContextConfig { pub fn stage_net() -> ContextConfig {
ContextConfig { ContextConfig {
hard_fork_cfg: HardForkConfig::stage_net(), hard_fork_cfg: HardForkConfig::stage_net(),
// These 2 have the same config as main-net.
difficulty_cfg: DifficultyCacheConfig::main_net(), difficulty_cfg: DifficultyCacheConfig::main_net(),
weights_config: BlockWeightsCacheConfig::main_net(), weights_config: BlockWeightsCacheConfig::main_net(),
} }
} }
/// Get the config for test-net.
pub fn test_net() -> ContextConfig { pub fn test_net() -> ContextConfig {
ContextConfig { ContextConfig {
hard_fork_cfg: HardForkConfig::test_net(), hard_fork_cfg: HardForkConfig::test_net(),
// These 2 have the same config as main-net.
difficulty_cfg: DifficultyCacheConfig::main_net(), difficulty_cfg: DifficultyCacheConfig::main_net(),
weights_config: BlockWeightsCacheConfig::main_net(), weights_config: BlockWeightsCacheConfig::main_net(),
} }
} }
} }
/// Initialize the blockchain context service.
///
/// This function will request a lot of data from the database, so it may take a while.
pub async fn initialize_blockchain_context<D>( pub async fn initialize_blockchain_context<D>(
cfg: ContextConfig, cfg: ContextConfig,
mut database: D, database: D,
) -> Result< ) -> Result<
impl Service< impl Service<
BlockChainContextRequest, BlockChainContextRequest,
@ -93,74 +103,16 @@ where
D: Database + Clone + Send + Sync + 'static, D: Database + Clone + Send + Sync + 'static,
D::Future: Send + 'static, D::Future: Send + 'static,
{ {
let ContextConfig { let context_task = task::ContextTask::init_context(cfg, database).await?;
difficulty_cfg,
weights_config,
hard_fork_cfg,
} = cfg;
tracing::debug!("Initialising blockchain context"); // TODO: make buffer size configurable.
let (tx, rx) = mpsc::channel(15);
let DatabaseResponse::ChainHeight(chain_height, top_block_hash) = database tokio::spawn(context_task.run(rx));
.ready()
.await?
.call(DatabaseRequest::ChainHeight)
.await?
else {
panic!("Database sent incorrect response!");
};
let DatabaseResponse::GeneratedCoins(already_generated_coins) = database Ok(BlockChainContextService {
.ready() channel: PollSender::new(tx),
.await? })
.call(DatabaseRequest::GeneratedCoins)
.await?
else {
panic!("Database sent incorrect response!");
};
let db = database.clone();
let hardfork_state_handle = tokio::spawn(async move {
hardforks::HardForkState::init_from_chain_height(chain_height, hard_fork_cfg, db).await
});
let db = database.clone();
let difficulty_cache_handle = tokio::spawn(async move {
difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db).await
});
let db = database.clone();
let weight_cache_handle = tokio::spawn(async move {
weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db).await
});
let hardfork_state = hardfork_state_handle.await.unwrap()?;
let current_hf = hardfork_state.current_hardfork();
let db = database.clone();
let rx_seed_handle = tokio::spawn(async move {
rx_vms::RandomXVMCache::init_from_chain_height(chain_height, &current_hf, db).await
});
let context_svc = BlockChainContextService {
internal_blockchain_context: Arc::new(
InternalBlockChainContext {
current_validity_token: ValidityToken::new(),
current_reorg_token: ReOrgToken::new(),
difficulty_cache: difficulty_cache_handle.await.unwrap()?,
weight_cache: weight_cache_handle.await.unwrap()?,
rx_seed_cache: rx_seed_handle.await.unwrap()?,
hardfork_state,
chain_height,
already_generated_coins,
top_block_hash,
}
.into(),
),
lock_state: MutexLockState::Locked,
};
Ok(context_svc)
} }
/// Raw blockchain context, gotten from [`BlockChainContext`]. This data may become invalid, so it is not ok to keep /// Raw blockchain context, gotten from [`BlockChainContext`]. This data may become invalid, so it is not ok to keep
@ -169,12 +121,14 @@ where
pub struct RawBlockChainContext { pub struct RawBlockChainContext {
/// The current cumulative difficulty. /// The current cumulative difficulty.
pub cumulative_difficulty: u128, pub cumulative_difficulty: u128,
/// A token which is used to signal if a reorg has happened since creating the token. /// RandomX VMs; this maps seed heights to VMs. Will definitely contain the VM required to calculate the current block's
pub re_org_token: ReOrgToken, /// POW hash (if a RX VM is required), and may contain more.
pub rx_vms: HashMap<u64, Arc<RandomXVM>>, pub rx_vms: HashMap<u64, Arc<RandomXVM>>,
/// Context to verify a block, as needed by [`cuprate-consensus-rules`]
pub context_to_verify_block: ContextToVerifyBlock, pub context_to_verify_block: ContextToVerifyBlock,
/// The median long term block weight. /// The median long term block weight.
median_long_term_weight: usize, median_long_term_weight: usize,
/// The top block's timestamp (will be [`None`] if the top block is the genesis block).
top_block_timestamp: Option<u64>, top_block_timestamp: Option<u64>,
} }
@ -188,7 +142,7 @@ impl std::ops::Deref for RawBlockChainContext {
impl RawBlockChainContext { impl RawBlockChainContext {
/// Returns the timestamp that should be used when checking locked outputs. ///
/// ///
/// <https://cuprate.github.io/monero-book/consensus_rules/transactions/unlock_time.html#getting-the-current-time> /// ref: <https://cuprate.github.io/monero-book/consensus_rules/transactions/unlock_time.html#getting-the-current-time>
pub fn current_adjusted_timestamp_for_time_lock(&self) -> u64 { pub fn current_adjusted_timestamp_for_time_lock(&self) -> u64 {
if self.current_hf < HardFork::V13 || self.median_block_timestamp.is_none() { if self.current_hf < HardFork::V13 || self.median_block_timestamp.is_none() {
current_unix_timestamp() current_unix_timestamp()
@ -208,14 +162,7 @@ impl RawBlockChainContext {
} }
} }
pub fn block_blob_size_limit(&self) -> usize { /// Returns the next block's long term weight from its block weight.
self.effective_median_weight * 2 - 600
}
pub fn block_weight_limit(&self) -> usize {
self.median_weight_for_block_reward * 2
}
pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize { pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize {
weight::calculate_block_long_term_weight( weight::calculate_block_long_term_weight(
&self.current_hf, &self.current_hf,
@ -259,20 +206,31 @@ impl BlockChainContext {
} }
} }
/// Data needed from a new block to add it to the context cache.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct UpdateBlockchainCacheData { pub struct NewBlockData {
pub new_top_hash: [u8; 32], /// The block's hash.
pub block_hash: [u8; 32],
/// The block's height.
pub height: u64, pub height: u64,
/// The block's timestamp.
pub timestamp: u64, pub timestamp: u64,
/// The block's weight.
pub weight: usize, pub weight: usize,
/// The long term weight of this block.
pub long_term_weight: usize, pub long_term_weight: usize,
/// The coins generated by this block.
pub generated_coins: u64, pub generated_coins: u64,
/// The block's hf vote.
pub vote: HardFork, pub vote: HardFork,
pub vote: HardFork, pub vote: HardFork,
/// The cumulative difficulty of the chain.
pub cumulative_difficulty: u128, pub cumulative_difficulty: u128,
} }
/// A request to the blockchain context cache.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum BlockChainContextRequest { pub enum BlockChainContextRequest {
/// Get the current blockchain context.
GetContext, GetContext,
/// Get the next difficulties for these blocks. /// Get the next difficulties for these blocks.
/// ///
@ -280,49 +238,30 @@ pub enum BlockChainContextRequest {
/// ///
/// The number of difficulties returned will be one more than the number of timestamps/hfs. /// The number of difficulties returned will be one more than the number of timestamps/hfs.
BatchGetDifficulties(Vec<(u64, HardFork)>), BatchGetDifficulties(Vec<(u64, HardFork)>),
/// Add a VM that has been created outside of the blockchain context service to the blockchain context.
/// This is useful when batch calculating POW, as you may need to create a new VM if you batch a lot of blocks together;
/// it would be wasteful to then not give this VM to the context service to use when it needs to init a VM with the same
/// seed.
///
/// This should include the seed used to init this VM and the VM.
NewRXVM(([u8; 32], Arc<RandomXVM>)), NewRXVM(([u8; 32], Arc<RandomXVM>)),
Update(UpdateBlockchainCacheData), /// A request to add a new block to the cache.
Update(NewBlockData),
} }
pub enum BlockChainContextResponse { pub enum BlockChainContextResponse {
/// Blockchain context response.
Context(BlockChainContext), Context(BlockChainContext),
/// A list of difficulties.
BatchDifficulties(Vec<u128>), BatchDifficulties(Vec<u128>),
/// Ok response.
Ok, Ok,
} }
struct InternalBlockChainContext {
/// A token used to invalidate previous contexts when a new
/// block is added to the chain.
current_validity_token: ValidityToken,
/// A token which is used to signal a reorg has happened.
current_reorg_token: ReOrgToken,
difficulty_cache: difficulty::DifficultyCache, /// The blockchain context service.
weight_cache: weight::BlockWeightsCache, #[derive(Clone)]
rx_seed_cache: rx_vms::RandomXVMCache,
hardfork_state: hardforks::HardForkState,
chain_height: u64,
top_block_hash: [u8; 32],
already_generated_coins: u64,
}
enum MutexLockState {
Locked,
Acquiring(OwnedMutexLockFuture<InternalBlockChainContext>),
Acquired(OwnedMutexGuard<InternalBlockChainContext>),
}
pub struct BlockChainContextService { pub struct BlockChainContextService {
internal_blockchain_context: Arc<Mutex<InternalBlockChainContext>>, channel: PollSender<task::ContextTaskRequest>,
lock_state: MutexLockState,
}
impl Clone for BlockChainContextService {
fn clone(&self) -> Self {
BlockChainContextService {
internal_blockchain_context: self.internal_blockchain_context.clone(),
lock_state: MutexLockState::Locked,
}
}
} }
impl Service<BlockChainContextRequest> for BlockChainContextService { impl Service<BlockChainContextRequest> for BlockChainContextService {
@ -332,111 +271,25 @@ impl Service<BlockChainContextRequest> for BlockChainContextService {
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>; Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
loop { self.channel
match &mut self.lock_state { .poll_reserve(cx)
MutexLockState::Locked => { .map_err(|_| "Context service channel closed".into())
self.lock_state = MutexLockState::Acquiring(
Arc::clone(&self.internal_blockchain_context).lock_owned(),
)
}
MutexLockState::Acquiring(lock) => {
self.lock_state = MutexLockState::Acquired(futures::ready!(lock.poll_unpin(cx)))
}
MutexLockState::Acquired(_) => return Poll::Ready(Ok(())),
}
}
} }
fn call(&mut self, req: BlockChainContextRequest) -> Self::Future { fn call(&mut self, req: BlockChainContextRequest) -> Self::Future {
let MutexLockState::Acquired(mut internal_blockchain_context) = let (tx, rx) = oneshot::channel();
std::mem::replace(&mut self.lock_state, MutexLockState::Locked)
else { let req = task::ContextTaskRequest {
panic!("poll_ready() was not called first!") req,
tx,
span: tracing::Span::current(),
}; };
let res = self.channel.send_item(req);
async move { async move {
let InternalBlockChainContext { res.map_err(|_| "Context service closed.")?;
current_validity_token, rx.await.expect("Oneshot closed without response!")
current_reorg_token,
difficulty_cache,
weight_cache,
rx_seed_cache,
hardfork_state,
chain_height,
top_block_hash,
already_generated_coins,
} = internal_blockchain_context.deref_mut();
let res = match req {
BlockChainContextRequest::GetContext => {
let current_hf = hardfork_state.current_hardfork();
BlockChainContextResponse::Context(BlockChainContext {
validity_token: current_validity_token.clone(),
raw: RawBlockChainContext {
context_to_verify_block: ContextToVerifyBlock {
median_weight_for_block_reward: weight_cache
.median_for_block_reward(&current_hf),
effective_median_weight: weight_cache
.effective_median_block_weight(&current_hf),
top_hash: *top_block_hash,
median_block_timestamp: difficulty_cache.median_timestamp(
usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
),
chain_height: *chain_height,
current_hf,
next_difficulty: difficulty_cache.next_difficulty(&current_hf),
already_generated_coins: *already_generated_coins,
},
rx_vms: rx_seed_cache.get_vms(),
cumulative_difficulty: difficulty_cache.cumulative_difficulty(),
median_long_term_weight: weight_cache.median_long_term_weight(),
top_block_timestamp: difficulty_cache.top_block_timestamp(),
re_org_token: current_reorg_token.clone(),
},
})
}
BlockChainContextRequest::BatchGetDifficulties(blocks) => {
let next_diffs = difficulty_cache
.next_difficulties(blocks, &hardfork_state.current_hardfork());
BlockChainContextResponse::BatchDifficulties(next_diffs)
}
BlockChainContextRequest::NewRXVM(vm) => {
rx_seed_cache.add_vm(vm);
BlockChainContextResponse::Ok
}
BlockChainContextRequest::Update(new) => {
// Cancel the validity token and replace it with a new one.
std::mem::replace(current_validity_token, ValidityToken::new())
.set_data_invalid();
difficulty_cache.new_block(
new.height,
new.timestamp,
new.cumulative_difficulty,
);
weight_cache.new_block(new.height, new.weight, new.long_term_weight);
hardfork_state.new_block(new.vote, new.height);
rx_seed_cache
.new_block(
new.height,
&new.new_top_hash,
&hardfork_state.current_hardfork(),
)
.await;
*chain_height = new.height + 1;
*top_block_hash = new.new_top_hash;
*already_generated_coins =
already_generated_coins.saturating_add(new.generated_coins);
BlockChainContextResponse::Ok
}
};
Ok(res)
} }
.boxed() .boxed()
} }
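The rewrite above replaces the shared-`Mutex` service with a dedicated task that owns all context state, reached over an `mpsc` channel with a `oneshot` reply per request; `poll_ready` becomes channel backpressure via `PollSender::poll_reserve`. A minimal sketch of the same pattern (assumptions: the `tokio`, `tokio-util`, `futures` and `tower` crates, with hypothetical `Req`/`Resp` types standing in for the real request/response enums):

use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

use futures::{channel::oneshot, FutureExt};
use tokio::sync::mpsc;
use tokio_util::sync::PollSender;
use tower::{Service, ServiceExt};

/// Hypothetical request/response types standing in for the real context enums.
#[derive(Debug)]
struct Req(u64);
#[derive(Debug)]
struct Resp(u64);

/// What travels over the channel: the request plus a oneshot for the reply.
struct TaskRequest {
    req: Req,
    tx: oneshot::Sender<Result<Resp, tower::BoxError>>,
}

/// The task owns all mutable state, so no `Mutex` is needed anywhere.
async fn run_task(mut rx: mpsc::Receiver<TaskRequest>) {
    let mut total = 0_u64; // stand-in for the context caches
    while let Some(TaskRequest { req, tx }) = rx.recv().await {
        total += req.0;
        drop(tx.send(Ok(Resp(total))));
    }
}

#[derive(Clone)]
struct TaskService {
    channel: PollSender<TaskRequest>,
}

impl Service<Req> for TaskService {
    type Response = Resp;
    type Error = tower::BoxError;
    type Future = Pin<Box<dyn Future<Output = Result<Resp, tower::BoxError>> + Send>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Backpressure: readiness is a reserved slot in the bounded channel.
        self.channel.poll_reserve(cx).map_err(|_| "task closed".into())
    }

    fn call(&mut self, req: Req) -> Self::Future {
        let (tx, rx) = oneshot::channel();
        // Relies on the slot reserved in `poll_ready`, as in the diff.
        let res = self.channel.send_item(TaskRequest { req, tx });
        async move {
            res.map_err(|_| "task closed")?;
            rx.await.expect("task dropped the response channel")
        }
        .boxed()
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(15);
    tokio::spawn(run_task(rx));

    let mut svc = TaskService { channel: PollSender::new(tx) };
    let resp = svc.ready().await.unwrap().call(Req(5)).await.unwrap();
    assert_eq!(resp.0, 5);
}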


@ -1,11 +1,20 @@
//! Difficulty Module
//!
//! This module handles keeping track of the data required to calculate block difficulty.
//! This data is currently the cumulative difficulty of each block and its timestamp.
//!
//! The timestamps are also used in other consensus rules, so instead of duplicating the same
//! data in a different cache, the timestamps needed are retrieved from here.
//!
use std::{collections::VecDeque, ops::Range}; use std::{collections::VecDeque, ops::Range};
use tower::ServiceExt; use tower::ServiceExt;
use tracing::instrument; use tracing::instrument;
use cuprate_helper::num::median; use cuprate_helper::num::median;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use crate::{Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError, HardFork}; use crate::{Database, ExtendedConsensusError, HardFork};
/// The number of blocks we account for to calculate difficulty /// The number of blocks we account for to calculate difficulty
const DIFFICULTY_WINDOW: usize = 720; const DIFFICULTY_WINDOW: usize = 720;
@ -27,6 +36,10 @@ pub struct DifficultyCacheConfig {
} }
impl DifficultyCacheConfig { impl DifficultyCacheConfig {
/// Create a new difficulty cache config.
///
/// # Notes
/// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead.
pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig { pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig {
DifficultyCacheConfig { window, cut, lag } DifficultyCacheConfig { window, cut, lag }
} }
@ -41,7 +54,9 @@ impl DifficultyCacheConfig {
self.window - 2 * self.cut self.window - 2 * self.cut
} }
pub fn main_net() -> DifficultyCacheConfig { /// Returns the config needed for [`Mainnet`](cuprate_helper::network::Network::Mainnet). This is also the
/// config for all other current networks.
pub const fn main_net() -> DifficultyCacheConfig {
DifficultyCacheConfig { DifficultyCacheConfig {
window: DIFFICULTY_WINDOW, window: DIFFICULTY_WINDOW,
cut: DIFFICULTY_CUT, cut: DIFFICULTY_CUT,
@ -66,6 +81,7 @@ pub(crate) struct DifficultyCache {
} }
impl DifficultyCache { impl DifficultyCache {
/// Initialize the difficulty cache from the specified chain height.
#[instrument(name = "init_difficulty_cache", level = "info", skip(database, config))] #[instrument(name = "init_difficulty_cache", level = "info", skip(database, config))]
pub async fn init_from_chain_height<D: Database + Clone>( pub async fn init_from_chain_height<D: Database + Clone>(
chain_height: u64, chain_height: u64,
@ -100,13 +116,19 @@ impl DifficultyCache {
Ok(diff) Ok(diff)
} }
/// Add a new block to the difficulty cache.
pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) { pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) {
assert_eq!(self.last_accounted_height + 1, height); assert_eq!(self.last_accounted_height + 1, height);
self.last_accounted_height += 1; self.last_accounted_height += 1;
tracing::debug!(
"Accounting for new blocks timestamp ({timestamp}) and cumulative_difficulty ({cumulative_difficulty})",
);
self.timestamps.push_back(timestamp); self.timestamps.push_back(timestamp);
self.cumulative_difficulties self.cumulative_difficulties
.push_back(cumulative_difficulty); .push_back(cumulative_difficulty);
if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() { if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() {
self.timestamps.pop_front(); self.timestamps.pop_front();
self.cumulative_difficulties.pop_front(); self.cumulative_difficulties.pop_front();
@ -117,47 +139,28 @@ impl DifficultyCache {
/// ///
/// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty
pub fn next_difficulty(&self, hf: &HardFork) -> u128 { pub fn next_difficulty(&self, hf: &HardFork) -> u128 {
if self.timestamps.len() <= 1 { next_difficulty(
return 1; &self.config,
} &self.timestamps,
&self.cumulative_difficulties,
let mut timestamps = self.timestamps.clone(); hf,
if timestamps.len() > self.config.window { )
// remove the lag.
timestamps.drain(self.config.window..);
};
let timestamps_slice = timestamps.make_contiguous();
let (window_start, window_end) = get_window_start_and_end(
timestamps_slice.len(),
self.config.accounted_window_len(),
self.config.window,
);
// We don't sort the whole timestamp list
let mut time_span = u128::from(
*timestamps_slice.select_nth_unstable(window_end - 1).1
- *timestamps_slice.select_nth_unstable(window_start).1,
);
let windowed_work = self.cumulative_difficulties[window_end - 1]
- self.cumulative_difficulties[window_start];
if time_span == 0 {
time_span = 1;
}
// TODO: do checked operations here and unwrap so we don't silently overflow?
(windowed_work * hf.block_time().as_secs() as u128 + time_span - 1) / time_span
} }
/// Returns the difficulties for multiple next blocks, using the provided timestamps and hard-forks when needed.
///
/// The first difficulty will be the same as the difficulty from [`DifficultyCache::next_difficulty`] after that the
/// first timestamp and hf will be applied to the cache and the difficulty from that will be added to the list.
///
/// After all timestamps and hfs have been dealt with the cache will be returned back to its original state and the
/// difficulties will be returned.
pub fn next_difficulties( pub fn next_difficulties(
&mut self, &self,
blocks: Vec<(u64, HardFork)>, blocks: Vec<(u64, HardFork)>,
current_hf: &HardFork, current_hf: &HardFork,
) -> Vec<u128> { ) -> Vec<u128> {
let new_timestamps_len = blocks.len(); let mut timestamps = self.timestamps.clone();
let initial_len = self.timestamps.len(); let mut cumulative_difficulties = self.cumulative_difficulties.clone();
let mut difficulties = Vec::with_capacity(blocks.len() + 1); let mut difficulties = Vec::with_capacity(blocks.len() + 1);
@ -166,30 +169,24 @@ impl DifficultyCache {
let mut diff_info_popped = Vec::new(); let mut diff_info_popped = Vec::new();
for (new_timestamp, hf) in blocks { for (new_timestamp, hf) in blocks {
self.timestamps.push_back(new_timestamp); timestamps.push_back(new_timestamp);
self.cumulative_difficulties
.push_back(self.cumulative_difficulty() + *difficulties.last().unwrap()); let last_cum_diff = cumulative_difficulties.back().copied().unwrap_or(1);
if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() { cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap());
if u64::try_from(timestamps.len()).unwrap() > self.config.total_block_count() {
diff_info_popped.push(( diff_info_popped.push((
self.timestamps.pop_front().unwrap(), timestamps.pop_front().unwrap(),
self.cumulative_difficulties.pop_front().unwrap(), cumulative_difficulties.pop_front().unwrap(),
)); ));
} }
difficulties.push(self.next_difficulty(&hf)); difficulties.push(next_difficulty(
} &self.config,
&timestamps,
self.cumulative_difficulties.drain( &cumulative_difficulties,
self.cumulative_difficulties &hf,
.len() ));
.saturating_sub(new_timestamps_len)..,
);
self.timestamps
.drain(self.timestamps.len().saturating_sub(new_timestamps_len)..);
for (timestamp, cum_dif) in diff_info_popped.into_iter().take(initial_len).rev() {
self.timestamps.push_front(timestamp);
self.cumulative_difficulties.push_front(cum_dif);
} }
difficulties difficulties
@ -227,11 +224,55 @@ impl DifficultyCache {
self.cumulative_difficulties.back().copied().unwrap_or(1) self.cumulative_difficulties.back().copied().unwrap_or(1)
} }
/// Returns the top block's timestamp, returns [`None`] if the top block is the genesis block.
pub fn top_block_timestamp(&self) -> Option<u64> { pub fn top_block_timestamp(&self) -> Option<u64> {
self.timestamps.back().copied() self.timestamps.back().copied()
} }
} }
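A quick sketch of how a caller might drive the batch API above; `cache` is assumed to be an initialized `DifficultyCache`, and the timestamps and hard-fork values are invented for illustration:

// Hypothetical input: three upcoming blocks with invented timestamps/votes.
let upcoming = vec![
    (1_700_000_120, HardFork::V16),
    (1_700_000_240, HardFork::V16),
    (1_700_000_360, HardFork::V16),
];

// The result holds one more entry than `upcoming`; the first is the same
// value `cache.next_difficulty(&HardFork::V16)` would return.
let difficulties: Vec<u128> = cache.next_difficulties(upcoming, &HardFork::V16);
assert_eq!(difficulties.len(), 4);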
/// Calculates the next difficulty with the provided config, timestamps and cumulative difficulties.
fn next_difficulty(
config: &DifficultyCacheConfig,
timestamps: &VecDeque<u64>,
cumulative_difficulties: &VecDeque<u128>,
hf: &HardFork,
) -> u128 {
if timestamps.len() <= 1 {
return 1;
}
let mut timestamps = timestamps.clone();
if timestamps.len() > config.window {
// remove the lag.
timestamps.drain(config.window..);
};
let timestamps_slice = timestamps.make_contiguous();
let (window_start, window_end) = get_window_start_and_end(
timestamps_slice.len(),
config.accounted_window_len(),
config.window,
);
// We don't sort the whole timestamp list
let mut time_span = u128::from(
*timestamps_slice.select_nth_unstable(window_end - 1).1
- *timestamps_slice.select_nth_unstable(window_start).1,
);
let windowed_work =
cumulative_difficulties[window_end - 1] - cumulative_difficulties[window_start];
if time_span == 0 {
time_span = 1;
}
// TODO: do checked operations here and unwrap so we don't silently overflow?
(windowed_work * hf.block_time().as_secs() as u128 + time_span - 1) / time_span
}
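For reference, the closing calculation in `next_difficulty` is a rounding-up integer division; a small worked example with invented numbers:

// ceil(windowed_work * target_seconds / time_span) without floats:
let windowed_work: u128 = 720_000; // invented work across the window
let time_span: u128 = 86_400; // invented seconds covered by the window
let target_seconds: u128 = 120; // e.g. the v2+ target block time

let next_diff = (windowed_work * target_seconds + time_span - 1) / time_span;
assert_eq!(next_diff, 1_000);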
/// Get the start and end of the window to calculate difficulty.
fn get_window_start_and_end(
    window_len: usize,
    accounted_window: usize,

@@ -253,6 +294,7 @@ fn get_window_start_and_end(
    }
}

/// Returns the timestamps and cumulative difficulty for the blocks with heights in the specified range.
#[instrument(name = "get_blocks_timestamps", skip(database), level = "info")]
async fn get_blocks_in_pow_info<D: Database + Clone>(
    database: D,

@@ -260,8 +302,8 @@ async fn get_blocks_in_pow_info<D: Database + Clone>(
) -> Result<(VecDeque<u64>, VecDeque<u128>), ExtendedConsensusError> {
    tracing::info!("Getting blocks timestamps");

    let BCResponse::BlockExtendedHeaderInRange(ext_header) = database
        .oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights))
        .await?
    else {
        panic!("Database sent incorrect response");

View file

@@ -3,11 +3,14 @@ use std::ops::Range;

use tower::ServiceExt;
use tracing::instrument;

use cuprate_consensus_rules::{HFVotes, HFsInfo, HardFork};
use cuprate_types::blockchain::{BCReadRequest, BCResponse};

use crate::{Database, ExtendedConsensusError};

/// The default amount of hard-fork votes to track to decide on activation of a hard-fork.
///
/// ref: <https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork>
const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a week
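A simplified sketch of the check this window feeds; the real per-fork thresholds live in `HFsInfo`, so the 80% used here is only illustrative:

// A fork is accepted once enough of the last `window` blocks voted for it.
fn fork_has_supermajority(votes_for_fork: u64, window: u64, threshold_percent: u64) -> bool {
    votes_for_fork * 100 >= threshold_percent * window
}

assert!(fork_has_supermajority(8_064, 10_080, 80)); // 80% of a week of blocks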
/// Configuration for hard-forks.

@@ -21,6 +24,7 @@ pub struct HardForkConfig {
}

impl HardForkConfig {
    /// Config for main-net.
    pub const fn main_net() -> HardForkConfig {
        Self {
            info: HFsInfo::main_net(),

@@ -28,6 +32,7 @@ impl HardForkConfig {
        }
    }

    /// Config for stage-net.
    pub const fn stage_net() -> HardForkConfig {
        Self {
            info: HFsInfo::stage_net(),

@@ -35,6 +40,7 @@ impl HardForkConfig {
        }
    }

    /// Config for test-net.
    pub const fn test_net() -> HardForkConfig {
        Self {
            info: HFsInfo::test_net(),

@@ -46,15 +52,20 @@ impl HardForkConfig {

/// A struct that keeps track of the current hard-fork and current votes.
#[derive(Debug, Clone)]
pub struct HardForkState {
    /// The current active hard-fork.
    pub(crate) current_hardfork: HardFork,

    /// The hard-fork config.
    pub(crate) config: HardForkConfig,

    /// The votes in the current window.
    pub(crate) votes: HFVotes,

    /// The last block height accounted for.
    pub(crate) last_height: u64,
}

impl HardForkState {
    /// Initialize the [`HardForkState`] from the specified chain height.
    #[instrument(name = "init_hardfork_state", skip(config, database), level = "info")]
    pub async fn init_from_chain_height<D: Database + Clone>(
        chain_height: u64,

@@ -76,16 +87,17 @@ impl HardForkState {
            debug_assert_eq!(votes.total_votes(), config.window)
        }

        let BCResponse::BlockExtendedHeader(ext_header) = database
            .ready()
            .await?
            .call(BCReadRequest::BlockExtendedHeader(chain_height - 1))
            .await?
        else {
            panic!("Database sent incorrect response!");
        };

        let current_hardfork =
            HardFork::from_version(ext_header.version).expect("Stored block has invalid hardfork");

        let mut hfs = HardForkState {
            config,

@@ -105,7 +117,10 @@ impl HardForkState {
        Ok(hfs)
    }

    /// Add a new block to the cache.
    pub fn new_block(&mut self, vote: HardFork, height: u64) {
        // We don't _need_ to take in `height` but it's for safety, so we don't silently lose track
        // of blocks.
        assert_eq!(self.last_height + 1, height);
        self.last_height += 1;

@@ -115,6 +130,7 @@ impl HardForkState {
            vote
        );

        // This function removes votes outside the window as well.
        self.votes.add_vote_for_hf(&vote);

        if height > self.config.window {

@@ -136,11 +152,13 @@ impl HardForkState {
        );
    }

    /// Returns the current hard-fork.
    pub fn current_hardfork(&self) -> HardFork {
        self.current_hardfork
    }
}
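A hypothetical wiring of the state above; `chain_height` and `database` are assumed to exist, and the vote value is invented:

// Build the state at the current tip, then account for one new block.
let mut hf_state = HardForkState::init_from_chain_height(
    chain_height,
    HardForkConfig::main_net(),
    database,
)
.await?;

hf_state.new_block(HardFork::V16, chain_height); // the new block's vote
let current = hf_state.current_hardfork();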
/// Returns the block votes for blocks in the specified range.
#[instrument(name = "get_votes", skip(database))]
async fn get_votes_in_range<D: Database>(
    database: D,

@@ -149,15 +167,15 @@ async fn get_votes_in_range<D: Database>(
) -> Result<HFVotes, ExtendedConsensusError> {
    let mut votes = HFVotes::new(window_size);

    let BCResponse::BlockExtendedHeaderInRange(vote_list) = database
        .oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights))
        .await?
    else {
        panic!("Database sent incorrect response!");
    };

    for hf_info in vote_list.into_iter() {
        votes.add_vote_for_hf(&HardFork::from_vote(hf_info.vote));
    }

    Ok(votes)

View file

@@ -1,3 +1,8 @@
//! RandomX VM Cache
//!
//! This module keeps track of the RandomX VM to calculate the next block's PoW, if the block needs
//! a RandomX VM, and potentially more VMs around this height.
//!
use std::{
    collections::{HashMap, VecDeque},
    sync::Arc,

@@ -8,26 +13,35 @@ use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner};
use rayon::prelude::*;
use thread_local::ThreadLocal;
use tower::ServiceExt;
use tracing::instrument;

use cuprate_consensus_rules::{
    blocks::{is_randomx_seed_height, RandomX, RX_SEEDHASH_EPOCH_BLOCKS},
    HardFork,
};
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};

use crate::{Database, ExtendedConsensusError};

/// The amount of randomX VMs to keep in the cache.
const RX_SEEDS_CACHED: usize = 2;

/// A multithreaded randomX VM.
#[derive(Debug)]
pub struct RandomXVM {
    /// These RandomX VMs all share the same cache.
    vms: ThreadLocal<VMInner>,
    /// The RandomX cache.
    cache: RandomXCache,
    /// The flags used to start the RandomX VMs.
    flags: RandomXFlag,
}

impl RandomXVM {
    /// Create a new multithreaded randomX VM with the provided seed.
    pub fn new(seed: &[u8; 32]) -> Result<Self, RandomXError> {
        // TODO: allow passing in flags.
        let flags = RandomXFlag::get_recommended_flags();
        let cache = RandomXCache::new(flags, seed.as_slice())?;

@@ -51,15 +65,21 @@ impl RandomX for RandomXVM {
    }
}
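A minimal usage sketch; the seed bytes are invented and `calculate_hash` comes from the `RandomX` trait imported above:

let seed = [0u8; 32]; // illustrative seed hash
let vm = RandomXVM::new(&seed)?;

// Each calling thread lazily builds its own `VMInner` over the shared cache,
// so one `RandomXVM` can be shared across a rayon pool.
let pow_hash: [u8; 32] = vm.calculate_hash(b"block hashing blob")?;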
/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a
/// couple more around this VM.
#[derive(Clone, Debug)]
pub struct RandomXVMCache {
    /// The top [`RX_SEEDS_CACHED`] RX seeds.
    pub(crate) seeds: VecDeque<(u64, [u8; 32])>,
    /// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
    pub(crate) vms: HashMap<u64, Arc<RandomXVM>>,

    /// A single cached VM that was given to us from a part of Cuprate.
    pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVM>)>,
}

impl RandomXVMCache {
    #[instrument(name = "init_rx_vm_cache", level = "info", skip(database))]
    pub async fn init_from_chain_height<D: Database + Clone>(
        chain_height: u64,
        hf: &HardFork,

@@ -68,9 +88,12 @@ impl RandomXVMCache {
        let seed_heights = get_last_rx_seed_heights(chain_height - 1, RX_SEEDS_CACHED);
        let seed_hashes = get_block_hashes(seed_heights.clone(), database).await?;

        tracing::debug!("last {RX_SEEDS_CACHED} randomX seed heights: {seed_heights:?}",);

        let seeds: VecDeque<(u64, [u8; 32])> = seed_heights.into_iter().zip(seed_hashes).collect();

        let vms = if hf >= &HardFork::V12 {
            tracing::debug!("Creating RandomX VMs");
            let seeds_clone = seeds.clone();
            rayon_spawn_async(move || {
                seeds_clone

@@ -85,6 +108,7 @@ impl RandomXVMCache {
            })
            .await
        } else {
            tracing::debug!("We are before hard-fork 12, randomX VMs are not needed.");
            HashMap::new()
        };

@@ -95,18 +119,25 @@ impl RandomXVMCache {
        })
    }

    /// Add a randomX VM to the cache, with the seed it was created with.
    pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVM>)) {
        self.cached_vm.replace(vm);
    }

    /// Get the RandomX VMs.
    pub fn get_vms(&self) -> HashMap<u64, Arc<RandomXVM>> {
        self.vms.clone()
    }

    /// Add a new block to the VM cache.
    ///
    /// `hash` is the block hash, not the block's PoW hash.
    pub async fn new_block(&mut self, height: u64, hash: &[u8; 32], hf: &HardFork) {
        let should_make_vms = hf >= &HardFork::V12;
        if should_make_vms && self.vms.len() != self.seeds.len() {
            // this will only happen when syncing and rx activates.
            tracing::debug!("RandomX has activated, initialising VMs");
            let seeds_clone = self.seeds.clone();
            self.vms = rayon_spawn_async(move || {
                seeds_clone

@@ -123,12 +154,21 @@ impl RandomXVMCache {
        }

        if is_randomx_seed_height(height) {
            tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",);

            self.seeds.push_front((height, *hash));

            if should_make_vms {
                let new_vm = 'new_vm_block: {
                    tracing::debug!(
                        "Past hard-fork 12 initializing VM for seed: {}",
                        hex::encode(hash)
                    );

                    // Check if we have been given the RX VM from another part of Cuprate.
                    if let Some((cached_hash, cached_vm)) = self.cached_vm.take() {
                        if &cached_hash == hash {
                            tracing::debug!("VM was already created.");
                            break 'new_vm_block cached_vm;
                        }
                    };

@@ -153,6 +193,8 @@ impl RandomXVMCache {
        }
    }
}

/// Get the last `amount` of RX seeds; the top height returned here will not necessarily be the RX VM for the top block
/// in the chain, as VMs include some lag before a seed activates.
pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize) -> Vec<u64> {
    let mut seeds = Vec::with_capacity(amount);
    if is_randomx_seed_height(last_height) {

@@ -174,6 +216,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize)
    seeds
}
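For context, a sketch of the seed-height rule the lag comment refers to, mirroring monerod's `rx_seedheight`; the epoch/lag constants here (2048/64) are assumptions, not taken from this diff:

const EPOCH: u64 = 2048; // assumed value of RX_SEEDHASH_EPOCH_BLOCKS
const LAG: u64 = 64; // assumed activation lag

fn rx_seed_height(height: u64) -> u64 {
    if height <= EPOCH + LAG {
        0
    } else {
        // Round (height - lag - 1) down to a multiple of the epoch.
        (height - LAG - 1) & !(EPOCH - 1)
    }
}

assert_eq!(rx_seed_height(4096 + 32), 2048); // lag not yet passed
assert_eq!(rx_seed_height(4096 + 65), 4096); // lag passed, new seed active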
/// Gets the block hashes for the heights specified.
async fn get_block_hashes<D: Database + Clone>(
    heights: Vec<u64>,
    database: D,

@@ -183,10 +226,8 @@ async fn get_block_hashes<D: Database + Clone>(
    for height in heights {
        let db = database.clone();
        fut.push_back(async move {
            let BCResponse::BlockHash(hash) =
                db.clone().oneshot(BCReadRequest::BlockHash(height)).await?
            else {
                panic!("Database sent incorrect response!");
            };

View file

@@ -0,0 +1,233 @@
//! Context Task
//!
//! This module contains the async task that handles keeping track of blockchain context.
//! It holds all the context caches and handles [`tower::Service`] requests.
//!
use futures::channel::oneshot;
use tokio::sync::mpsc;
use tower::ServiceExt;
use tracing::Instrument;
use cuprate_consensus_rules::blocks::ContextToVerifyBlock;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use super::{
difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest,
BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken,
BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW,
};
use crate::{Database, ExtendedConsensusError};
/// A request from the context service to the context task.
pub(super) struct ContextTaskRequest {
/// The request.
pub req: BlockChainContextRequest,
/// The response channel.
pub tx: oneshot::Sender<Result<BlockChainContextResponse, tower::BoxError>>,
/// The tracing span of the requester.
pub span: tracing::Span,
}
/// The Context task that keeps the blockchain context and handles requests.
pub struct ContextTask {
/// A token used to invalidate previous contexts when a new
/// block is added to the chain.
current_validity_token: ValidityToken,
/// The difficulty cache.
difficulty_cache: difficulty::DifficultyCache,
/// The weight cache.
weight_cache: weight::BlockWeightsCache,
/// The RX VM cache.
rx_vm_cache: rx_vms::RandomXVMCache,
/// The hard-fork state cache.
hardfork_state: hardforks::HardForkState,
/// The current chain height.
chain_height: u64,
/// The top block hash.
top_block_hash: [u8; 32],
/// The total amount of coins generated.
already_generated_coins: u64,
}
impl ContextTask {
    /// Initialize the [`ContextTask`]; this will need to pull a lot of data from the database, so it may
    /// take a while to complete.
pub async fn init_context<D>(
cfg: ContextConfig,
mut database: D,
) -> Result<ContextTask, ExtendedConsensusError>
where
D: Database + Clone + Send + Sync + 'static,
D::Future: Send + 'static,
{
let ContextConfig {
difficulty_cfg,
weights_config,
hard_fork_cfg,
} = cfg;
tracing::debug!("Initialising blockchain context");
let BCResponse::ChainHeight(chain_height, top_block_hash) = database
.ready()
.await?
.call(BCReadRequest::ChainHeight)
.await?
else {
panic!("Database sent incorrect response!");
};
let BCResponse::GeneratedCoins(already_generated_coins) = database
.ready()
.await?
.call(BCReadRequest::GeneratedCoins)
.await?
else {
panic!("Database sent incorrect response!");
};
let db = database.clone();
let hardfork_state_handle = tokio::spawn(async move {
hardforks::HardForkState::init_from_chain_height(chain_height, hard_fork_cfg, db).await
});
let db = database.clone();
let difficulty_cache_handle = tokio::spawn(async move {
difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db)
.await
});
let db = database.clone();
let weight_cache_handle = tokio::spawn(async move {
weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db)
.await
});
// Wait for the hardfork state to finish first as we need it to start the randomX VM cache.
let hardfork_state = hardfork_state_handle.await.unwrap()?;
let current_hf = hardfork_state.current_hardfork();
let db = database.clone();
let rx_seed_handle = tokio::spawn(async move {
rx_vms::RandomXVMCache::init_from_chain_height(chain_height, &current_hf, db).await
});
let context_svc = ContextTask {
current_validity_token: ValidityToken::new(),
difficulty_cache: difficulty_cache_handle.await.unwrap()?,
weight_cache: weight_cache_handle.await.unwrap()?,
rx_vm_cache: rx_seed_handle.await.unwrap()?,
hardfork_state,
chain_height,
already_generated_coins,
top_block_hash,
};
Ok(context_svc)
}
/// Handles a [`BlockChainContextRequest`] and returns a [`BlockChainContextResponse`].
pub async fn handle_req(
&mut self,
req: BlockChainContextRequest,
) -> Result<BlockChainContextResponse, tower::BoxError> {
Ok(match req {
BlockChainContextRequest::GetContext => {
tracing::debug!("Getting blockchain context");
let current_hf = self.hardfork_state.current_hardfork();
BlockChainContextResponse::Context(BlockChainContext {
validity_token: self.current_validity_token.clone(),
raw: RawBlockChainContext {
context_to_verify_block: ContextToVerifyBlock {
median_weight_for_block_reward: self
.weight_cache
.median_for_block_reward(&current_hf),
effective_median_weight: self
.weight_cache
.effective_median_block_weight(&current_hf),
top_hash: self.top_block_hash,
median_block_timestamp: self.difficulty_cache.median_timestamp(
usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
),
chain_height: self.chain_height,
current_hf,
next_difficulty: self.difficulty_cache.next_difficulty(&current_hf),
already_generated_coins: self.already_generated_coins,
},
rx_vms: self.rx_vm_cache.get_vms(),
cumulative_difficulty: self.difficulty_cache.cumulative_difficulty(),
median_long_term_weight: self.weight_cache.median_long_term_weight(),
top_block_timestamp: self.difficulty_cache.top_block_timestamp(),
},
})
}
BlockChainContextRequest::BatchGetDifficulties(blocks) => {
tracing::debug!("Getting batch difficulties len: {}", blocks.len() + 1);
let next_diffs = self
.difficulty_cache
.next_difficulties(blocks, &self.hardfork_state.current_hardfork());
BlockChainContextResponse::BatchDifficulties(next_diffs)
}
BlockChainContextRequest::NewRXVM(vm) => {
tracing::debug!("Adding randomX VM to cache.");
self.rx_vm_cache.add_vm(vm);
BlockChainContextResponse::Ok
}
BlockChainContextRequest::Update(new) => {
tracing::debug!(
"Updating blockchain cache with new block, height: {}",
new.height
);
// Cancel the validity token and replace it with a new one.
std::mem::replace(&mut self.current_validity_token, ValidityToken::new())
.set_data_invalid();
self.difficulty_cache.new_block(
new.height,
new.timestamp,
new.cumulative_difficulty,
);
self.weight_cache
.new_block(new.height, new.weight, new.long_term_weight);
self.hardfork_state.new_block(new.vote, new.height);
self.rx_vm_cache
.new_block(
new.height,
&new.block_hash,
// We use the current hf and not the hf of the top block as when syncing we need to generate VMs
// on the switch to RX not after it.
&self.hardfork_state.current_hardfork(),
)
.await;
self.chain_height = new.height + 1;
self.top_block_hash = new.block_hash;
self.already_generated_coins = self
.already_generated_coins
.saturating_add(new.generated_coins);
BlockChainContextResponse::Ok
}
})
}
    /// Run the [`ContextTask`]; the task will listen for requests on the passed-in channel. When the channel
    /// closes, the task will finish.
pub async fn run(mut self, mut rx: mpsc::Receiver<ContextTaskRequest>) {
while let Some(req) = rx.recv().await {
let res = self.handle_req(req.req).instrument(req.span).await;
let _ = req.tx.send(res);
}
tracing::info!("Shutting down blockchain context task.");
}
}
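A hypothetical way to wire the task up; the channel capacity and the `ContextConfig::main_net()` constructor are assumptions for illustration:

let (tx, rx) = mpsc::channel(32);
let task = ContextTask::init_context(ContextConfig::main_net(), database).await?;
tokio::spawn(task.run(rx));

// Requests travel over the channel; the oneshot carries the response back.
let (res_tx, res_rx) = oneshot::channel();
tx.send(ContextTaskRequest {
    req: BlockChainContextRequest::GetContext,
    tx: res_tx,
    span: tracing::Span::current(),
})
.await?;
let context = res_rx.await??;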

View file

@@ -1,3 +1,10 @@
//! Tokens
//!
//! This module contains tokens which keep track of the validity of certain data.
//! Currently, there is 1 token:
//! - [`ValidityToken`]
//!
use tokio_util::sync::CancellationToken;

/// A token representing if a piece of data is valid.

@@ -7,39 +14,20 @@ pub struct ValidityToken {
}

impl ValidityToken {
    /// Creates a new [`ValidityToken`]
    pub fn new() -> ValidityToken {
        ValidityToken {
            token: CancellationToken::new(),
        }
    }

    /// Returns `true` if the data is still valid.
    pub fn is_data_valid(&self) -> bool {
        !self.token.is_cancelled()
    }

    /// Sets the data to invalid.
    pub fn set_data_invalid(self) {
        self.token.cancel()
    }
}
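A small sketch of the intended flow: contexts hold a clone of the token, and the original is cancelled when a new block invalidates them:

let token = ValidityToken::new();
let context_copy = token.clone();

assert!(context_copy.is_data_valid());
token.set_data_invalid(); // e.g. a new block was added to the chain
assert!(!context_copy.is_data_valid());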
/// A token representing if a re-org has happened since its creation.
#[derive(Debug, Clone, Default)]
pub struct ReOrgToken {
token: CancellationToken,
}
impl ReOrgToken {
pub fn new() -> ReOrgToken {
ReOrgToken {
token: CancellationToken::new(),
}
}
pub fn reorg_happened(&self) -> bool {
self.token.is_cancelled()
}
pub fn set_reorg_happened(self) {
self.token.cancel()
}
}

View file

@@ -16,12 +16,15 @@ use rayon::prelude::*;
use tower::ServiceExt;
use tracing::instrument;

use cuprate_consensus_rules::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5};
use cuprate_helper::{asynch::rayon_spawn_async, num::median};
use cuprate_types::blockchain::{BCReadRequest, BCResponse};

use crate::{Database, ExtendedConsensusError, HardFork};

/// The short term block weight window.
const SHORT_TERM_WINDOW: u64 = 100;
/// The long term block weight window.
const LONG_TERM_WINDOW: u64 = 100000;

/// Configuration for the block weight cache.

@@ -33,6 +36,7 @@ pub struct BlockWeightsCacheConfig {
}

impl BlockWeightsCacheConfig {
    /// Creates a new [`BlockWeightsCacheConfig`]
    pub const fn new(short_term_window: u64, long_term_window: u64) -> BlockWeightsCacheConfig {
        BlockWeightsCacheConfig {
            short_term_window,

@@ -40,6 +44,7 @@ impl BlockWeightsCacheConfig {
        }
    }

    /// Returns the [`BlockWeightsCacheConfig`] for all networks (they are all the same as mainnet).
    pub fn main_net() -> BlockWeightsCacheConfig {
        BlockWeightsCacheConfig {
            short_term_window: SHORT_TERM_WINDOW,

@@ -55,7 +60,9 @@ impl BlockWeightsCacheConfig {
/// this data it reduces the load on the database.
#[derive(Clone)]
pub struct BlockWeightsCache {
    /// The short term block weights.
    short_term_block_weights: VecDeque<usize>,
    /// The long term block weights.
    long_term_weights: VecDeque<usize>,

    /// The short term block weights sorted so we don't have to sort them every time we need

@@ -68,6 +75,7 @@ pub struct BlockWeightsCache {
    /// The height of the top block.
    tip_height: u64,

    /// The block weight config.
    config: BlockWeightsCacheConfig,
}

@@ -131,6 +139,7 @@ impl BlockWeightsCache {
            long_term_weight
        );

        // add the new block to the `long_term_weights` list and the sorted `cached_sorted_long_term_weights` list.
        self.long_term_weights.push_back(long_term_weight);
        match self
            .cached_sorted_long_term_weights

@@ -141,6 +150,7 @@ impl BlockWeightsCache {
                .insert(idx, long_term_weight),
        }

        // If the list now has too many entries remove the oldest.
        if u64::try_from(self.long_term_weights.len()).unwrap() > self.config.long_term_window {
            let val = self
                .long_term_weights

@@ -153,6 +163,7 @@ impl BlockWeightsCache {
            };
        }

        // add the block to the short_term_block_weights and the sorted cached_sorted_short_term_weights list.
        self.short_term_block_weights.push_back(block_weight);
        match self
            .cached_sorted_short_term_weights

@@ -163,6 +174,7 @@ impl BlockWeightsCache {
                .insert(idx, block_weight),
        }

        // If there are now too many entries remove the oldest.
        if u64::try_from(self.short_term_block_weights.len()).unwrap()
            > self.config.short_term_window
        {

@@ -192,6 +204,7 @@ impl BlockWeightsCache {
        median(&self.cached_sorted_long_term_weights)
    }

    /// Returns the median weight over the last [`SHORT_TERM_WINDOW`] blocks, or a custom amount of blocks set in the config.
    pub fn median_short_term_weight(&self) -> usize {
        median(&self.cached_sorted_short_term_weights)
    }

@@ -221,6 +234,7 @@ impl BlockWeightsCache {
    }
}

/// Calculates the effective median with the long term and short term median.
fn calculate_effective_median_block_weight(
    hf: &HardFork,
    median_short_term_weight: usize,

@@ -247,6 +261,7 @@ fn calculate_effective_median_block_weight(
    effective_median.max(penalty_free_zone(hf))
}

/// Calculates a block's long term weight.
pub fn calculate_block_long_term_weight(
    hf: &HardFork,
    block_weight: usize,

@@ -270,6 +285,7 @@ pub fn calculate_block_long_term_weight(
    min(short_term_constraint, adjusted_block_weight)
}
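A simplified sketch (not the cache code itself) of the pattern the comments above describe: keep the rolling window and a sorted copy in sync, so the median is always ready to read:

use std::collections::VecDeque;

fn push_weight(window: &mut VecDeque<usize>, sorted: &mut Vec<usize>, weight: usize, cap: usize) {
    window.push_back(weight);
    let idx = sorted.partition_point(|&w| w < weight);
    sorted.insert(idx, weight);

    // If the window is now too big, drop the oldest entry from both lists.
    if window.len() > cap {
        let old = window.pop_front().unwrap();
        let idx = sorted.binary_search(&old).unwrap();
        sorted.remove(idx);
    }
}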
/// Gets the block weights from the blocks with heights in the range provided.
#[instrument(name = "get_block_weights", skip(database))]
async fn get_blocks_weight_in_range<D: Database + Clone>(
    range: Range<u64>,

@@ -277,8 +293,8 @@ async fn get_blocks_weight_in_range<D: Database + Clone>(
) -> Result<Vec<usize>, ExtendedConsensusError> {
    tracing::info!("getting block weights.");

    let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database
        .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range))
        .await?
    else {
        panic!("Database sent incorrect response!")

@@ -290,6 +306,7 @@ async fn get_blocks_weight_in_range<D: Database + Clone>(
        .collect())
}

/// Gets the block long term weights from the blocks with heights in the range provided.
#[instrument(name = "get_long_term_weights", skip(database), level = "info")]
async fn get_long_term_weight_in_range<D: Database + Clone>(
    range: Range<u64>,

@@ -297,8 +314,8 @@ async fn get_long_term_weight_in_range<D: Database + Clone>(
) -> Result<Vec<usize>, ExtendedConsensusError> {
    tracing::info!("getting block long term weights.");

    let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database
        .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range))
        .await?
    else {
        panic!("Database sent incorrect response!")

View file

@@ -1,65 +1,59 @@
//! Cuprate Consensus
//!
//! This crate contains 3 [`tower::Service`]s that implement Monero's consensus rules:
//!
//! - [`BlockChainContextService`] Which handles keeping the current state of the blockchain.
//! - [`BlockVerifierService`] Which handles block verification.
//! - [`TxVerifierService`] Which handles transaction verification.
//!
//! This crate is generic over the database which is implemented as a [`tower::Service`]. To
//! implement a database you need to have a service which accepts [`BCReadRequest`] and responds
//! with [`BCResponse`].
//!
use cuprate_consensus_rules::{ConsensusError, HardFork};

mod batch_verifier;
pub mod block;
pub mod context;
#[cfg(test)]
mod tests;
pub mod transactions;

pub use block::{BlockVerifierService, VerifyBlockRequest, VerifyBlockResponse};
pub use context::{
    initialize_blockchain_context, BlockChainContext, BlockChainContextRequest,
    BlockChainContextResponse, BlockChainContextService, ContextConfig,
};
pub use transactions::{TxVerifierService, VerifyTxRequest, VerifyTxResponse};

// re-export.
pub use cuprate_types::blockchain::{BCReadRequest, BCResponse};

/// An Error returned from one of the consensus services.
#[derive(Debug, thiserror::Error)]
pub enum ExtendedConsensusError {
    /// A consensus error.
    #[error("{0}")]
    ConErr(#[from] ConsensusError),
    /// A database error.
    #[error("Database error: {0}")]
    DBErr(#[from] tower::BoxError),
    /// The transactions passed in with this block were not the ones needed.
    #[error("The transactions passed in with the block are incorrect.")]
    TxsIncludedWithBlockIncorrect,
    /// One or more statements in the batch verifier was invalid.
    #[error("One or more statements in the batch verifier was invalid.")]
    OneOrMoreBatchVerificationStatementsInvalid,
}

/// Initialize the 2 verifier [`tower::Service`]s (block and transaction).
pub async fn initialize_verifier<D, Ctx>(
    database: D,
    ctx_svc: Ctx,
) -> Result<
    (
        BlockVerifierService<Ctx, TxVerifierService<D>, D>,
        TxVerifierService<D>,
    ),
    ConsensusError,
>

@@ -76,73 +70,41 @@ where
        + 'static,
    Ctx::Future: Send + 'static,
{
    let tx_svc = TxVerifierService::new(database.clone());
    let block_svc = BlockVerifierService::new(ctx_svc, tx_svc.clone(), database);

    Ok((block_svc, tx_svc))
}
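Hypothetical call site, assuming `ctx_svc` came from `initialize_blockchain_context` and `database` satisfies `Database`:

let (block_verifier, tx_verifier) = initialize_verifier(database, ctx_svc).await?;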
use __private::Database;

pub mod __private {
    use std::future::Future;

    use cuprate_types::blockchain::{BCReadRequest, BCResponse};

    /// A type alias trait used to represent a database, so we don't have to write [`tower::Service`] bounds
    /// everywhere.
    ///
    /// Automatically implemented for:
    /// ```ignore
    /// tower::Service<BCReadRequest, Response = BCResponse, Error = tower::BoxError>
    /// ```
    pub trait Database:
        tower::Service<
        BCReadRequest,
        Response = BCResponse,
        Error = tower::BoxError,
        Future = Self::Future2,
    >
    {
        type Future2: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static;
    }

    impl<T: tower::Service<BCReadRequest, Response = BCResponse, Error = tower::BoxError>>
        crate::Database for T
    where
        T::Future: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static,
    {
        type Future2 = T::Future;
    }
}
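What the alias buys, as a sketch: downstream functions can take a single `Database` bound instead of the full `tower::Service` signature (assumes `tower::ServiceExt` is in scope):

async fn chain_height<D: Database>(mut db: D) -> Result<u64, tower::BoxError> {
    let BCResponse::ChainHeight(height, _top_hash) =
        db.ready().await?.call(BCReadRequest::ChainHeight).await?
    else {
        panic!("Database sent incorrect response!");
    };
    Ok(height)
}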

View file

@@ -1,35 +0,0 @@
use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner};
use thread_local::ThreadLocal;
use monero_consensus::blocks::RandomX;
pub struct RandomXVM {
vms: ThreadLocal<VMInner>,
cache: RandomXCache,
flags: RandomXFlag,
}
impl RandomXVM {
pub fn new(seed: [u8; 32]) -> Result<Self, RandomXError> {
let flags = RandomXFlag::get_recommended_flags();
let cache = RandomXCache::new(flags, &seed)?;
Ok(RandomXVM {
vms: ThreadLocal::new(),
cache,
flags,
})
}
}
impl RandomX for RandomXVM {
type Error = RandomXError;
fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> {
self.vms
.get_or_try(|| VMInner::new(self.flags, Some(self.cache.clone()), None))?
.calculate_hash(buf)
.map(|out| out.try_into().unwrap())
}
}

View file

@@ -1,288 +0,0 @@
use std::{
cmp::min,
collections::{HashMap, HashSet},
future::Future,
ops::Range,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use futures::{
stream::{FuturesOrdered, FuturesUnordered},
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
};
use tokio::sync::RwLock;
use tower::{balance::p2c::Balance, ServiceExt};
use cuprate_helper::asynch::rayon_spawn_async;
use crate::{DatabaseRequest, DatabaseResponse};
pub mod cache;
mod connection;
mod discover;
use cache::ScanningCache;
const MAX_OUTS_PER_RPC: usize = 5000; // the cap for monerod is 5000
#[derive(Debug, Copy, Clone)]
pub struct RpcConfig {
pub max_blocks_per_node: u64,
pub max_block_headers_per_node: u64,
}
impl RpcConfig {
pub fn block_batch_size(&self) -> u64 {
self.max_blocks_per_node * 3
}
pub fn new(max_blocks_per_node: u64, max_block_headers_per_node: u64) -> RpcConfig {
RpcConfig {
max_block_headers_per_node,
max_blocks_per_node,
}
}
}
#[derive(Clone)]
pub struct Attempts(u64);
impl<Req: Clone, Res, E> tower::retry::Policy<Req, Res, E> for Attempts {
type Future = futures::future::Ready<Self>;
fn retry(&self, _: &Req, result: Result<&Res, &E>) -> Option<Self::Future> {
if result.is_err() {
if self.0 == 0 {
None
} else {
Some(futures::future::ready(Attempts(self.0 - 1)))
}
} else {
None
}
}
fn clone_request(&self, req: &Req) -> Option<Req> {
Some(req.clone())
}
}
pub fn init_rpc_load_balancer(
addresses: Vec<String>,
cache: Arc<RwLock<ScanningCache>>,
config: Arc<std::sync::RwLock<RpcConfig>>,
) -> impl tower::Service<
DatabaseRequest,
Response = DatabaseResponse,
Error = tower::BoxError,
Future = Pin<
Box<dyn Future<Output = Result<DatabaseResponse, tower::BoxError>> + Send + 'static>,
>,
> + Clone {
let (rpc_discoverer_tx, rpc_discoverer_rx) = futures::channel::mpsc::channel(0);
let rpc_balance = Balance::new(Box::pin(
rpc_discoverer_rx.map(Result::<_, tower::BoxError>::Ok),
));
let rpc_buffer = tower::buffer::Buffer::new(rpc_balance, 50);
let rpcs = tower::retry::Retry::new(Attempts(10), rpc_buffer);
let discover = discover::RPCDiscover {
initial_list: addresses,
ok_channel: rpc_discoverer_tx,
already_connected: Default::default(),
cache: cache.clone(),
};
tokio::spawn(discover.run());
RpcBalancer {
rpcs,
config,
cache,
}
}
#[derive(Clone)]
pub struct RpcBalancer<T: Clone> {
rpcs: T,
config: Arc<std::sync::RwLock<RpcConfig>>,
cache: Arc<RwLock<ScanningCache>>,
}
impl<T> tower::Service<DatabaseRequest> for RpcBalancer<T>
where
T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>
+ Clone
+ Send
+ Sync
+ 'static,
T::Future: Send + 'static,
{
type Response = DatabaseResponse;
type Error = tower::BoxError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: DatabaseRequest) -> Self::Future {
let this = self.rpcs.clone();
let config_mutex = self.config.clone();
let config = config_mutex.clone();
let cache = self.cache.clone();
match req {
DatabaseRequest::CheckKIsNotSpent(kis) => async move {
Ok(DatabaseResponse::CheckKIsNotSpent(
cache.read().await.are_kis_spent(kis),
))
}
.boxed(),
DatabaseRequest::GeneratedCoins => async move {
Ok(DatabaseResponse::GeneratedCoins(
cache.read().await.already_generated_coins,
))
}
.boxed(),
DatabaseRequest::NumberOutputsWithAmount(amt) => async move {
Ok(DatabaseResponse::NumberOutputsWithAmount(
cache.read().await.numb_outs(&amt),
))
}
.boxed(),
DatabaseRequest::BlockBatchInRange(range) => {
let resp_to_ret = |resp: DatabaseResponse| {
let DatabaseResponse::BlockBatchInRange(pow_info) = resp else {
panic!("Database sent incorrect response");
};
pow_info
};
split_range_request(
this,
range,
DatabaseRequest::BlockBatchInRange,
DatabaseResponse::BlockBatchInRange,
resp_to_ret,
config.read().unwrap().max_blocks_per_node,
)
.boxed()
}
DatabaseRequest::BlockExtendedHeaderInRange(range) => {
let resp_to_ret = |resp: DatabaseResponse| {
let DatabaseResponse::BlockExtendedHeaderInRange(pow_info) = resp else {
panic!("Database sent incorrect response");
};
pow_info
};
split_range_request(
this,
range,
DatabaseRequest::BlockExtendedHeaderInRange,
DatabaseResponse::BlockExtendedHeaderInRange,
resp_to_ret,
config.read().unwrap().max_block_headers_per_node,
)
.boxed()
}
DatabaseRequest::Outputs(outs) => async move {
let split_outs = rayon_spawn_async(|| {
let mut split_outs: Vec<HashMap<u64, HashSet<u64>>> = Vec::new();
let mut i: usize = 0;
for (amount, ixs) in outs {
if ixs.len() > MAX_OUTS_PER_RPC {
for ii in (0..ixs.len()).step_by(MAX_OUTS_PER_RPC) {
let mut amt_map = HashSet::with_capacity(MAX_OUTS_PER_RPC);
amt_map.extend(ixs.iter().skip(ii).copied().take(MAX_OUTS_PER_RPC));
let mut map = HashMap::new();
map.insert(amount, amt_map);
split_outs.push(map);
i += 1;
}
continue;
}
if let Some(map) = split_outs.get_mut(i.saturating_sub(1)) {
if map.iter().map(|(_, amt_map)| amt_map.len()).sum::<usize>()
+ ixs.len()
< MAX_OUTS_PER_RPC
{
assert!(map.insert(amount, ixs).is_none());
continue;
}
}
let mut map = HashMap::new();
map.insert(amount, ixs);
split_outs.push(map);
i += 1;
}
split_outs
})
.await;
let mut futs = FuturesUnordered::from_iter(
split_outs
.into_iter()
.map(|map| this.clone().oneshot(DatabaseRequest::Outputs(map))),
);
let mut outs = HashMap::new();
while let Some(out_response) = futs.next().await {
let DatabaseResponse::Outputs(out_response) = out_response? else {
panic!("RPC sent incorrect response!");
};
out_response.into_iter().for_each(|(amt, amt_map)| {
outs.entry(amt).or_insert_with(HashMap::new).extend(amt_map)
});
}
Ok(DatabaseResponse::Outputs(outs))
}
.boxed(),
req => this.oneshot(req).boxed(),
}
}
}
fn split_range_request<T, Ret>(
rpc: T,
range: Range<u64>,
req: impl Fn(Range<u64>) -> DatabaseRequest + Send + 'static,
resp: impl FnOnce(Vec<Ret>) -> DatabaseResponse + Send + 'static,
resp_to_ret: impl Fn(DatabaseResponse) -> Vec<Ret> + Copy + Send + 'static,
max_request_per_rpc: u64,
) -> impl Future<Output = Result<DatabaseResponse, tower::BoxError>> + Send + 'static
where
T: tower::Service<DatabaseRequest, Response = DatabaseResponse, Error = tower::BoxError>
+ Clone
+ Send
+ Sync
+ 'static,
T::Future: Send + 'static,
Ret: Send + 'static,
{
let iter = (0..range.clone().count() as u64)
.step_by(max_request_per_rpc as usize)
.map(|i| {
let new_range =
(range.start + i)..(min(range.start + i + max_request_per_rpc, range.end));
rpc.clone().oneshot(req(new_range)).map_ok(resp_to_ret)
});
let fut = FuturesOrdered::from_iter(iter);
let mut res = Vec::with_capacity(range.count());
async move {
for mut rpc_res in fut.try_collect::<Vec<Vec<_>>>().await?.into_iter() {
res.append(&mut rpc_res)
}
Ok(resp(res))
}
}

View file

@@ -1,146 +0,0 @@
#![cfg(feature = "binaries")]
use std::{
collections::HashMap,
collections::HashSet,
fmt::{Display, Formatter},
io::{BufWriter, Write},
path::Path,
sync::Arc,
};
use borsh::{BorshDeserialize, BorshSerialize};
use monero_serai::transaction::{Input, Timelock, Transaction};
use tracing_subscriber::fmt::MakeWriter;
use crate::transactions::TransactionVerificationData;
/// A cache which can keep chain state while scanning.
///
/// Because we are using an RPC interface with a node, we need to keep track
/// of certain data that the node doesn't hold or give us, like the number
/// of outputs at a certain time.
#[derive(Debug, Default, Clone, BorshSerialize, BorshDeserialize)]
pub struct ScanningCache {
// network: u8,
numb_outs: HashMap<u64, usize>,
time_locked_out: HashMap<[u8; 32], u64>,
kis: HashSet<[u8; 32]>,
pub already_generated_coins: u64,
/// The height of the *next* block to scan.
pub height: u64,
}
impl ScanningCache {
pub fn save(&self, file: &Path) -> Result<(), tower::BoxError> {
let file = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(file)?;
let mut writer = BufWriter::new(file.make_writer());
borsh::to_writer(&mut writer, &self)?;
writer.flush()?;
Ok(())
}
pub fn load(file: &Path) -> Result<ScanningCache, tower::BoxError> {
let mut file = std::fs::OpenOptions::new().read(true).open(file)?;
let data: ScanningCache = borsh::from_reader(&mut file)?;
Ok(data)
}
pub fn add_new_block_data(
&mut self,
generated_coins: u64,
miner_tx: &Transaction,
txs: &[Arc<TransactionVerificationData>],
) {
self.add_tx_time_lock(miner_tx.hash(), miner_tx.prefix.timelock);
miner_tx.prefix.outputs.iter().for_each(|out| {
self.add_outs(miner_tx.prefix.version == 2, out.amount.unwrap_or(0), 1)
});
txs.iter().for_each(|tx| {
self.add_tx_time_lock(tx.tx_hash, tx.tx.prefix.timelock);
tx.tx.prefix.outputs.iter().for_each(|out| {
self.add_outs(tx.tx.prefix.version == 2, out.amount.unwrap_or(0), 1)
});
tx.tx.prefix.inputs.iter().for_each(|inp| match inp {
Input::ToKey { key_image, .. } => {
assert!(self.kis.insert(key_image.compress().to_bytes()))
}
_ => unreachable!(),
})
});
self.already_generated_coins = self.already_generated_coins.saturating_add(generated_coins);
self.height += 1;
}
/// Returns true if any kis are included in our spent set.
pub fn are_kis_spent(&self, kis: HashSet<[u8; 32]>) -> bool {
!self.kis.is_disjoint(&kis)
}
pub fn outputs_time_lock(&self, tx: &[u8; 32]) -> Timelock {
let time_lock = self.time_locked_out.get(tx).copied().unwrap_or(0);
match time_lock {
0 => Timelock::None,
block if block < 500_000_000 => Timelock::Block(block as usize),
time => Timelock::Time(time),
}
}
pub fn add_tx_time_lock(&mut self, tx: [u8; 32], time_lock: Timelock) {
match time_lock {
Timelock::None => (),
lock => {
self.time_locked_out.insert(
tx,
match lock {
Timelock::None => unreachable!(),
Timelock::Block(x) => x as u64,
Timelock::Time(x) => x,
},
);
}
}
}
pub fn total_outs(&self) -> usize {
self.numb_outs.values().sum()
}
pub fn numb_outs(&self, amounts: &[u64]) -> HashMap<u64, usize> {
amounts
.iter()
.map(|amount| (*amount, *self.numb_outs.get(amount).unwrap_or(&0)))
.collect()
}
pub fn add_outs(&mut self, is_v2: bool, amount: u64, count: usize) {
let amount = if is_v2 { 0 } else { amount };
if let Some(numb_outs) = self.numb_outs.get_mut(&amount) {
*numb_outs += count;
} else {
self.numb_outs.insert(amount, count);
}
}
}
impl Display for ScanningCache {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let rct_outs = *self.numb_outs(&[0]).get(&0).unwrap();
let total_outs = self.total_outs();
f.debug_struct("Cache")
.field("next_block", &self.height)
.field("rct_outs", &rct_outs)
.field("total_outs", &total_outs)
.finish()
}
}

View file

@@ -1,476 +0,0 @@
use std::ops::Deref;
use std::{
collections::{HashMap, HashSet},
ops::Range,
sync::Arc,
task::{Context, Poll},
};
use curve25519_dalek::edwards::CompressedEdwardsY;
use futures::{
channel::{mpsc, oneshot},
StreamExt,
};
use monero_serai::{
block::Block,
rpc::{HttpRpc, Rpc},
transaction::Transaction,
};
use monero_wire::common::TransactionBlobs;
use rayon::prelude::*;
use serde::Deserialize;
use serde_json::json;
use tokio::{
sync::RwLock,
task::JoinHandle,
time::{timeout, Duration},
};
use tower::Service;
use tracing::{instrument, Instrument};
use cuprate_helper::asynch::{rayon_spawn_async, InfallibleOneshotReceiver};
use super::ScanningCache;
use crate::{DatabaseRequest, DatabaseResponse, ExtendedBlockHeader, HardFork, OutputOnChain};
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(300);
const OUTPUTS_TIMEOUT: Duration = Duration::from_secs(50);
pub struct RpcConnectionSvc {
pub(crate) address: String,
pub(crate) rpc_task_handle: JoinHandle<()>,
pub(crate) rpc_task_chan: mpsc::Sender<RpcReq>,
}
impl Service<DatabaseRequest> for RpcConnectionSvc {
type Response = DatabaseResponse;
type Error = tower::BoxError;
type Future = InfallibleOneshotReceiver<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.rpc_task_handle.is_finished() {
return Poll::Ready(Err("RPC task has exited!".into()));
}
self.rpc_task_chan.poll_ready(cx).map_err(Into::into)
}
fn call(&mut self, req: DatabaseRequest) -> Self::Future {
let (tx, rx) = oneshot::channel();
let req = RpcReq {
req,
res_chan: tx,
span: tracing::info_span!(parent: &tracing::Span::current(), "rpc", addr = &self.address),
};
self.rpc_task_chan
.try_send(req)
.expect("poll_ready should be called first!");
rx.into()
}
}
pub(crate) struct RpcReq {
req: DatabaseRequest,
res_chan: oneshot::Sender<Result<DatabaseResponse, tower::BoxError>>,
span: tracing::Span,
}
pub struct RpcConnection {
pub(crate) address: String,
pub(crate) con: Rpc<HttpRpc>,
pub(crate) cache: Arc<RwLock<ScanningCache>>,
pub(crate) req_chan: mpsc::Receiver<RpcReq>,
}
impl RpcConnection {
async fn get_block_hash(&self, height: u64) -> Result<[u8; 32], tower::BoxError> {
self.con
.get_block_hash(height.try_into().unwrap())
.await
.map_err(Into::into)
}
async fn get_extended_block_header(
&self,
height: u64,
) -> Result<ExtendedBlockHeader, tower::BoxError> {
tracing::info!("Retrieving block info with height: {}", height);
#[derive(Deserialize, Debug)]
struct Response {
block_header: BlockInfo,
}
let info = {
let res = self
.con
.json_rpc_call::<Response>(
"get_block_header_by_height",
Some(json!({"height": height})),
)
.await?;
res.block_header
};
Ok(ExtendedBlockHeader {
version: HardFork::from_version(info.major_version)
.expect("previously checked block has incorrect version"),
vote: HardFork::from_vote(info.minor_version),
timestamp: info.timestamp,
cumulative_difficulty: u128_from_low_high(
info.cumulative_difficulty,
info.cumulative_difficulty_top64,
),
block_weight: info.block_weight,
long_term_weight: info.long_term_weight,
})
}
async fn get_extended_block_header_in_range(
&self,
range: Range<u64>,
) -> Result<Vec<ExtendedBlockHeader>, tower::BoxError> {
#[derive(Deserialize, Debug)]
struct Response {
headers: Vec<BlockInfo>,
}
let res = self
.con
.json_rpc_call::<Response>(
"get_block_headers_range",
Some(json!({"start_height": range.start, "end_height": range.end - 1})),
)
.await?;
tracing::info!("Retrieved block headers in range: {:?}", range);
Ok(rayon_spawn_async(|| {
res.headers
.into_iter()
.map(|info| ExtendedBlockHeader {
version: HardFork::from_version(info.major_version)
.expect("previously checked block has incorrect version"),
vote: HardFork::from_vote(info.minor_version),
timestamp: info.timestamp,
cumulative_difficulty: u128_from_low_high(
info.cumulative_difficulty,
info.cumulative_difficulty_top64,
),
block_weight: info.block_weight,
long_term_weight: info.long_term_weight,
})
.collect()
})
.await)
}
async fn get_blocks_in_range(
&self,
range: Range<u64>,
) -> Result<Vec<(Block, Vec<Transaction>)>, tower::BoxError> {
tracing::info!("Getting blocks in range: {:?}", range);
mod items {
use monero_wire::common::BlockCompleteEntry;
pub struct Request {
pub heights: Vec<u64>,
}
epee_encoding::epee_object!(
Request,
heights: Vec<u64>,
);
pub struct Response {
pub blocks: Vec<BlockCompleteEntry>,
}
epee_encoding::epee_object!(
Response,
blocks: Vec<BlockCompleteEntry>,
);
}
use items::*;
let res = self
.con
.bin_call(
"get_blocks_by_height.bin",
epee_encoding::to_bytes(Request {
heights: range.collect(),
})?
.to_vec(),
)
.await?;
let address = self.address.clone();
rayon_spawn_async(move || {
let blocks: Response =
epee_encoding::from_bytes(&mut epee_encoding::macros::bytes::Bytes::from(res))?;
blocks
.blocks
.into_par_iter()
.map(|b| {
let block = Block::read(&mut b.block.deref())?;
let txs = match b.txs {
TransactionBlobs::Pruned(_) => return Err("node sent pruned txs!".into()),
TransactionBlobs::Normal(txs) => txs
.into_par_iter()
.map(|tx| Transaction::read(&mut tx.deref()))
.collect::<Result<_, _>>()?,
TransactionBlobs::None => vec![],
};
assert_eq!(
block.txs.len(),
txs.len(),
"node: {}, height: {}, node is pruned, which is not supported!",
address,
block.number().unwrap(),
);
Ok((block, txs))
})
.collect::<Result<_, tower::BoxError>>()
})
.await
}
async fn get_outputs(
&self,
out_ids: HashMap<u64, HashSet<u64>>,
) -> Result<HashMap<u64, HashMap<u64, OutputOnChain>>, tower::BoxError> {
tracing::info!(
"Getting outputs len: {}",
out_ids.values().map(|amt_map| amt_map.len()).sum::<usize>()
);
mod items {
#[derive(Copy, Clone)]
pub struct OutputID {
pub amount: u64,
pub index: u64,
}
epee_encoding::epee_object!(
OutputID,
amount: u64,
index: u64,
);
#[derive(Clone)]
pub struct Request {
pub outputs: Vec<OutputID>,
}
epee_encoding::epee_object!(
Request,
outputs: Vec<OutputID>,
);
pub struct OutputRes {
pub height: u64,
pub key: [u8; 32],
pub mask: [u8; 32],
pub txid: [u8; 32],
}
epee_encoding::epee_object!(
OutputRes,
height: u64,
key: [u8; 32],
mask: [u8; 32],
txid: [u8; 32],
);
pub struct Response {
pub outs: Vec<OutputRes>,
}
epee_encoding::epee_object!(
Response,
outs: Vec<OutputRes>,
);
}
use items::*;
let outputs = rayon_spawn_async(|| {
out_ids
.into_iter()
.flat_map(|(amt, amt_map)| {
amt_map
.into_iter()
.map(|amt_idx| OutputID {
amount: amt,
index: amt_idx,
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
})
.await;
let res = self
.con
.bin_call(
"get_outs.bin",
epee_encoding::to_bytes(Request {
outputs: outputs.clone(),
})?
.to_vec(),
)
.await?;
let cache = self.cache.clone().read_owned().await;
let span = tracing::Span::current();
rayon_spawn_async(move || {
let outs: Response =
epee_encoding::from_bytes(&mut epee_encoding::macros::bytes::Bytes::from(res))?;
tracing::info!(parent: &span, "Got outputs len: {}", outs.outs.len());
let mut ret = HashMap::new();
for (out, idx) in outs.outs.into_iter().zip(outputs) {
ret.entry(idx.amount).or_insert_with(HashMap::new).insert(
idx.index,
OutputOnChain {
height: out.height,
time_lock: cache.outputs_time_lock(&out.txid),
// We unwrap these as we are checking already-approved rings, so if these points
// were bad then a bad proof would already have been approved.
key: CompressedEdwardsY::from_slice(&out.key)
.unwrap()
.decompress(),
commitment: CompressedEdwardsY::from_slice(&out.mask)
.unwrap()
.decompress()
.unwrap(),
},
);
}
Ok(ret)
})
.await
}
async fn handle_request(
&mut self,
req: DatabaseRequest,
) -> Result<DatabaseResponse, tower::BoxError> {
match req {
DatabaseRequest::BlockHash(height) => {
timeout(DEFAULT_TIMEOUT, self.get_block_hash(height))
.await?
.map(DatabaseResponse::BlockHash)
}
DatabaseRequest::ChainHeight => {
let height = self.cache.read().await.height;
let hash = timeout(DEFAULT_TIMEOUT, self.get_block_hash(height - 1)).await??;
Ok(DatabaseResponse::ChainHeight(height, hash))
}
DatabaseRequest::BlockExtendedHeader(id) => {
timeout(DEFAULT_TIMEOUT, self.get_extended_block_header(id))
.await?
.map(DatabaseResponse::BlockExtendedHeader)
}
DatabaseRequest::BlockExtendedHeaderInRange(range) => timeout(
DEFAULT_TIMEOUT,
self.get_extended_block_header_in_range(range),
)
.await?
.map(DatabaseResponse::BlockExtendedHeaderInRange),
DatabaseRequest::BlockBatchInRange(range) => {
timeout(DEFAULT_TIMEOUT, self.get_blocks_in_range(range))
.await?
.map(DatabaseResponse::BlockBatchInRange)
}
DatabaseRequest::Outputs(out_ids) => {
timeout(OUTPUTS_TIMEOUT, self.get_outputs(out_ids))
.await?
.map(DatabaseResponse::Outputs)
}
DatabaseRequest::NumberOutputsWithAmount(_)
| DatabaseRequest::GeneratedCoins
| DatabaseRequest::CheckKIsNotSpent(_) => {
panic!("Request does not need RPC connection!")
}
}
}
#[instrument(level = "info", skip(self), fields(addr = self.address))]
pub async fn check_rpc_alive(&self) -> Result<(), tower::BoxError> {
tracing::debug!("Checking RPC connection");
let res = timeout(Duration::from_secs(10), self.con.get_height()).await;
let ok = matches!(res, Ok(Ok(_)));
if !ok {
tracing::warn!("RPC connection test failed");
return Err("RPC connection test failed".into());
}
tracing::info!("RPC connection Ok");
Ok(())
}
pub async fn run(mut self) {
while let Some(req) = self.req_chan.next().await {
let RpcReq {
req,
span,
res_chan,
} = req;
let res = self.handle_request(req).instrument(span.clone()).await;
let is_err = res.is_err();
if is_err {
tracing::warn!(parent: &span, "Error from RPC: {:?}", res)
}
let _ = res_chan.send(res);
if is_err && self.check_rpc_alive().await.is_err() {
break;
}
}
tracing::warn!("Shutting down RPC connection: {}", self.address);
self.req_chan.close();
while let Some(req) = self.req_chan.try_next().unwrap() {
let _ = req.res_chan.send(Err("RPC connection closed!".into()));
}
}
}
#[derive(Deserialize, Debug)]
struct BlockInfo {
cumulative_difficulty: u64,
cumulative_difficulty_top64: u64,
timestamp: u64,
block_weight: usize,
long_term_weight: usize,
major_version: u8,
minor_version: u8,
}
fn u128_from_low_high(low: u64, high: u64) -> u128 {
let res: u128 = high as u128;
res << 64 | low as u128
}
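A quick worked example of the helper above (a sketch, not part of the diff): monerod splits 128-bit difficulty values across two `u64` fields, and the helper recombines them by placing the high half in the upper 64 bits:
// low = 1, high = 2  =>  (2 << 64) | 1
assert_eq!(u128_from_low_high(1, 2), (2u128 << 64) | 1);
// a value that fits in 64 bits passes through unchanged
assert_eq!(u128_from_low_high(u64::MAX, 0), u128::from(u64::MAX));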

View file

@ -1,87 +0,0 @@
use std::{sync::Arc, time::Duration};
use futures::{
channel::mpsc::{self, SendError},
stream::FuturesUnordered,
SinkExt, StreamExt,
};
use monero_serai::rpc::HttpRpc;
use tokio::sync::RwLock;
use tower::{discover::Change, load::PeakEwma};
use tracing::instrument;
use super::{
cache::ScanningCache,
connection::{RpcConnection, RpcConnectionSvc},
};
#[instrument(skip(cache))]
async fn check_rpc(addr: String, cache: Arc<RwLock<ScanningCache>>) -> Option<RpcConnectionSvc> {
tracing::debug!("Sending request to node.");
let con = HttpRpc::with_custom_timeout(addr.clone(), Duration::from_secs(u64::MAX))
.await
.ok()?;
let (tx, rx) = mpsc::channel(0);
let rpc = RpcConnection {
address: addr.clone(),
con,
cache,
req_chan: rx,
};
rpc.check_rpc_alive().await.ok()?;
let handle = tokio::spawn(rpc.run());
Some(RpcConnectionSvc {
address: addr,
rpc_task_chan: tx,
rpc_task_handle: handle,
})
}
pub(crate) struct RPCDiscover {
pub initial_list: Vec<String>,
pub ok_channel: mpsc::Sender<Change<usize, PeakEwma<RpcConnectionSvc>>>,
pub already_connected: usize,
pub cache: Arc<RwLock<ScanningCache>>,
}
impl RPCDiscover {
async fn found_rpc(&mut self, rpc: RpcConnectionSvc) -> Result<(), SendError> {
self.already_connected += 1;
self.ok_channel
.send(Change::Insert(
self.already_connected,
PeakEwma::new(
rpc,
Duration::from_secs(5000),
3000.0,
tower::load::CompleteOnResponse::default(),
),
))
.await?;
Ok(())
}
pub async fn run(mut self) {
if !self.initial_list.is_empty() {
let mut fut = FuturesUnordered::from_iter(
self.initial_list
.drain(..)
.map(|addr| check_rpc(addr, self.cache.clone())),
);
while let Some(res) = fut.next().await {
if let Some(rpc) = res {
if self.found_rpc(rpc).await.is_err() {
tracing::info!("Stopping RPC discover channel closed!");
return;
}
}
}
}
}
}
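The `Change` values pushed into `ok_channel` above are exactly the shape `tower::balance` consumes. A hedged sketch of the receiving side (the `p2c::Balance` API is from the `tower` crate; the wiring is illustrative, assuming `rx` is the channel's receiver):
use futures::StreamExt;
use tower::balance::p2c::Balance;
// `Discover` is blanket-implemented for `TryStream<Ok = Change<_, _>>`,
// so each plain `Change` item is wrapped in `Ok` first.
let discover = rx.map(Ok::<_, tower::BoxError>);
// Picks a backend per request via power-of-two-choices over the
// PeakEwma latency estimates attached in `found_rpc`.
let balanced = Balance::new(discover);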

View file

@ -5,7 +5,7 @@ use tower::ServiceExt;
use crate::{ use crate::{
context::{ context::{
initialize_blockchain_context, BlockChainContextRequest, BlockChainContextResponse, initialize_blockchain_context, BlockChainContextRequest, BlockChainContextResponse,
ContextConfig, UpdateBlockchainCacheData, ContextConfig, NewBlockData,
}, },
tests::mock_db::*, tests::mock_db::*,
HardFork, HardFork,
@ -52,18 +52,16 @@ async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> {
assert!(context.is_still_valid()); assert!(context.is_still_valid());
ctx_svc ctx_svc
.oneshot(BlockChainContextRequest::Update( .oneshot(BlockChainContextRequest::Update(NewBlockData {
UpdateBlockchainCacheData { block_hash: [0; 32],
new_top_hash: [0; 32], height: BLOCKCHAIN_HEIGHT,
height: BLOCKCHAIN_HEIGHT, timestamp: 0,
timestamp: 0, weight: 0,
weight: 0, long_term_weight: 0,
long_term_weight: 0, generated_coins: 0,
generated_coins: 0, vote: HardFork::V1,
vote: HardFork::V1, cumulative_difficulty: 0,
cumulative_difficulty: 0, }))
},
))
.await?; .await?;
assert!(!context.is_still_valid()); assert!(!context.is_still_valid());

View file

@ -1,4 +1,4 @@
use monero_consensus::HardFork; use cuprate_consensus_rules::HardFork;
pub static HFS_2688888_2689608: [(HardFork, HardFork); 720] = pub static HFS_2688888_2689608: [(HardFork, HardFork); 720] =
include!("./data/hfs_2688888_2689608"); include!("./data/hfs_2688888_2689608");

View file

@ -181,7 +181,7 @@ proptest! {
#[test] #[test]
fn calculating_multiple_diffs_does_not_change_state( fn calculating_multiple_diffs_does_not_change_state(
mut diff_cache in random_difficulty_cache(), diff_cache in random_difficulty_cache(),
timestamps in any_with::<Vec<u64>>(size_range(0..1000).lift()), timestamps in any_with::<Vec<u64>>(size_range(0..1000).lift()),
hf in any::<HardFork>(), hf in any::<HardFork>(),
) { ) {
@ -189,7 +189,7 @@ proptest! {
diff_cache.next_difficulties(timestamps.into_iter().zip([hf].into_iter().cycle()).collect(), &hf); diff_cache.next_difficulties(timestamps.into_iter().zip([hf].into_iter().cycle()).collect(), &hf);
assert_eq!(diff_cache, cache); prop_assert_eq!(diff_cache, cache);
} }
#[test] #[test]
@ -203,7 +203,7 @@ proptest! {
let diffs = diff_cache.next_difficulties(timestamps.clone(), &hf); let diffs = diff_cache.next_difficulties(timestamps.clone(), &hf);
for (timestamp, diff) in timestamps.into_iter().zip(diffs.into_iter()) { for (timestamp, diff) in timestamps.into_iter().zip(diffs.into_iter()) {
assert_eq!(diff_cache.next_difficulty(&timestamp.1), diff); prop_assert_eq!(diff_cache.next_difficulty(&timestamp.1), diff);
diff_cache.new_block(diff_cache.last_accounted_height +1, timestamp.0, diff + diff_cache.cumulative_difficulty()); diff_cache.new_block(diff_cache.last_accounted_height +1, timestamp.0, diff + diff_cache.cumulative_difficulty());
} }
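The change from `assert_eq!` to `prop_assert_eq!` in the hunks above is worth noting: the `prop_assert*` macros report failures to the proptest runner as a `TestCaseError` instead of panicking, which keeps shrinking output clean. A minimal illustrative sketch (not from this diff):
proptest! {
    #[test]
    fn roundtrip(x in 0u64..1000) {
        // Returns an error into the runner on failure rather than unwinding,
        // so shrunken counterexamples print cleanly.
        prop_assert_eq!(x, x);
    }
}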

View file

@ -1,5 +1,4 @@
use monero_consensus::hard_forks::{HFInfo, HardFork, NUMB_OF_HARD_FORKS}; use cuprate_consensus_rules::hard_forks::{HFInfo, HFsInfo, HardFork, NUMB_OF_HARD_FORKS};
use monero_consensus::HFsInfo;
use crate::{ use crate::{
context::{hardforks::HardForkState, HardForkConfig}, context::{hardforks::HardForkState, HardForkConfig},

View file

@ -3,7 +3,7 @@ use std::collections::VecDeque;
use proptest::prelude::*; use proptest::prelude::*;
use tokio::runtime::Builder; use tokio::runtime::Builder;
use monero_consensus::{ use cuprate_consensus_rules::{
blocks::{is_randomx_seed_height, randomx_seed_height}, blocks::{is_randomx_seed_height, randomx_seed_height},
HardFork, HardFork,
}; };

View file

@ -15,7 +15,12 @@ use proptest::{
use proptest_derive::Arbitrary; use proptest_derive::Arbitrary;
use tower::{BoxError, Service}; use tower::{BoxError, Service};
use crate::{DatabaseRequest, DatabaseResponse, ExtendedBlockHeader, HardFork}; use cuprate_types::{
blockchain::{BCReadRequest, BCResponse},
ExtendedBlockHeader,
};
use crate::HardFork;
prop_compose! { prop_compose! {
/// Generates an arbitrary full [`DummyDatabase`], it is not safe to do consensus checks on the returned database /// Generates an arbitrary full [`DummyDatabase`], it is not safe to do consensus checks on the returned database
@ -56,8 +61,8 @@ pub struct DummyBlockExtendedHeader {
impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader { impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader {
fn from(value: DummyBlockExtendedHeader) -> Self { fn from(value: DummyBlockExtendedHeader) -> Self {
ExtendedBlockHeader { ExtendedBlockHeader {
version: value.version.unwrap_or(HardFork::V1), version: value.version.unwrap_or(HardFork::V1) as u8,
vote: value.vote.unwrap_or(HardFork::V1), vote: value.vote.unwrap_or(HardFork::V1) as u8,
timestamp: value.timestamp.unwrap_or_default(), timestamp: value.timestamp.unwrap_or_default(),
cumulative_difficulty: value.cumulative_difficulty.unwrap_or_default(), cumulative_difficulty: value.cumulative_difficulty.unwrap_or_default(),
block_weight: value.block_weight.unwrap_or_default(), block_weight: value.block_weight.unwrap_or_default(),
@ -122,8 +127,8 @@ pub struct DummyDatabase {
dummy_height: Option<usize>, dummy_height: Option<usize>,
} }
impl Service<DatabaseRequest> for DummyDatabase { impl Service<BCReadRequest> for DummyDatabase {
type Response = DatabaseResponse; type Response = BCResponse;
type Error = BoxError; type Error = BoxError;
type Future = type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>; Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
@ -132,13 +137,13 @@ impl Service<DatabaseRequest> for DummyDatabase {
Poll::Ready(Ok(())) Poll::Ready(Ok(()))
} }
fn call(&mut self, req: DatabaseRequest) -> Self::Future { fn call(&mut self, req: BCReadRequest) -> Self::Future {
let blocks = self.blocks.clone(); let blocks = self.blocks.clone();
let dummy_height = self.dummy_height; let dummy_height = self.dummy_height;
async move { async move {
Ok(match req { Ok(match req {
DatabaseRequest::BlockExtendedHeader(id) => { BCReadRequest::BlockExtendedHeader(id) => {
let mut id = usize::try_from(id).unwrap(); let mut id = usize::try_from(id).unwrap();
if let Some(dummy_height) = dummy_height { if let Some(dummy_height) = dummy_height {
let block_len = blocks.read().unwrap().len(); let block_len = blocks.read().unwrap().len();
@ -146,7 +151,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
id -= dummy_height - block_len; id -= dummy_height - block_len;
} }
DatabaseResponse::BlockExtendedHeader( BCResponse::BlockExtendedHeader(
blocks blocks
.read() .read()
.unwrap() .unwrap()
@ -156,12 +161,12 @@ impl Service<DatabaseRequest> for DummyDatabase {
.ok_or("block not in database!")?, .ok_or("block not in database!")?,
) )
} }
DatabaseRequest::BlockHash(id) => { BCReadRequest::BlockHash(id) => {
let mut hash = [0; 32]; let mut hash = [0; 32];
hash[0..8].copy_from_slice(&id.to_le_bytes()); hash[0..8].copy_from_slice(&id.to_le_bytes());
DatabaseResponse::BlockHash(hash) BCResponse::BlockHash(hash)
} }
DatabaseRequest::BlockExtendedHeaderInRange(range) => { BCReadRequest::BlockExtendedHeaderInRange(range) => {
let mut end = usize::try_from(range.end).unwrap(); let mut end = usize::try_from(range.end).unwrap();
let mut start = usize::try_from(range.start).unwrap(); let mut start = usize::try_from(range.start).unwrap();
@ -172,7 +177,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
start -= dummy_height - block_len; start -= dummy_height - block_len;
} }
DatabaseResponse::BlockExtendedHeaderInRange( BCResponse::BlockExtendedHeaderInRange(
blocks blocks
.read() .read()
.unwrap() .unwrap()
@ -184,7 +189,7 @@ impl Service<DatabaseRequest> for DummyDatabase {
.collect(), .collect(),
) )
} }
DatabaseRequest::ChainHeight => { BCReadRequest::ChainHeight => {
let height: u64 = dummy_height let height: u64 = dummy_height
.unwrap_or(blocks.read().unwrap().len()) .unwrap_or(blocks.read().unwrap().len())
.try_into() .try_into()
@ -193,9 +198,9 @@ impl Service<DatabaseRequest> for DummyDatabase {
let mut top_hash = [0; 32]; let mut top_hash = [0; 32];
top_hash[0..8].copy_from_slice(&height.to_le_bytes()); top_hash[0..8].copy_from_slice(&height.to_le_bytes());
DatabaseResponse::ChainHeight(height, top_hash) BCResponse::ChainHeight(height, top_hash)
} }
DatabaseRequest::GeneratedCoins => DatabaseResponse::GeneratedCoins(0), BCReadRequest::GeneratedCoins => BCResponse::GeneratedCoins(0),
_ => unimplemented!("the context svc should not need these requests!"), _ => unimplemented!("the context svc should not need these requests!"),
}) })
} }
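One detail of this mock worth making explicit: block hashes are synthesized deterministically from the height by writing its little-endian bytes into the first eight bytes of the hash. A quick sketch of what a caller sees (illustrative values):
// BCReadRequest::BlockHash(5) returns a hash beginning [5, 0, 0, 0, 0, 0, 0, 0, ...]
let mut expected = [0u8; 32];
expected[0..8].copy_from_slice(&5u64.to_le_bytes());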

View file

@ -1,94 +1,105 @@
//! # Transaction Verifier Service.
//!
//! This module contains the [`TxVerifierService`] which handles consensus validation of transactions.
//!
use std::{ use std::{
collections::HashSet, collections::HashSet,
future::Future, future::Future,
ops::Deref, ops::Deref,
pin::Pin, pin::Pin,
sync::Arc, sync::{Arc, Mutex as StdMutex},
task::{Context, Poll}, task::{Context, Poll},
}; };
use futures::FutureExt; use futures::FutureExt;
use monero_serai::ringct::RctType; use monero_serai::{
use monero_serai::transaction::Transaction; ringct::RctType,
transaction::{Input, Timelock, Transaction},
};
use rayon::prelude::*; use rayon::prelude::*;
use tower::{Service, ServiceExt}; use tower::{Service, ServiceExt};
use tracing::instrument; use tracing::instrument;
use cuprate_helper::asynch::rayon_spawn_async; use cuprate_consensus_rules::{
use monero_consensus::{
transactions::{ transactions::{
check_transaction_contextual, check_transaction_semantic, RingCTError, TransactionError, check_decoy_info, check_transaction_contextual, check_transaction_semantic,
TxRingMembersInfo, output_unlocked, TransactionError,
}, },
ConsensusError, HardFork, TxVersion, ConsensusError, HardFork, TxVersion,
}; };
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_types::blockchain::{BCReadRequest, BCResponse};
use crate::{ use crate::{
batch_verifier::MultiThreadedBatchVerifier, context::ReOrgToken, Database, DatabaseRequest, batch_verifier::MultiThreadedBatchVerifier,
DatabaseResponse, ExtendedConsensusError, transactions::contextual_data::{batch_get_decoy_info, batch_get_ring_member_info},
Database, ExtendedConsensusError,
}; };
pub mod contextual_data; pub mod contextual_data;
mod output_cache;
pub use output_cache::OutputCache; /// An enum representing the type of validation that needs to be completed for this transaction.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum VerificationNeeded {
/// Both semantic validation and contextual validation are needed.
SemanticAndContextual,
/// Only contextual validation is needed.
Contextual,
}
pub async fn batch_setup_txs( /// Represents if a transaction has been fully validated and under what conditions
txs: Vec<(Vec<Transaction>, HardFork)>, /// the transaction is valid in the future.
) -> Result<Vec<Vec<Arc<TransactionVerificationData>>>, ExtendedConsensusError> { #[derive(Copy, Clone, Debug, PartialEq, Eq)]
let batch_verifier = Arc::new(MultiThreadedBatchVerifier::new(rayon::current_num_threads())); pub enum CachedVerificationState {
/// The transaction has not been validated.
NotVerified,
/// The transaction is valid* if the block represented by this hash is in the blockchain and the [`HardFork`]
/// is the same.
///
/// *V1 transactions require checks on their ring-length even if this hash is in the blockchain.
ValidAtHashAndHF([u8; 32], HardFork),
/// The transaction is valid* if the block represented by this hash is in the blockchain _and_ this
/// given time lock is unlocked. The time lock here will represent the youngest used time based lock
/// (If the transaction uses any time based time locks). This is because time locks are not monotonic
/// so unlocked outputs could become re-locked.
///
/// *V1 transactions require checks on their ring-length even if this hash is in the blockchain.
ValidAtHashAndHFWithTimeBasedLock([u8; 32], HardFork, Timelock),
}
// Move out of the async runtime and use rayon to parallelize the serialisation and hashing of the txs. impl CachedVerificationState {
let txs = rayon_spawn_async(move || { /// Returns the block hash this is valid for if in state [`CachedVerificationState::ValidAtHashAndHF`] or [`CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock`].
let txs = txs fn verified_at_block_hash(&self) -> Option<[u8; 32]> {
.into_par_iter() match self {
.map(|(txs, hf)| { CachedVerificationState::NotVerified => None,
txs.into_par_iter() CachedVerificationState::ValidAtHashAndHF(hash, _)
.map(|tx| { | CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, _, _) => Some(*hash),
Ok(Arc::new(TransactionVerificationData::new(
tx,
&hf,
batch_verifier.clone(),
)?))
})
.collect::<Result<Vec<_>, ConsensusError>>()
})
.collect::<Result<Vec<_>, ConsensusError>>()?;
if !Arc::into_inner(batch_verifier).unwrap().verify() {
Err(ConsensusError::Transaction(TransactionError::RingCTError(
RingCTError::BulletproofsRangeInvalid,
)))?
} }
}
Ok::<_, ConsensusError>(txs)
})
.await?;
Ok(txs)
} }
/// Data needed to verify a transaction. /// Data needed to verify a transaction.
///
#[derive(Debug)] #[derive(Debug)]
pub struct TransactionVerificationData { pub struct TransactionVerificationData {
/// The transaction we are verifying
pub tx: Transaction, pub tx: Transaction,
/// The [`TxVersion`] of this tx.
pub version: TxVersion, pub version: TxVersion,
/// The serialised transaction.
pub tx_blob: Vec<u8>, pub tx_blob: Vec<u8>,
/// The weight of the transaction.
pub tx_weight: usize, pub tx_weight: usize,
/// The fee this transaction has paid.
pub fee: u64, pub fee: u64,
/// The hash of this transaction.
pub tx_hash: [u8; 32], pub tx_hash: [u8; 32],
/// We put this behind a mutex as the information is not constant and is based on past output idxs /// The verification state of this transaction.
/// which could change on re-orgs. pub cached_verification_state: StdMutex<CachedVerificationState>,
rings_member_info: std::sync::Mutex<Option<(TxRingMembersInfo, ReOrgToken)>>,
} }
impl TransactionVerificationData { impl TransactionVerificationData {
pub fn new( /// Creates a new [`TransactionVerificationData`] from the given [`Transaction`].
tx: Transaction, pub fn new(tx: Transaction) -> Result<TransactionVerificationData, ConsensusError> {
hf: &HardFork,
verifier: Arc<MultiThreadedBatchVerifier>,
) -> Result<TransactionVerificationData, ConsensusError> {
let tx_hash = tx.hash(); let tx_hash = tx.hash();
let tx_blob = tx.serialize(); let tx_blob = tx.serialize();
@ -101,17 +112,12 @@ impl TransactionVerificationData {
_ => tx_blob.len(), _ => tx_blob.len(),
}; };
let fee = verifier.queue_statement(|verifier| {
check_transaction_semantic(&tx, tx_blob.len(), tx_weight, &tx_hash, hf, verifier)
.map_err(ConsensusError::Transaction)
})?;
Ok(TransactionVerificationData { Ok(TransactionVerificationData {
tx_hash, tx_hash,
tx_blob, tx_blob,
tx_weight, tx_weight,
fee, fee: tx.rct_signatures.base.fee,
rings_member_info: std::sync::Mutex::new(None), cached_verification_state: StdMutex::new(CachedVerificationState::NotVerified),
version: TxVersion::from_raw(tx.prefix.version) version: TxVersion::from_raw(tx.prefix.version)
.ok_or(TransactionError::TransactionVersionInvalid)?, .ok_or(TransactionError::TransactionVersionInvalid)?,
tx, tx,
@ -119,24 +125,49 @@ impl TransactionVerificationData {
} }
} }
/// A request to verify a transaction.
pub enum VerifyTxRequest { pub enum VerifyTxRequest {
/// Verifies transactions in the context of a block. /// Verifies a batch of prepared txs.
Block { Prepped {
/// The transactions to verify.
// TODO: Can we use references to remove the Vec? won't play nicely with Service though
txs: Vec<Arc<TransactionVerificationData>>, txs: Vec<Arc<TransactionVerificationData>>,
/// The current chain height.
current_chain_height: u64, current_chain_height: u64,
/// The top block hash.
top_hash: [u8; 32],
/// The value for time to use to check time locked outputs.
time_for_time_lock: u64, time_for_time_lock: u64,
/// The current [`HardFork`]
hf: HardFork,
},
/// Verifies a batch of new txs.
/// Returning [`VerifyTxResponse::OkPrepped`]
New {
/// The transactions to verify.
txs: Vec<Transaction>,
/// The current chain height.
current_chain_height: u64,
/// The top block hash.
top_hash: [u8; 32],
/// The value for time to use to check time locked outputs.
time_for_time_lock: u64,
/// The current [`HardFork`]
hf: HardFork, hf: HardFork,
re_org_token: ReOrgToken,
}, },
} }
/// A response from a verify transaction request.
#[derive(Debug)]
pub enum VerifyTxResponse { pub enum VerifyTxResponse {
BatchSetupOk(Vec<Arc<TransactionVerificationData>>), OkPrepped(Vec<Arc<TransactionVerificationData>>),
Ok, Ok,
} }
/// The transaction verifier service.
#[derive(Clone)] #[derive(Clone)]
pub struct TxVerifierService<D: Clone> { pub struct TxVerifierService<D> {
/// The database.
database: D, database: D,
} }
@ -145,6 +176,7 @@ where
D: Database + Clone + Send + 'static, D: Database + Clone + Send + 'static,
D::Future: Send + 'static, D::Future: Send + 'static,
{ {
/// Creates a new [`TxVerifierService`].
pub fn new(database: D) -> TxVerifierService<D> { pub fn new(database: D) -> TxVerifierService<D> {
TxVerifierService { database } TxVerifierService { database }
} }
@ -169,20 +201,38 @@ where
async move { async move {
match req { match req {
VerifyTxRequest::Block { VerifyTxRequest::New {
txs, txs,
current_chain_height, current_chain_height,
top_hash,
time_for_time_lock, time_for_time_lock,
hf, hf,
re_org_token,
} => { } => {
verify_transactions_for_block( prep_and_verify_transactions(
database, database,
txs, txs,
current_chain_height, current_chain_height,
top_hash,
time_for_time_lock,
hf,
)
.await
}
VerifyTxRequest::Prepped {
txs,
current_chain_height,
top_hash,
time_for_time_lock,
hf,
} => {
verify_prepped_transactions(
database,
&txs,
current_chain_height,
top_hash,
time_for_time_lock, time_for_time_lock,
hf, hf,
re_org_token,
) )
.await .await
} }
@ -192,88 +242,318 @@ where
} }
} }
#[instrument(name = "verify_txs", skip_all, level = "info")] /// Prepares transactions for verification, then verifies them.
async fn verify_transactions_for_block<D>( async fn prep_and_verify_transactions<D>(
database: D, database: D,
txs: Vec<Arc<TransactionVerificationData>>, txs: Vec<Transaction>,
current_chain_height: u64, current_chain_height: u64,
top_hash: [u8; 32],
time_for_time_lock: u64, time_for_time_lock: u64,
hf: HardFork, hf: HardFork,
re_org_token: ReOrgToken,
) -> Result<VerifyTxResponse, ExtendedConsensusError> ) -> Result<VerifyTxResponse, ExtendedConsensusError>
where where
D: Database + Clone + Sync + Send + 'static, D: Database + Clone + Sync + Send + 'static,
{ {
tracing::debug!("Verifying transactions for block, amount: {}", txs.len()); let span = tracing::info_span!("prep_txs", amt = txs.len());
contextual_data::batch_refresh_ring_member_info( tracing::debug!(parent: &span, "prepping transactions for verification.");
&txs, let txs = rayon_spawn_async(|| {
&hf, txs.into_par_iter()
re_org_token, .map(|tx| TransactionVerificationData::new(tx).map(Arc::new))
database.clone(), .collect::<Result<Vec<_>, _>>()
None,
)
.await?;
let spent_kis = Arc::new(std::sync::Mutex::new(HashSet::new()));
let cloned_spent_kis = spent_kis.clone();
rayon_spawn_async(move || {
txs.par_iter().try_for_each(|tx| {
verify_transaction_for_block(
tx,
current_chain_height,
time_for_time_lock,
hf,
cloned_spent_kis.clone(),
)
})
}) })
.await?; .await?;
let DatabaseResponse::CheckKIsNotSpent(kis_spent) = database verify_prepped_transactions(
.oneshot(DatabaseRequest::CheckKIsNotSpent( database,
Arc::into_inner(spent_kis).unwrap().into_inner().unwrap(), &txs,
)) current_chain_height,
top_hash,
time_for_time_lock,
hf,
)
.await?;
Ok(VerifyTxResponse::OkPrepped(txs))
}
#[instrument(name = "verify_txs", skip_all, fields(amt = txs.len()) level = "info")]
async fn verify_prepped_transactions<D>(
mut database: D,
txs: &[Arc<TransactionVerificationData>],
current_chain_height: u64,
top_hash: [u8; 32],
time_for_time_lock: u64,
hf: HardFork,
) -> Result<VerifyTxResponse, ExtendedConsensusError>
where
D: Database + Clone + Sync + Send + 'static,
{
tracing::debug!("Verifying transactions");
tracing::trace!("Checking for duplicate key images");
let mut spent_kis = HashSet::with_capacity(txs.len());
txs.iter().try_for_each(|tx| {
tx.tx.prefix.inputs.iter().try_for_each(|input| {
if let Input::ToKey { key_image, .. } = input {
if !spent_kis.insert(key_image.compress().0) {
tracing::debug!("Duplicate key image found in batch.");
return Err(ConsensusError::Transaction(TransactionError::KeyImageSpent));
}
}
Ok(())
})
})?;
let BCResponse::KeyImagesSpent(kis_spent) = database
.ready()
.await?
.call(BCReadRequest::KeyImagesSpent(spent_kis))
.await? .await?
else { else {
panic!("Database sent incorrect response!"); panic!("Database sent incorrect response!");
}; };
if kis_spent { if kis_spent {
tracing::debug!("One or more key images in batch already spent.");
Err(ConsensusError::Transaction(TransactionError::KeyImageSpent))?; Err(ConsensusError::Transaction(TransactionError::KeyImageSpent))?;
} }
let mut verified_at_block_hashes = txs
.iter()
.filter_map(|txs| {
txs.cached_verification_state
.lock()
.unwrap()
.verified_at_block_hash()
})
.collect::<HashSet<_>>();
tracing::trace!(
"Verified at hashes len: {}.",
verified_at_block_hashes.len()
);
if !verified_at_block_hashes.is_empty() {
tracing::trace!("Filtering block hashes not in the main chain.");
let BCResponse::FilterUnknownHashes(known_hashes) = database
.ready()
.await?
.call(BCReadRequest::FilterUnknownHashes(verified_at_block_hashes))
.await?
else {
panic!("Database returned wrong response!");
};
verified_at_block_hashes = known_hashes;
}
let (txs_needing_full_verification, txs_needing_partial_verification) =
transactions_needing_verification(
txs,
verified_at_block_hashes,
&hf,
current_chain_height,
time_for_time_lock,
)?;
futures::try_join!(
verify_transactions_decoy_info(txs_needing_partial_verification, hf, database.clone()),
verify_transactions(
txs_needing_full_verification,
current_chain_height,
top_hash,
time_for_time_lock,
hf,
database
)
)?;
Ok(VerifyTxResponse::Ok) Ok(VerifyTxResponse::Ok)
} }
fn verify_transaction_for_block( #[allow(clippy::type_complexity)] // I don't think the return is too complex
tx_verification_data: &TransactionVerificationData, fn transactions_needing_verification(
txs: &[Arc<TransactionVerificationData>],
hashes_in_main_chain: HashSet<[u8; 32]>,
current_hf: &HardFork,
current_chain_height: u64, current_chain_height: u64,
time_for_time_lock: u64, time_for_time_lock: u64,
) -> Result<
(
Vec<(Arc<TransactionVerificationData>, VerificationNeeded)>,
Vec<Arc<TransactionVerificationData>>,
),
ConsensusError,
> {
// txs needing full validation: semantic and/or contextual
let mut full_validation_transactions = Vec::new();
// txs needing partial _contextual_ validation, not semantic.
let mut partial_validation_transactions = Vec::new();
for tx in txs.iter() {
let guard = tx.cached_verification_state.lock().unwrap();
match guard.deref() {
CachedVerificationState::NotVerified => {
drop(guard);
full_validation_transactions
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
continue;
}
CachedVerificationState::ValidAtHashAndHF(hash, hf) => {
if current_hf != hf {
drop(guard);
full_validation_transactions
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
continue;
}
if !hashes_in_main_chain.contains(hash) {
drop(guard);
full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
continue;
}
}
CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, hf, lock) => {
if current_hf != hf {
drop(guard);
full_validation_transactions
.push((tx.clone(), VerificationNeeded::SemanticAndContextual));
continue;
}
if !hashes_in_main_chain.contains(hash) {
drop(guard);
full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
continue;
}
// If the time lock is still locked then the transaction is invalid.
if !output_unlocked(lock, current_chain_height, time_for_time_lock, hf) {
return Err(ConsensusError::Transaction(
TransactionError::OneOrMoreRingMembersLocked,
));
}
}
}
if tx.version == TxVersion::RingSignatures {
drop(guard);
partial_validation_transactions.push(tx.clone());
continue;
}
}
Ok((
full_validation_transactions,
partial_validation_transactions,
))
}
async fn verify_transactions_decoy_info<D>(
txs: Vec<Arc<TransactionVerificationData>>,
hf: HardFork, hf: HardFork,
spent_kis: Arc<std::sync::Mutex<HashSet<[u8; 32]>>>, database: D,
) -> Result<(), ConsensusError> { ) -> Result<(), ExtendedConsensusError>
tracing::debug!( where
"Verifying transaction: {}", D: Database + Clone + Sync + Send + 'static,
hex::encode(tx_verification_data.tx_hash) {
); batch_get_decoy_info(&txs, hf, database)
.await?
let rings_member_info_lock = tx_verification_data.rings_member_info.lock().unwrap(); .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, &hf)?)))?;
let rings_member_info = match rings_member_info_lock.deref() {
Some(rings_member_info) => rings_member_info, Ok(())
None => panic!("rings_member_info needs to be set to be able to verify!"), }
};
async fn verify_transactions<D>(
check_transaction_contextual( txs: Vec<(Arc<TransactionVerificationData>, VerificationNeeded)>,
&tx_verification_data.tx, current_chain_height: u64,
&rings_member_info.0, top_hash: [u8; 32],
current_chain_height, current_time_lock_timestamp: u64,
time_for_time_lock, hf: HardFork,
&hf, database: D,
spent_kis, ) -> Result<(), ExtendedConsensusError>
)?; where
D: Database + Clone + Sync + Send + 'static,
{
let txs_ring_member_info =
batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;
rayon_spawn_async(move || {
let batch_verifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads());
txs.par_iter()
.zip(txs_ring_member_info.par_iter())
.try_for_each(|((tx, verification_needed), ring)| {
// do semantic validation if needed.
if *verification_needed == VerificationNeeded::SemanticAndContextual {
batch_verifier.queue_statement(|verifier| {
let fee = check_transaction_semantic(
&tx.tx,
tx.tx_blob.len(),
tx.tx_weight,
&tx.tx_hash,
&hf,
verifier,
)?;
// make sure monero-serai calculated the same fee.
assert_eq!(fee, tx.fee);
Ok(())
})?;
}
// Both variants of `VerificationNeeded` require contextual validation.
check_transaction_contextual(
&tx.tx,
ring,
current_chain_height,
current_time_lock_timestamp,
&hf,
)?;
Ok::<_, ConsensusError>(())
})?;
if !batch_verifier.verify() {
return Err(ExtendedConsensusError::OneOrMoreBatchVerificationStatementsInvalid);
}
txs.iter()
.zip(txs_ring_member_info)
.for_each(|((tx, _), ring)| {
if ring.time_locked_outs.is_empty() {
*tx.cached_verification_state.lock().unwrap() =
CachedVerificationState::ValidAtHashAndHF(top_hash, hf);
} else {
let youngest_timebased_lock = ring
.time_locked_outs
.iter()
.filter_map(|lock| match lock {
Timelock::Time(time) => Some(*time),
_ => None,
})
.min();
*tx.cached_verification_state.lock().unwrap() =
if let Some(time) = youngest_timebased_lock {
CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(
top_hash,
hf,
Timelock::Time(time),
)
} else {
CachedVerificationState::ValidAtHashAndHF(top_hash, hf)
};
}
});
Ok(())
})
.await?;
Ok(()) Ok(())
} }
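To summarize the new caching flow in one place, here is a condensed, illustrative restatement of the routing in `transactions_needing_verification` (it collapses the semantic/contextual distinction into a single flag purely for illustration; the real function also special-cases v1 ring lengths and time locks):
// Sketch only: does this tx need *any* re-verification?
fn needs_reverification(
    state: &CachedVerificationState,
    current_hf: HardFork,
    in_main_chain: impl Fn(&[u8; 32]) -> bool,
) -> bool {
    match state {
        CachedVerificationState::NotVerified => true,
        CachedVerificationState::ValidAtHashAndHF(hash, hf)
        | CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, hf, _) => {
            // A hard-fork change invalidates everything; an unknown block hash
            // means the tx was verified on a chain we are no longer following.
            *hf != current_hf || !in_main_chain(hash)
        }
    }
}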

View file

@ -1,6 +1,6 @@
//! # Contextual Data //! # Contextual Data
//! //!
//! This module contains [`TxRingMembersInfo`] which is a struct made up from blockchain information about the //! This module fills [`TxRingMembersInfo`] which is a struct made up from blockchain information about the
//! ring members of inputs. This module does minimal consensus checks, only when needed, and should not be relied //! ring members of inputs. This module does minimal consensus checks, only when needed, and should not be relied
//! upon to do any. //! upon to do any.
//! //!
@ -10,166 +10,142 @@
//! //!
//! Because this data is unique for *every* transaction and the context service is just for blockchain state data. //! Because this data is unique for *every* transaction and the context service is just for blockchain state data.
//! //!
use std::{ use std::{
collections::{HashMap, HashSet}, collections::{HashMap, HashSet},
ops::Deref,
sync::Arc, sync::Arc,
}; };
use monero_serai::transaction::Input; use monero_serai::transaction::{Input, Timelock};
use tower::ServiceExt; use tower::ServiceExt;
use tracing::instrument;
use monero_consensus::{ use cuprate_consensus_rules::{
transactions::{ transactions::{
get_ring_members_for_inputs, insert_ring_member_ids, DecoyInfo, TxRingMembersInfo, get_absolute_offsets, insert_ring_member_ids, DecoyInfo, Rings, TransactionError,
TxRingMembersInfo,
}, },
ConsensusError, HardFork, ConsensusError, HardFork, TxVersion,
};
use cuprate_types::{
blockchain::{BCReadRequest, BCResponse},
OutputOnChain,
}; };
use crate::{ use crate::{transactions::TransactionVerificationData, Database, ExtendedConsensusError};
context::ReOrgToken,
transactions::{output_cache::OutputCache, TransactionVerificationData},
Database, DatabaseRequest, DatabaseResponse, ExtendedConsensusError,
};
pub async fn batch_refresh_ring_member_info<'a, D: Database + Clone + Send + Sync + 'static>( /// Get the ring members for the inputs from the outputs on the chain.
txs_verification_data: &'a [Arc<TransactionVerificationData>], ///
hf: &HardFork, /// Will error if `outputs` does not contain the outputs needed.
re_org_token: ReOrgToken, fn get_ring_members_for_inputs(
mut database: D, get_outputs: impl Fn(u64, u64) -> Option<OutputOnChain>,
out_cache: Option<&OutputCache<'a>>, inputs: &[Input],
) -> Result<(), ExtendedConsensusError> { ) -> Result<Vec<Vec<OutputOnChain>>, TransactionError> {
let (txs_needing_full_refresh, txs_needing_partial_refresh) = inputs
ring_member_info_needing_refresh(txs_verification_data, hf);
if !txs_needing_full_refresh.is_empty() {
batch_fill_ring_member_info(
txs_needing_full_refresh.iter(),
hf,
re_org_token,
database.clone(),
out_cache,
)
.await?;
}
let unique_input_amounts = txs_needing_partial_refresh
.iter() .iter()
.flat_map(|tx_info| { .map(|inp| match inp {
tx_info Input::ToKey {
.tx amount,
.prefix key_offsets,
.inputs ..
.iter() } => {
.map(|input| match input { let offsets = get_absolute_offsets(key_offsets)?;
Input::ToKey { amount, .. } => amount.unwrap_or(0), Ok(offsets
_ => 0, .iter()
}) .map(|offset| {
.collect::<HashSet<_>>() get_outputs(amount.unwrap_or(0), *offset)
}) .ok_or(TransactionError::RingMemberNotFoundOrInvalid)
.collect::<HashSet<_>>(); })
.collect::<Result<_, TransactionError>>()?)
let DatabaseResponse::NumberOutputsWithAmount(outputs_with_amount) = database
.ready()
.await?
.call(DatabaseRequest::NumberOutputsWithAmount(
unique_input_amounts.into_iter().collect(),
))
.await?
else {
panic!("Database sent incorrect response!")
};
for tx_v_data in txs_needing_partial_refresh {
let decoy_info = if hf != &HardFork::V1 {
// this data is only needed after hard-fork 1.
Some(
DecoyInfo::new(&tx_v_data.tx.prefix.inputs, &outputs_with_amount, hf)
.map_err(ConsensusError::Transaction)?,
)
} else {
None
};
// Temporarily acquire the mutex lock to add the ring member info.
tx_v_data
.rings_member_info
.lock()
.unwrap()
.as_mut()
// this unwrap is safe as otherwise this would require a full refresh not a partial one.
.unwrap()
.0
.decoy_info = decoy_info;
}
Ok(())
}
/// This function returns the transaction verification data that need refreshing.
///
/// The first returned vec needs a full refresh.
/// The second returned vec only needs a partial refresh.
///
/// A full refresh is a refresh of all the ring members and the decoy info.
/// A partial refresh is just a refresh of the decoy info.
fn ring_member_info_needing_refresh(
txs_verification_data: &[Arc<TransactionVerificationData>],
hf: &HardFork,
) -> (
Vec<Arc<TransactionVerificationData>>,
Vec<Arc<TransactionVerificationData>>,
) {
let mut txs_needing_full_refresh = Vec::new();
let mut txs_needing_partial_refresh = Vec::new();
for tx in txs_verification_data {
let tx_ring_member_info = tx.rings_member_info.lock().unwrap();
// if we don't have ring members or if a re-org has happened do a full refresh.
if let Some(tx_ring_member_info) = tx_ring_member_info.deref() {
if tx_ring_member_info.1.reorg_happened() {
txs_needing_full_refresh.push(tx.clone());
continue;
} }
} else { _ => Err(TransactionError::IncorrectInputType),
txs_needing_full_refresh.push(tx.clone()); })
continue; .collect::<Result<_, TransactionError>>()
}
// if any input does not have a 0 amount do a partial refresh, this is because some decoy info
// data is based on the amount of non-ringCT outputs at a certain point.
// Or if a hf has happened as this will change the default minimum decoys.
if &tx_ring_member_info
.as_ref()
.expect("We just checked if this was None")
.0
.hf
!= hf
|| tx.tx.prefix.inputs.iter().any(|inp| match inp {
Input::Gen(_) => false,
Input::ToKey { amount, .. } => amount.is_some(),
})
{
txs_needing_partial_refresh.push(tx.clone());
}
}
(txs_needing_full_refresh, txs_needing_partial_refresh)
} }
/// Fills the `rings_member_info` field on the inputted [`TransactionVerificationData`]. /// Construct a [`TxRingMembersInfo`] struct.
///
/// The used outs must be all the ring members used in the transactions inputs.
pub fn new_ring_member_info(
used_outs: Vec<Vec<OutputOnChain>>,
decoy_info: Option<DecoyInfo>,
tx_version: TxVersion,
) -> Result<TxRingMembersInfo, TransactionError> {
Ok(TxRingMembersInfo {
youngest_used_out_height: used_outs
.iter()
.map(|inp_outs| {
inp_outs
.iter()
// the output with the highest height is the youngest
.map(|out| out.height)
.max()
.expect("Input must have ring members")
})
.max()
.expect("Tx must have inputs"),
time_locked_outs: used_outs
.iter()
.flat_map(|inp_outs| {
inp_outs
.iter()
.filter_map(|out| match out.time_lock {
Timelock::None => None,
lock => Some(lock),
})
.collect::<Vec<_>>()
})
.collect(),
rings: new_rings(used_outs, tx_version)?,
decoy_info,
})
}
/// Builds the [`Rings`] for the transaction inputs, from the given outputs.
fn new_rings(
outputs: Vec<Vec<OutputOnChain>>,
tx_version: TxVersion,
) -> Result<Rings, TransactionError> {
Ok(match tx_version {
TxVersion::RingSignatures => Rings::Legacy(
outputs
.into_iter()
.map(|inp_outs| {
inp_outs
.into_iter()
.map(|out| out.key.ok_or(TransactionError::RingMemberNotFoundOrInvalid))
.collect::<Result<Vec<_>, TransactionError>>()
})
.collect::<Result<Vec<_>, TransactionError>>()?,
),
TxVersion::RingCT => Rings::RingCT(
outputs
.into_iter()
.map(|inp_outs| {
inp_outs
.into_iter()
.map(|out| {
Ok([
out.key
.ok_or(TransactionError::RingMemberNotFoundOrInvalid)?,
out.commitment,
])
})
.collect::<Result<_, TransactionError>>()
})
.collect::<Result<_, _>>()?,
),
})
}
/// Retrieves the [`TxRingMembersInfo`] for the inputted [`TransactionVerificationData`].
/// ///
/// This function batch gets all the ring members for the inputted transactions and fills in data about /// This function batch gets all the ring members for the inputted transactions and fills in data about
/// them. /// them.
pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync + 'static>( pub async fn batch_get_ring_member_info<D: Database>(
txs_verification_data: impl Iterator<Item = &Arc<TransactionVerificationData>> + Clone, txs_verification_data: impl Iterator<Item = &Arc<TransactionVerificationData>> + Clone,
hf: &HardFork, hf: &HardFork,
re_org_token: ReOrgToken,
mut database: D, mut database: D,
out_cache: Option<&OutputCache<'a>>, ) -> Result<Vec<TxRingMembersInfo>, ExtendedConsensusError> {
) -> Result<(), ExtendedConsensusError> {
let mut output_ids = HashMap::new(); let mut output_ids = HashMap::new();
for tx_v_data in txs_verification_data.clone() { for tx_v_data in txs_verification_data.clone() {
@ -177,19 +153,19 @@ pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync +
.map_err(ConsensusError::Transaction)?; .map_err(ConsensusError::Transaction)?;
} }
let DatabaseResponse::Outputs(outputs) = database let BCResponse::Outputs(outputs) = database
.ready() .ready()
.await? .await?
.call(DatabaseRequest::Outputs(output_ids)) .call(BCReadRequest::Outputs(output_ids))
.await? .await?
else { else {
panic!("Database sent incorrect response!") panic!("Database sent incorrect response!")
}; };
let DatabaseResponse::NumberOutputsWithAmount(outputs_with_amount) = database let BCResponse::NumberOutputsWithAmount(outputs_with_amount) = database
.ready() .ready()
.await? .await?
.call(DatabaseRequest::NumberOutputsWithAmount( .call(BCReadRequest::NumberOutputsWithAmount(
outputs.keys().copied().collect(), outputs.keys().copied().collect(),
)) ))
.await? .await?
@ -197,38 +173,84 @@ pub async fn batch_fill_ring_member_info<'a, D: Database + Clone + Send + Sync +
panic!("Database sent incorrect response!") panic!("Database sent incorrect response!")
}; };
for tx_v_data in txs_verification_data { Ok(txs_verification_data
let ring_members_for_tx = get_ring_members_for_inputs( .map(move |tx_v_data| {
|amt, idx| { let numb_outputs = |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0);
if let Some(cached_outs) = out_cache {
if let Some(out) = cached_outs.get_out(amt, idx) {
return Some(out);
}
}
outputs.get(&amt)?.get(&idx) let ring_members_for_tx = get_ring_members_for_inputs(
}, |amt, idx| outputs.get(&amt)?.get(&idx).copied(),
&tx_v_data.tx.prefix.inputs, &tx_v_data.tx.prefix.inputs,
)
.map_err(ConsensusError::Transaction)?;
let decoy_info = if hf != &HardFork::V1 {
// this data is only needed after hard-fork 1.
Some(
DecoyInfo::new(&tx_v_data.tx.prefix.inputs, &outputs_with_amount, hf)
.map_err(ConsensusError::Transaction)?,
) )
} else { .map_err(ConsensusError::Transaction)?;
None
};
// Temporarily acquire the mutex lock to add the ring member info. let _ = tx_v_data.rings_member_info.lock().unwrap().insert((
let _ = tx_v_data.rings_member_info.lock().unwrap().insert(( // this data is only needed after hard-fork 1.
TxRingMembersInfo::new(ring_members_for_tx, decoy_info, tx_v_data.version, *hf) Some(
.map_err(ConsensusError::Transaction)?, DecoyInfo::new(&tx_v_data.tx.prefix.inputs, numb_outputs, hf)
re_org_token.clone(), .map_err(ConsensusError::Transaction)?,
)); )
} } else {
None
};
Ok(()) new_ring_member_info(ring_members_for_tx, decoy_info, tx_v_data.version)
.map_err(ConsensusError::Transaction)
})
.collect::<Result<_, _>>()?)
}
/// Refreshes the transactions' [`TxRingMembersInfo`], if needed.
///
/// # Panics
/// This function panics if `hf == HardFork::V1` as decoy info
/// should not be needed for V1.
#[instrument(level = "debug", skip_all)]
pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
txs_verification_data: &'a [Arc<TransactionVerificationData>],
hf: HardFork,
mut database: D,
) -> Result<impl Iterator<Item = Result<DecoyInfo, ConsensusError>> + 'a, ExtendedConsensusError> {
// decoy info is not needed for V1.
assert_ne!(hf, HardFork::V1);
tracing::debug!(
"Retrieving decoy info for {} txs.",
txs_verification_data.len()
);
// Get all the different input amounts.
let unique_input_amounts = txs_verification_data
.iter()
.flat_map(|tx_info| {
tx_info.tx.prefix.inputs.iter().map(|input| match input {
Input::ToKey { amount, .. } => amount.unwrap_or(0),
_ => 0,
})
})
.collect::<HashSet<_>>();
tracing::debug!(
"Getting the amount of outputs with certain amounts for {} amounts",
unique_input_amounts.len()
);
let BCResponse::NumberOutputsWithAmount(outputs_with_amount) = database
.ready()
.await?
.call(BCReadRequest::NumberOutputsWithAmount(
unique_input_amounts.into_iter().collect(),
))
.await?
else {
panic!("Database sent incorrect response!")
};
Ok(txs_verification_data.iter().map(move |tx_v_data| {
DecoyInfo::new(
&tx_v_data.tx.prefix.inputs,
|amt| outputs_with_amount.get(&amt).copied().unwrap_or(0),
&hf,
)
.map_err(ConsensusError::Transaction)
}))
} }
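For readers unfamiliar with the encoding handled by `get_absolute_offsets` above (from `cuprate_consensus_rules`): Monero stores a ring's key offsets relatively, each entry after the first being a delta from the previous absolute index. A worked example (illustrative values):
// Relative offsets on the wire:      [100, 5, 2]
// Absolute output indices recovered: [100, 105, 107]
let relative = [100u64, 5, 2];
let mut acc = 0u64;
let absolute: Vec<u64> = relative
    .iter()
    .map(|off| {
        acc += off;
        acc
    })
    .collect();
assert_eq!(absolute, [100, 105, 107]);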

View file

@ -1,153 +0,0 @@
use std::{
collections::{BTreeMap, HashMap},
iter::once,
sync::{Arc, OnceLock},
};
use curve25519_dalek::{
constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, EdwardsPoint, Scalar,
};
use monero_consensus::{
blocks::BlockError,
miner_tx::MinerTxError,
transactions::{OutputOnChain, TransactionError},
ConsensusError,
};
use monero_serai::{
block::Block,
transaction::{Input, Timelock},
H,
};
use tower::ServiceExt;
use crate::{
transactions::TransactionVerificationData, Database, DatabaseRequest, DatabaseResponse,
ExtendedConsensusError,
};
#[derive(Debug)]
enum CachedAmount<'a> {
Clear(u64),
Commitment(&'a EdwardsPoint),
}
impl<'a> CachedAmount<'a> {
fn get_commitment(&self) -> EdwardsPoint {
match self {
CachedAmount::Commitment(commitment) => **commitment,
// TODO: Setup a table with common amounts.
CachedAmount::Clear(amt) => ED25519_BASEPOINT_POINT + H() * Scalar::from(*amt),
}
}
}
#[derive(Debug)]
struct CachedOutput<'a> {
height: u64,
time_lock: &'a Timelock,
key: &'a CompressedEdwardsY,
amount: CachedAmount<'a>,
cached_created: OnceLock<OutputOnChain>,
}
#[derive(Debug)]
pub struct OutputCache<'a>(HashMap<u64, BTreeMap<u64, CachedOutput<'a>>>);
impl<'a> OutputCache<'a> {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
OutputCache(HashMap::new())
}
pub fn get_out(&self, amt: u64, idx: u64) -> Option<&OutputOnChain> {
let cached_out = self.0.get(&amt)?.get(&idx)?;
Some(cached_out.cached_created.get_or_init(|| OutputOnChain {
height: cached_out.height,
time_lock: *cached_out.time_lock,
key: cached_out.key.decompress(),
commitment: cached_out.amount.get_commitment(),
}))
}
pub async fn extend_from_block<'b: 'a, D: Database>(
&mut self,
blocks: impl Iterator<Item = (&'b Block, &'b [Arc<TransactionVerificationData>])> + 'b,
database: &mut D,
) -> Result<(), ExtendedConsensusError> {
let mut idx_needed = HashMap::new();
for (block, txs) in blocks {
for tx in once(&block.miner_tx).chain(txs.iter().map(|tx| &tx.tx)) {
let is_rct = tx.prefix.version == 2;
let is_miner = matches!(tx.prefix.inputs.as_slice(), &[Input::Gen(_)]);
for (i, out) in tx.prefix.outputs.iter().enumerate() {
let amt = out.amount.unwrap_or(0);
// The amt this output will be stored under.
let amt_table_key = if is_rct { 0 } else { amt };
let amount_commitment = match (is_rct, is_miner) {
(true, false) => CachedAmount::Commitment(
tx.rct_signatures.base.commitments.get(i).ok_or(
ConsensusError::Transaction(TransactionError::NonZeroOutputForV2),
)?,
),
_ => CachedAmount::Clear(amt),
};
let output_to_cache = CachedOutput {
height: block.number().ok_or(ConsensusError::Block(
BlockError::MinerTxError(MinerTxError::InputNotOfTypeGen),
))?,
time_lock: &tx.prefix.timelock,
key: &out.key,
amount: amount_commitment,
cached_created: OnceLock::new(),
};
let Some(amt_table) = self.0.get_mut(&amt_table_key) else {
idx_needed
.entry(amt_table_key)
.or_insert_with(Vec::new)
.push(output_to_cache);
continue;
};
let top_idx = *amt_table.last_key_value().unwrap().0;
amt_table.insert(top_idx + 1, output_to_cache);
}
}
}
if idx_needed.is_empty() {
return Ok(());
}
let DatabaseResponse::NumberOutputsWithAmount(numb_outs) = database
.ready()
.await?
.call(DatabaseRequest::NumberOutputsWithAmount(
idx_needed.keys().copied().collect(),
))
.await?
else {
panic!("Database sent incorrect response!");
};
for (amt_table_key, out) in idx_needed {
let numb_outs = *numb_outs
.get(&amt_table_key)
.expect("DB did not return all results!");
self.0.entry(amt_table_key).or_default().extend(
out.into_iter()
.enumerate()
.map(|(i, out)| (u64::try_from(i + numb_outs).unwrap(), out)),
)
}
Ok(())
}
}
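For reference on `CachedAmount::get_commitment` above: outputs with a clear amount have no stored commitment, so one is synthesized as the Pedersen commitment to that amount with the blinding factor fixed to one, i.e. `G + amount·H`, matching what verification expects for pre-RingCT outputs. A self-contained sketch of the same computation:
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, EdwardsPoint, Scalar};
use monero_serai::H;
// commitment = 1*G + amount*H (blinding factor fixed to 1 for clear amounts)
fn clear_amount_commitment(amount: u64) -> EdwardsPoint {
    ED25519_BASEPOINT_POINT + H() * Scalar::from(amount)
}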

View file

@ -0,0 +1,144 @@
use std::{
collections::{BTreeMap, HashMap},
future::ready,
sync::Arc,
};
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY};
use monero_serai::transaction::{Timelock, Transaction};
use tower::{service_fn, Service, ServiceExt};
use cuprate_consensus::{
TxVerifierService, VerifyTxRequest, VerifyTxResponse, __private::Database,
};
use cuprate_types::{
blockchain::{BCReadRequest, BCResponse},
OutputOnChain,
};
use cuprate_consensus_rules::HardFork;
use cuprate_test_utils::data::TX_E2D393;
fn dummy_database(outputs: BTreeMap<u64, OutputOnChain>) -> impl Database + Clone {
let outputs = Arc::new(outputs);
service_fn(move |req: BCReadRequest| {
ready(Ok(match req {
BCReadRequest::NumberOutputsWithAmount(_) => {
BCResponse::NumberOutputsWithAmount(HashMap::new())
}
BCReadRequest::Outputs(outs) => {
let idxs = outs.get(&0).unwrap();
let mut ret = HashMap::new();
ret.insert(
0_u64,
idxs.iter()
.map(|idx| (*idx, *outputs.get(idx).unwrap()))
.collect::<HashMap<_, _>>(),
);
BCResponse::Outputs(ret)
}
BCReadRequest::KeyImagesSpent(_) => BCResponse::KeyImagesSpent(false),
_ => panic!("Database request not needed for this test"),
}))
})
}
macro_rules! test_verify_valid_v2_tx {
(
$test_name: ident,
$tx: ident,
Rings: $([
$($idx: literal: ($ring_member: literal, $commitment: literal),)+
],)+
$hf: ident
) => {
#[tokio::test]
#[allow(const_item_mutation)]
async fn $test_name() {
let members = vec![
$($(($idx,
OutputOnChain {
height: 0,
time_lock: Timelock::None,
commitment: CompressedEdwardsY::from_slice(&hex_literal::hex!($commitment))
.unwrap()
.decompress()
.unwrap(),
key: CompressedEdwardsY::from_slice(&hex_literal::hex!($ring_member))
.unwrap()
.decompress(),
}),)+)+
];
let map = BTreeMap::from_iter(members);
let database = dummy_database(map);
let mut tx_verifier = TxVerifierService::new(database);
assert!(matches!(tx_verifier.ready().await.unwrap().call(
VerifyTxRequest::New {
txs: vec![Transaction::read(&mut $tx).unwrap()].into(),
current_chain_height: 10,
top_hash: [0; 32],
hf: HardFork::$hf,
time_for_time_lock: u64::MAX
}
).await.unwrap(), VerifyTxResponse::OkPrepped(_)));
// Check verification fails if we put random ring members
let members = vec![
$($(($idx,
OutputOnChain {
height: 0,
time_lock: Timelock::None,
commitment: ED25519_BASEPOINT_POINT,
key: CompressedEdwardsY::from_slice(&hex_literal::hex!($ring_member))
.unwrap()
.decompress(),
}),)+)+
];
let map = BTreeMap::from_iter(members);
let database = dummy_database(map);
let mut tx_verifier = TxVerifierService::new(database);
assert!(tx_verifier.ready().await.unwrap().call(
VerifyTxRequest::New {
txs: vec![Transaction::read(&mut $tx).unwrap()].into(),
current_chain_height: 10,
top_hash: [0; 32],
hf: HardFork::$hf,
time_for_time_lock: u64::MAX
}
).await.is_err());
}
};
}
test_verify_valid_v2_tx! {
verify_tx_e2d393,
TX_E2D393,
Rings: [
7567582: ("5fa4f8b160c0877476e78094d0ce4951b20f43088f6e3698fa4d3154069c7c1b", "9a41189729e8cf113cee0b126e22653f3f551227947f54fbbb16ae8d535d757d"),
7958047: ("0febe3d139bf3db267c2efdc714ea9b42e437a5aa16e42848a835d009108fcdf", "ecca12345c02c6b0348cfa988a0d86d34e3a89cd8b53dd4ffdb860cee0eda487"),// miner amt: 3551239030364
8224417: ("bdd1fb8a725ae15ce37bc8090925126396f87c2972d728814f2d622baa77ebf6", "24624e957c351727deadafda531f7bed433220e72dc85f8aa8d3d32cd7df42e1"),
8225772: ("cddef0210ed3113f3362ecb7aa43003c6c3ed4bcac09dc4d9d8d015472c8a3d8", "f61b954879a0f3cc3540f0364ad108fe286162f993f4b435b42038c29d07b8c2"),
8234785: ("4edf5a8448e133fcb7914ea161dbb8eb0057e44284d0315839d9fce4cdb063e8", "1cec1e2f88268d6f164f07f79c663bd1af09920a9254164f518faff45dd42138"),
8247173: ("cbee0e5fa9c31689b174862a6eb0a164a2d807d2862ac0ad50c0030f0af6c5e7", "f229752b609d923cda89735ed2a42a9af6fc3e3219ac164f17d5eac4f85f391c"),
8285361: ("f16dbd9542e7dd575c15e2c9217f5cecb6d134383e5e8416da4affab132f1ff8", "7e31ad658fff150b0ae3a9329e353522ed20dd3ac8df8cd965fa4369164857b4"),
8308826: ("4ce2b333cc421237fc96f1a0719d4ac0892f0ff457f3a14f2e499fc045cd4714", "2f7f240e42cbd3a5f02b0b185465263b6a4c6df609dcf928314ea7ddbec3d3dc"),// miner amt: 3408911250482
8312407: ("ead8dfb7423f5c3fa7f10663ce885d27d1b7eeb634ac05fd74d3b080440819bf", "236c3fde472978aff92aeb6e752eeb681dfdbb9a84d7e049238f7f544b85062a"),
8314321: ("24d3dadeef6b0aff3ee7288cd391823b0020ba3fab42085f66765fc2a164f879", "bffce0393f1fc96e3d83a057208b506c9f7ad52e012e20b228918932c6c8287a"),
8315222: ("a8b165589dffa4c31c27fb432cfdd4855b0d04102b79e439720bb80198d5b9c0", "c3febd29c1a3cc397639ff7fdb357d22a900821bef956af626651f2a916cf6f6"),
],
V9
}

View file

@ -40,7 +40,7 @@ impl<N: NetworkZone> P2PConfig<N> {
/// per [`NetworkZone`] per run. /// per [`NetworkZone`] per run.
pub(crate) fn basic_node_data(&self) -> BasicNodeData { pub(crate) fn basic_node_data(&self) -> BasicNodeData {
BasicNodeData { BasicNodeData {
my_port: self.p2p_port as u32, my_port: u32::from(self.p2p_port),
network_id: self.network.network_id(), network_id: self.network.network_id(),
peer_id: rand::random(), peer_id: rand::random(),
support_flags: PeerSupportFlags::FLUFFY_BLOCKS, support_flags: PeerSupportFlags::FLUFFY_BLOCKS,
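The `my_port` change above replaces a bare `as` cast with `u32::from`. A minimal standalone sketch (not Cuprate code) of the usual rationale: `From` is only implemented for lossless widenings, so the conversion breaks at compile time if the source type ever grows wider, where `as` would silently truncate.

```rust
fn port_to_u32(p2p_port: u16) -> u32 {
    // `u32::from` only exists for lossless conversions; if `p2p_port`
    // later became a `u64`, this would fail to compile instead of
    // silently truncating the way `p2p_port as u32` would.
    u32::from(p2p_port)
}

fn main() {
    assert_eq!(port_to_u32(18080), 18080_u32);
}
```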

View file

@ -24,7 +24,7 @@ use crate::{
P2PConfig, P2PConfig,
}; };
/// The inbound server. /// Starts the inbound server.
#[instrument(level = "warn", skip_all)] #[instrument(level = "warn", skip_all)]
pub async fn inbound_server<N, HS, A>( pub async fn inbound_server<N, HS, A>(
client_pool: Arc<ClientPool<N>>, client_pool: Arc<ClientPool<N>>,

View file

@ -1,12 +1,7 @@
//! Cuprate's P2P Crate. //! Cuprate's P2P Crate.
//! //!
//! This crate contains a [`ClientPool`](client_pool::ClientPool) which holds connected peers on a single [`NetworkZone`](monero_p2p::NetworkZone). //! This crate contains a [`NetworkInterface`] which allows interacting with the Monero P2P network on
//! //! a certain [`NetworkZone`]
//! This crate also contains the different routing methods that control how messages should be sent, i.e. broadcast to all,
//! or send to a single peer.
//!
#![allow(dead_code)]
use std::sync::Arc; use std::sync::Arc;
use futures::FutureExt; use futures::FutureExt;
@ -25,7 +20,6 @@ use monero_p2p::{
CoreSyncSvc, NetworkZone, PeerRequestHandler, CoreSyncSvc, NetworkZone, PeerRequestHandler,
}; };
pub mod block_downloader;
mod broadcast; mod broadcast;
mod client_pool; mod client_pool;
pub mod config; pub mod config;

View file

@ -25,7 +25,7 @@ use monero_wire::CoreSyncData;
use crate::{client_pool::disconnect_monitor::PeerDisconnectFut, constants::SHORT_BAN}; use crate::{client_pool::disconnect_monitor::PeerDisconnectFut, constants::SHORT_BAN};
/// The highest claimed sync info from our connected peers. /// The highest claimed sync info from our connected peers.
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct NewSyncInfo { pub struct NewSyncInfo {
/// The peer's chain height. /// The peer's chain height.
pub chain_height: u64, pub chain_height: u64,
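A hedged guess at what the new derives enable, sketched with a trimmed stand-in (the real struct has more fields): with `Ord` derived, claimed sync infos can live in ordered collections and the highest claimed height falls out of `max`. Derived `Ord` compares fields in declaration order, so `chain_height` would dominate here.

```rust
// Trimmed stand-in for illustration only.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct NewSyncInfo {
    chain_height: u64,
}

fn main() {
    let claims = [NewSyncInfo { chain_height: 5 }, NewSyncInfo { chain_height: 9 }];
    // Derived `Ord` compares fields in declaration order.
    let best = claims.iter().max().unwrap();
    assert_eq!(best.chain_height, 9);
}
```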

View file

@ -127,5 +127,6 @@ pub enum AddressBookResponse<Z: NetworkZone> {
Ok, Ok,
Peer(ZoneSpecificPeerListEntryBase<Z::Addr>), Peer(ZoneSpecificPeerListEntryBase<Z::Addr>),
Peers(Vec<ZoneSpecificPeerListEntryBase<Z::Addr>>), Peers(Vec<ZoneSpecificPeerListEntryBase<Z::Addr>>),
/// Contains `true` if the peer is banned.
IsPeerBanned(bool), IsPeerBanned(bool),
} }

View file

@ -1 +1,4 @@
# TODO # RPC
This directory contains Monero RPC types and Cuprate's RPC libraries.
<!-- TODO: link to architecture book section. -->

View file

@ -0,0 +1,15 @@
[package]
name = "cuprate-rpc-interface"
version = "0.0.0"
edition = "2021"
description = "Cuprate's RPC interface library"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/cuprate-rpc-interface"
keywords = ["cuprate", "rpc", "interface"]
[features]
[dependencies]
[dev-dependencies]

View file

@ -0,0 +1 @@

rpc/json-rpc/Cargo.toml
View file

@ -0,0 +1,15 @@
[package]
name = "json-rpc"
version = "0.0.0"
edition = "2021"
description = "JSON-RPC 2.0 implementation"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/json-rpc"
keywords = ["json", "rpc"]
[features]
[dependencies]
[dev-dependencies]

rpc/json-rpc/src/lib.rs
View file

@ -0,0 +1 @@

View file

@ -0,0 +1,15 @@
[package]
name = "monero-rpc-types"
version = "0.0.0"
edition = "2021"
description = "Monero RPC types"
license = "MIT"
authors = ["hinto-janai"]
repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/monero-rpc-types"
keywords = ["monero", "rpc", "types"]
[features]
[dependencies]
[dev-dependencies]

View file

@ -0,0 +1 @@

View file

@ -26,7 +26,7 @@ cfg-if = { workspace = true }
# We only need the `thread` feature if `service` is enabled. # We only need the `thread` feature if `service` is enabled.
# Figure out how to enable features of an already pulled in dependency conditionally. # Figure out how to enable features of an already pulled in dependency conditionally.
cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] }
cuprate-types = { path = "../../types", features = ["service"] } cuprate-types = { path = "../../types", features = ["blockchain"] }
curve25519-dalek = { workspace = true } curve25519-dalek = { workspace = true }
monero-pruning = { path = "../../pruning" } monero-pruning = { path = "../../pruning" }
monero-serai = { workspace = true, features = ["std"] } monero-serai = { workspace = true, features = ["std"] }

View file

@ -50,11 +50,11 @@
//! This channel can be `.await`ed upon to (eventually) receive //! This channel can be `.await`ed upon to (eventually) receive
//! the corresponding `Response` to your `Request`. //! the corresponding `Response` to your `Request`.
//! //!
//! [req_r]: cuprate_types::service::ReadRequest //! [req_r]: cuprate_types::blockchain::BCReadRequest
//! //!
//! [req_w]: cuprate_types::service::WriteRequest //! [req_w]: cuprate_types::blockchain::BCWriteRequest
//! //!
//! [resp]: cuprate_types::service::Response //! [resp]: cuprate_types::blockchain::BCResponse
//! //!
//! # Example //! # Example
//! Simple usage of `service`. //! Simple usage of `service`.
@ -63,7 +63,7 @@
//! use hex_literal::hex; //! use hex_literal::hex;
//! use tower::{Service, ServiceExt}; //! use tower::{Service, ServiceExt};
//! //!
//! use cuprate_types::service::{ReadRequest, WriteRequest, Response}; //! use cuprate_types::blockchain::{BCReadRequest, BCWriteRequest, BCResponse};
//! use cuprate_test_utils::data::block_v16_tx0; //! use cuprate_test_utils::data::block_v16_tx0;
//! //!
//! use cuprate_blockchain::{ConcreteEnv, config::ConfigBuilder, Env}; //! use cuprate_blockchain::{ConcreteEnv, config::ConfigBuilder, Env};
@ -82,7 +82,7 @@
//! // Prepare a request to write block. //! // Prepare a request to write block.
//! let mut block = block_v16_tx0().clone(); //! let mut block = block_v16_tx0().clone();
//! # block.height = 0 as u64; // must be 0th height or panic in `add_block()` //! # block.height = 0 as u64; // must be 0th height or panic in `add_block()`
//! let request = WriteRequest::WriteBlock(block); //! let request = BCWriteRequest::WriteBlock(block);
//! //!
//! // Send the request. //! // Send the request.
//! // We receive back an `async` channel that will //! // We receive back an `async` channel that will
@ -92,16 +92,16 @@
//! //!
//! // Block write was OK. //! // Block write was OK.
//! let response = response_channel.await?; //! let response = response_channel.await?;
//! assert_eq!(response, Response::WriteBlockOk); //! assert_eq!(response, BCResponse::WriteBlockOk);
//! //!
//! // Now, let's try getting the block hash //! // Now, let's try getting the block hash
//! // of the block we just wrote. //! // of the block we just wrote.
//! let request = ReadRequest::BlockHash(0); //! let request = BCReadRequest::BlockHash(0);
//! let response_channel = read_handle.ready().await?.call(request); //! let response_channel = read_handle.ready().await?.call(request);
//! let response = response_channel.await?; //! let response = response_channel.await?;
//! assert_eq!( //! assert_eq!(
//! response, //! response,
//! Response::BlockHash( //! BCResponse::BlockHash(
//! hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428") //! hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428")
//! ) //! )
//! ); //! );

View file

@ -15,13 +15,14 @@ use tokio_util::sync::PollSemaphore;
use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::{ use cuprate_types::{
service::{ReadRequest, Response}, blockchain::{BCReadRequest, BCResponse},
ExtendedBlockHeader, OutputOnChain, ExtendedBlockHeader, OutputOnChain,
}; };
use crate::{ use crate::{
config::ReaderThreads, config::ReaderThreads,
error::RuntimeError, error::RuntimeError,
ops::block::block_exists,
ops::{ ops::{
block::{get_block_extended_header_from_height, get_block_info}, block::{get_block_extended_header_from_height, get_block_info},
blockchain::{cumulative_generated_coins, top_block_height}, blockchain::{cumulative_generated_coins, top_block_height},
@ -30,6 +31,7 @@ use crate::{
}, },
service::types::{ResponseReceiver, ResponseResult, ResponseSender}, service::types::{ResponseReceiver, ResponseResult, ResponseSender},
tables::{BlockHeights, BlockInfos, Tables}, tables::{BlockHeights, BlockInfos, Tables},
types::BlockHash,
types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId}, types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId},
ConcreteEnv, DatabaseRo, Env, EnvInner, ConcreteEnv, DatabaseRo, Env, EnvInner,
}; };
@ -40,9 +42,9 @@ use crate::{
/// This is a cheaply [`Clone`]able handle that /// This is a cheaply [`Clone`]able handle that
/// allows `async`hronously reading from the database. /// allows `async`hronously reading from the database.
/// ///
/// Calling [`tower::Service::call`] with a [`DatabaseReadHandle`] & [`ReadRequest`] /// Calling [`tower::Service::call`] with a [`DatabaseReadHandle`] & [`BCReadRequest`]
/// will return an `async`hronous channel that can be `.await`ed upon /// will return an `async`hronous channel that can be `.await`ed upon
/// to receive the corresponding [`Response`]. /// to receive the corresponding [`BCResponse`].
pub struct DatabaseReadHandle { pub struct DatabaseReadHandle {
/// Handle to the custom `rayon` DB reader thread-pool. /// Handle to the custom `rayon` DB reader thread-pool.
/// ///
@ -131,8 +133,8 @@ impl DatabaseReadHandle {
} }
} }
impl tower::Service<ReadRequest> for DatabaseReadHandle { impl tower::Service<BCReadRequest> for DatabaseReadHandle {
type Response = Response; type Response = BCResponse;
type Error = RuntimeError; type Error = RuntimeError;
type Future = ResponseReceiver; type Future = ResponseReceiver;
@ -152,7 +154,7 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
} }
#[inline] #[inline]
fn call(&mut self, request: ReadRequest) -> Self::Future { fn call(&mut self, request: BCReadRequest) -> Self::Future {
let permit = self let permit = self
.permit .permit
.take() .take()
@ -189,25 +191,26 @@ impl tower::Service<ReadRequest> for DatabaseReadHandle {
/// The basic structure is: /// The basic structure is:
/// 1. `Request` is mapped to a handler function /// 1. `Request` is mapped to a handler function
/// 2. Handler function is called /// 2. Handler function is called
/// 3. [`Response`] is sent /// 3. [`BCResponse`] is sent
fn map_request( fn map_request(
env: &ConcreteEnv, // Access to the database env: &ConcreteEnv, // Access to the database
request: ReadRequest, // The request we must fulfill request: BCReadRequest, // The request we must fulfill
response_sender: ResponseSender, // The channel we must send the response back to response_sender: ResponseSender, // The channel we must send the response back to
) { ) {
use ReadRequest as R; use BCReadRequest as R;
/* SOMEDAY: pre-request handling, run some code for each request? */ /* SOMEDAY: pre-request handling, run some code for each request? */
let response = match request { let response = match request {
R::BlockExtendedHeader(block) => block_extended_header(env, block), R::BlockExtendedHeader(block) => block_extended_header(env, block),
R::BlockHash(block) => block_hash(env, block), R::BlockHash(block) => block_hash(env, block),
R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes),
R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range), R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range),
R::ChainHeight => chain_height(env), R::ChainHeight => chain_height(env),
R::GeneratedCoins => generated_coins(env), R::GeneratedCoins => generated_coins(env),
R::Outputs(map) => outputs(env, map), R::Outputs(map) => outputs(env, map),
R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec), R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec),
R::CheckKIsNotSpent(set) => check_k_is_not_spent(env, set), R::KeyImagesSpent(set) => key_images_spent(env, set),
}; };
if let Err(e) = response_sender.send(response) { if let Err(e) = response_sender.send(response) {
@ -286,7 +289,10 @@ macro_rules! get_tables {
// FIXME: implement multi-transaction read atomicity. // FIXME: implement multi-transaction read atomicity.
// <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576874589>. // <https://github.com/Cuprate/cuprate/pull/113#discussion_r1576874589>.
/// [`ReadRequest::BlockExtendedHeader`]. // TODO: The overhead of parallelism may be too much for every request; performance test to find the optimal
// amount of parallelism.
/// [`BCReadRequest::BlockExtendedHeader`].
#[inline] #[inline]
fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult { fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required. // Single-threaded, no `ThreadLocal` required.
@ -294,12 +300,12 @@ fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> Respon
let tx_ro = env_inner.tx_ro()?; let tx_ro = env_inner.tx_ro()?;
let tables = env_inner.open_tables(&tx_ro)?; let tables = env_inner.open_tables(&tx_ro)?;
Ok(Response::BlockExtendedHeader( Ok(BCResponse::BlockExtendedHeader(
get_block_extended_header_from_height(&block_height, &tables)?, get_block_extended_header_from_height(&block_height, &tables)?,
)) ))
} }
/// [`ReadRequest::BlockHash`]. /// [`BCReadRequest::BlockHash`].
#[inline] #[inline]
fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult { fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required. // Single-threaded, no `ThreadLocal` required.
@ -307,12 +313,40 @@ fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult {
let tx_ro = env_inner.tx_ro()?; let tx_ro = env_inner.tx_ro()?;
let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?; let table_block_infos = env_inner.open_db_ro::<BlockInfos>(&tx_ro)?;
Ok(Response::BlockHash( Ok(BCResponse::BlockHash(
get_block_info(&block_height, &table_block_infos)?.block_hash, get_block_info(&block_height, &table_block_infos)?.block_hash,
)) ))
} }
/// [`ReadRequest::BlockExtendedHeaderInRange`]. /// [`BCReadRequest::FilterUnknownHashes`].
#[inline]
fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet<BlockHash>) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required.
let env_inner = env.env_inner();
let tx_ro = env_inner.tx_ro()?;
let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
let mut err = None;
hashes.retain(
|block_hash| match block_exists(block_hash, &table_block_heights) {
Ok(exists) => exists,
Err(e) => {
err.get_or_insert(e);
false
}
},
);
if let Some(e) = err {
Err(e)
} else {
Ok(BCResponse::FilterUnknownHashes(hashes))
}
}
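`filter_unknown_hashes` above filters the set in place with `retain`, which cannot itself return a `Result`, so the first database error is parked in an `Option` and surfaced after the pass. A self-contained sketch of that pattern, with a stand-in fallible lookup in place of the real `block_exists` call:

```rust
use std::collections::HashSet;

// Stand-in for a fallible existence check such as `block_exists`.
fn exists(hash: &u64) -> Result<bool, String> {
    if *hash == 13 {
        Err("database error".into())
    } else {
        Ok(*hash % 2 == 0)
    }
}

fn filter_known(mut hashes: HashSet<u64>) -> Result<HashSet<u64>, String> {
    let mut err = None;
    hashes.retain(|hash| match exists(hash) {
        Ok(known) => known,
        Err(e) => {
            // `retain` cannot early-return, so remember only the
            // first error and drop the element; we bail out below.
            err.get_or_insert(e);
            false
        }
    });
    match err {
        Some(e) => Err(e),
        None => Ok(hashes),
    }
}

fn main() {
    assert_eq!(
        filter_known(HashSet::from([2, 3, 4])).unwrap(),
        HashSet::from([2, 4])
    );
    assert!(filter_known(HashSet::from([13])).is_err());
}
```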
/// [`BCReadRequest::BlockExtendedHeaderInRange`].
#[inline] #[inline]
fn block_extended_header_in_range( fn block_extended_header_in_range(
env: &ConcreteEnv, env: &ConcreteEnv,
@ -333,10 +367,10 @@ fn block_extended_header_in_range(
}) })
.collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?; .collect::<Result<Vec<ExtendedBlockHeader>, RuntimeError>>()?;
Ok(Response::BlockExtendedHeaderInRange(vec)) Ok(BCResponse::BlockExtendedHeaderInRange(vec))
} }
/// [`ReadRequest::ChainHeight`]. /// [`BCReadRequest::ChainHeight`].
#[inline] #[inline]
fn chain_height(env: &ConcreteEnv) -> ResponseResult { fn chain_height(env: &ConcreteEnv) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required. // Single-threaded, no `ThreadLocal` required.
@ -349,10 +383,10 @@ fn chain_height(env: &ConcreteEnv) -> ResponseResult {
let block_hash = let block_hash =
get_block_info(&chain_height.saturating_sub(1), &table_block_infos)?.block_hash; get_block_info(&chain_height.saturating_sub(1), &table_block_infos)?.block_hash;
Ok(Response::ChainHeight(chain_height, block_hash)) Ok(BCResponse::ChainHeight(chain_height, block_hash))
} }
/// [`ReadRequest::GeneratedCoins`]. /// [`BCReadRequest::GeneratedCoins`].
#[inline] #[inline]
fn generated_coins(env: &ConcreteEnv) -> ResponseResult { fn generated_coins(env: &ConcreteEnv) -> ResponseResult {
// Single-threaded, no `ThreadLocal` required. // Single-threaded, no `ThreadLocal` required.
@ -363,13 +397,13 @@ fn generated_coins(env: &ConcreteEnv) -> ResponseResult {
let top_height = top_block_height(&table_block_heights)?; let top_height = top_block_height(&table_block_heights)?;
Ok(Response::GeneratedCoins(cumulative_generated_coins( Ok(BCResponse::GeneratedCoins(cumulative_generated_coins(
&top_height, &top_height,
&table_block_infos, &table_block_infos,
)?)) )?))
} }
/// [`ReadRequest::Outputs`]. /// [`BCReadRequest::Outputs`].
#[inline] #[inline]
fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) -> ResponseResult { fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`. // Prepare tx/tables in `ThreadLocal`.
@ -407,10 +441,10 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap<Amount, HashSet<AmountIndex>>) ->
}) })
.collect::<Result<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>, RuntimeError>>()?; .collect::<Result<HashMap<Amount, HashMap<AmountIndex, OutputOnChain>>, RuntimeError>>()?;
Ok(Response::Outputs(map)) Ok(BCResponse::Outputs(map))
} }
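The multi-key handlers above prepare their read transaction and tables in a `ThreadLocal`, so each rayon worker opens them once and reuses them across items. A rough sketch of that shape, assuming the `rayon` and `thread_local` crates and a stand-in type where the real code opens database transactions:

```rust
use rayon::prelude::*;
use thread_local::ThreadLocal;

// Stand-in for a per-thread read transaction / table handle.
struct TxRo(u32);

fn process(items: &[u64]) -> u64 {
    let tls: ThreadLocal<TxRo> = ThreadLocal::new();
    items
        .par_iter()
        .map(|item| {
            // Created at most once per worker thread, then reused
            // for every item that thread processes.
            let _tx_ro = tls.get_or(|| TxRo(0));
            item + 1
        })
        .sum()
}

fn main() {
    assert_eq!(process(&[1, 2, 3]), 9);
}
```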
/// [`ReadRequest::NumberOutputsWithAmount`]. /// [`BCReadRequest::NumberOutputsWithAmount`].
#[inline] #[inline]
fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> ResponseResult { fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`. // Prepare tx/tables in `ThreadLocal`.
@ -452,12 +486,12 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec<Amount>) -> Respon
}) })
.collect::<Result<HashMap<Amount, usize>, RuntimeError>>()?; .collect::<Result<HashMap<Amount, usize>, RuntimeError>>()?;
Ok(Response::NumberOutputsWithAmount(map)) Ok(BCResponse::NumberOutputsWithAmount(map))
} }
/// [`ReadRequest::CheckKIsNotSpent`]. /// [`BCReadRequest::KeyImagesSpent`].
#[inline] #[inline]
fn check_k_is_not_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> ResponseResult { fn key_images_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> ResponseResult {
// Prepare tx/tables in `ThreadLocal`. // Prepare tx/tables in `ThreadLocal`.
let env_inner = env.env_inner(); let env_inner = env.env_inner();
let tx_ro = thread_local(env); let tx_ro = thread_local(env);
@ -486,8 +520,8 @@ fn check_k_is_not_spent(env: &ConcreteEnv, key_images: HashSet<KeyImage>) -> Res
// Else, `Ok(false)` will continue the iterator. // Else, `Ok(false)` will continue the iterator.
.find_any(|result| !matches!(result, Ok(false))) .find_any(|result| !matches!(result, Ok(false)))
{ {
None | Some(Ok(false)) => Ok(Response::CheckKIsNotSpent(true)), // Key image was NOT found. None | Some(Ok(false)) => Ok(BCResponse::KeyImagesSpent(false)), // Key image was NOT found.
Some(Ok(true)) => Ok(Response::CheckKIsNotSpent(false)), // Key image was found. Some(Ok(true)) => Ok(BCResponse::KeyImagesSpent(true)), // Key image was found.
Some(Err(e)) => Err(e), // A database error occurred. Some(Err(e)) => Err(e), // A database error occurred.
} }
} }
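Note that the rename flips the boolean's polarity: `CheckKIsNotSpent(true)` meant every key image was clean, while `KeyImagesSpent(true)` now means at least one was already spent. A hypothetical caller-side check (trimmed stand-in enum, not the full `BCResponse`) making the new meaning explicit:

```rust
// Trimmed stand-in for the real response enum.
enum BCResponse {
    KeyImagesSpent(bool),
}

/// Returns `true` when the transaction must be rejected because
/// at least one of its key images was already spent.
fn is_double_spend(response: &BCResponse) -> bool {
    match response {
        BCResponse::KeyImagesSpent(spent) => *spent,
    }
}

fn main() {
    assert!(!is_double_spend(&BCResponse::KeyImagesSpent(false)));
    assert!(is_double_spend(&BCResponse::KeyImagesSpent(true)));
}
```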

View file

@ -16,7 +16,7 @@ use tower::{Service, ServiceExt};
use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3};
use cuprate_types::{ use cuprate_types::{
service::{ReadRequest, Response, WriteRequest}, blockchain::{BCReadRequest, BCResponse, BCWriteRequest},
OutputOnChain, VerifiedBlockInformation, OutputOnChain, VerifiedBlockInformation,
}; };
@ -81,10 +81,10 @@ async fn test_template(
block.height = i as u64; block.height = i as u64;
// Request a block to be written, assert it was written. // Request a block to be written, assert it was written.
let request = WriteRequest::WriteBlock(block); let request = BCWriteRequest::WriteBlock(block);
let response_channel = writer.call(request); let response_channel = writer.call(request);
let response = response_channel.await.unwrap(); let response = response_channel.await.unwrap();
assert_eq!(response, Response::WriteBlockOk); assert_eq!(response, BCResponse::WriteBlockOk);
} }
//----------------------------------------------------------------------- Reset the transaction //----------------------------------------------------------------------- Reset the transaction
@ -100,36 +100,36 @@ async fn test_template(
// Next few lines are just for preparing the expected responses, // Next few lines are just for preparing the expected responses,
// see further below for usage. // see further below for usage.
let extended_block_header_0 = Ok(Response::BlockExtendedHeader( let extended_block_header_0 = Ok(BCResponse::BlockExtendedHeader(
get_block_extended_header_from_height(&0, &tables).unwrap(), get_block_extended_header_from_height(&0, &tables).unwrap(),
)); ));
let extended_block_header_1 = if block_fns.len() > 1 { let extended_block_header_1 = if block_fns.len() > 1 {
Ok(Response::BlockExtendedHeader( Ok(BCResponse::BlockExtendedHeader(
get_block_extended_header_from_height(&1, &tables).unwrap(), get_block_extended_header_from_height(&1, &tables).unwrap(),
)) ))
} else { } else {
Err(RuntimeError::KeyNotFound) Err(RuntimeError::KeyNotFound)
}; };
let block_hash_0 = Ok(Response::BlockHash( let block_hash_0 = Ok(BCResponse::BlockHash(
get_block_info(&0, tables.block_infos()).unwrap().block_hash, get_block_info(&0, tables.block_infos()).unwrap().block_hash,
)); ));
let block_hash_1 = if block_fns.len() > 1 { let block_hash_1 = if block_fns.len() > 1 {
Ok(Response::BlockHash( Ok(BCResponse::BlockHash(
get_block_info(&1, tables.block_infos()).unwrap().block_hash, get_block_info(&1, tables.block_infos()).unwrap().block_hash,
)) ))
} else { } else {
Err(RuntimeError::KeyNotFound) Err(RuntimeError::KeyNotFound)
}; };
let range_0_1 = Ok(Response::BlockExtendedHeaderInRange(vec![ let range_0_1 = Ok(BCResponse::BlockExtendedHeaderInRange(vec![
get_block_extended_header_from_height(&0, &tables).unwrap(), get_block_extended_header_from_height(&0, &tables).unwrap(),
])); ]));
let range_0_2 = if block_fns.len() >= 2 { let range_0_2 = if block_fns.len() >= 2 {
Ok(Response::BlockExtendedHeaderInRange(vec![ Ok(BCResponse::BlockExtendedHeaderInRange(vec![
get_block_extended_header_from_height(&0, &tables).unwrap(), get_block_extended_header_from_height(&0, &tables).unwrap(),
get_block_extended_header_from_height(&1, &tables).unwrap(), get_block_extended_header_from_height(&1, &tables).unwrap(),
])) ]))
@ -140,10 +140,10 @@ async fn test_template(
let chain_height = { let chain_height = {
let height = chain_height(tables.block_heights()).unwrap(); let height = chain_height(tables.block_heights()).unwrap();
let block_info = get_block_info(&height.saturating_sub(1), tables.block_infos()).unwrap(); let block_info = get_block_info(&height.saturating_sub(1), tables.block_infos()).unwrap();
Ok(Response::ChainHeight(height, block_info.block_hash)) Ok(BCResponse::ChainHeight(height, block_info.block_hash))
}; };
let cumulative_generated_coins = Ok(Response::GeneratedCoins(cumulative_generated_coins)); let cumulative_generated_coins = Ok(BCResponse::GeneratedCoins(cumulative_generated_coins));
let num_req = tables let num_req = tables
.outputs_iter() .outputs_iter()
@ -153,7 +153,7 @@ async fn test_template(
.map(|key| key.amount) .map(|key| key.amount)
.collect::<Vec<Amount>>(); .collect::<Vec<Amount>>();
let num_resp = Ok(Response::NumberOutputsWithAmount( let num_resp = Ok(BCResponse::NumberOutputsWithAmount(
num_req num_req
.iter() .iter()
.map(|amount| match tables.num_outputs().get(amount) { .map(|amount| match tables.num_outputs().get(amount) {
@ -168,21 +168,27 @@ async fn test_template(
// Contains a fake non-spent key-image. // Contains a fake non-spent key-image.
let ki_req = HashSet::from([[0; 32]]); let ki_req = HashSet::from([[0; 32]]);
let ki_resp = Ok(Response::CheckKIsNotSpent(true)); let ki_resp = Ok(BCResponse::KeyImagesSpent(false));
//----------------------------------------------------------------------- Assert expected response //----------------------------------------------------------------------- Assert expected response
// Assert read requests lead to the expected responses. // Assert read requests lead to the expected responses.
for (request, expected_response) in [ for (request, expected_response) in [
(ReadRequest::BlockExtendedHeader(0), extended_block_header_0), (
(ReadRequest::BlockExtendedHeader(1), extended_block_header_1), BCReadRequest::BlockExtendedHeader(0),
(ReadRequest::BlockHash(0), block_hash_0), extended_block_header_0,
(ReadRequest::BlockHash(1), block_hash_1), ),
(ReadRequest::BlockExtendedHeaderInRange(0..1), range_0_1), (
(ReadRequest::BlockExtendedHeaderInRange(0..2), range_0_2), BCReadRequest::BlockExtendedHeader(1),
(ReadRequest::ChainHeight, chain_height), extended_block_header_1,
(ReadRequest::GeneratedCoins, cumulative_generated_coins), ),
(ReadRequest::NumberOutputsWithAmount(num_req), num_resp), (BCReadRequest::BlockHash(0), block_hash_0),
(ReadRequest::CheckKIsNotSpent(ki_req), ki_resp), (BCReadRequest::BlockHash(1), block_hash_1),
(BCReadRequest::BlockExtendedHeaderInRange(0..1), range_0_1),
(BCReadRequest::BlockExtendedHeaderInRange(0..2), range_0_2),
(BCReadRequest::ChainHeight, chain_height),
(BCReadRequest::GeneratedCoins, cumulative_generated_coins),
(BCReadRequest::NumberOutputsWithAmount(num_req), num_resp),
(BCReadRequest::KeyImagesSpent(ki_req), ki_resp),
] { ] {
let response = reader.clone().oneshot(request).await; let response = reader.clone().oneshot(request).await;
println!("response: {response:#?}, expected_response: {expected_response:#?}"); println!("response: {response:#?}, expected_response: {expected_response:#?}");
@ -196,10 +202,10 @@ async fn test_template(
// Assert each key image we inserted comes back as "spent". // Assert each key image we inserted comes back as "spent".
for key_image in tables.key_images_iter().keys().unwrap() { for key_image in tables.key_images_iter().keys().unwrap() {
let key_image = key_image.unwrap(); let key_image = key_image.unwrap();
let request = ReadRequest::CheckKIsNotSpent(HashSet::from([key_image])); let request = BCReadRequest::KeyImagesSpent(HashSet::from([key_image]));
let response = reader.clone().oneshot(request).await; let response = reader.clone().oneshot(request).await;
println!("response: {response:#?}, key_image: {key_image:#?}"); println!("response: {response:#?}, key_image: {key_image:#?}");
assert_eq!(response.unwrap(), Response::CheckKIsNotSpent(false)); assert_eq!(response.unwrap(), BCResponse::KeyImagesSpent(true));
} }
//----------------------------------------------------------------------- Output checks //----------------------------------------------------------------------- Output checks
@ -260,10 +266,10 @@ async fn test_template(
.collect::<Vec<OutputOnChain>>(); .collect::<Vec<OutputOnChain>>();
// Send a request for every output we inserted before. // Send a request for every output we inserted before.
let request = ReadRequest::Outputs(map.clone()); let request = BCReadRequest::Outputs(map.clone());
let response = reader.clone().oneshot(request).await; let response = reader.clone().oneshot(request).await;
println!("Response::Outputs response: {response:#?}"); println!("Response::Outputs response: {response:#?}");
let Ok(Response::Outputs(response)) = response else { let Ok(BCResponse::Outputs(response)) = response else {
panic!("{response:#?}") panic!("{response:#?}")
}; };

View file

@ -6,15 +6,15 @@
use futures::channel::oneshot::Sender; use futures::channel::oneshot::Sender;
use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::service::Response; use cuprate_types::blockchain::BCResponse;
use crate::error::RuntimeError; use crate::error::RuntimeError;
//---------------------------------------------------------------------------------------------------- Types //---------------------------------------------------------------------------------------------------- Types
/// The actual type of the response. /// The actual type of the response.
/// ///
/// Either our [`Response`], or a database error occurred. /// Either our [`BCResponse`], or a database error occurred.
pub(super) type ResponseResult = Result<Response, RuntimeError>; pub(super) type ResponseResult = Result<BCResponse, RuntimeError>;
/// The `Receiver` channel that receives the read response. /// The `Receiver` channel that receives the read response.
/// ///

View file

@ -10,7 +10,7 @@ use futures::channel::oneshot;
use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_types::{ use cuprate_types::{
service::{Response, WriteRequest}, blockchain::{BCResponse, BCWriteRequest},
VerifiedBlockInformation, VerifiedBlockInformation,
}; };
@ -33,15 +33,15 @@ const WRITER_THREAD_NAME: &str = concat!(module_path!(), "::DatabaseWriter");
/// it is not [`Clone`]able as there is only ever 1 place within Cuprate /// it is not [`Clone`]able as there is only ever 1 place within Cuprate
/// that writes. /// that writes.
/// ///
/// Calling [`tower::Service::call`] with a [`DatabaseWriteHandle`] & [`WriteRequest`] /// Calling [`tower::Service::call`] with a [`DatabaseWriteHandle`] & [`BCWriteRequest`]
/// will return an `async`hronous channel that can be `.await`ed upon /// will return an `async`hronous channel that can be `.await`ed upon
/// to receive the corresponding [`Response`]. /// to receive the corresponding [`BCResponse`].
#[derive(Debug)] #[derive(Debug)]
pub struct DatabaseWriteHandle { pub struct DatabaseWriteHandle {
/// Sender channel to the database write thread-pool. /// Sender channel to the database write thread-pool.
/// ///
/// We provide the response channel for the thread-pool. /// We provide the response channel for the thread-pool.
pub(super) sender: crossbeam::channel::Sender<(WriteRequest, ResponseSender)>, pub(super) sender: crossbeam::channel::Sender<(BCWriteRequest, ResponseSender)>,
} }
impl DatabaseWriteHandle { impl DatabaseWriteHandle {
@ -65,8 +65,8 @@ impl DatabaseWriteHandle {
} }
} }
impl tower::Service<WriteRequest> for DatabaseWriteHandle { impl tower::Service<BCWriteRequest> for DatabaseWriteHandle {
type Response = Response; type Response = BCResponse;
type Error = RuntimeError; type Error = RuntimeError;
type Future = ResponseReceiver; type Future = ResponseReceiver;
@ -76,7 +76,7 @@ impl tower::Service<WriteRequest> for DatabaseWriteHandle {
} }
#[inline] #[inline]
fn call(&mut self, request: WriteRequest) -> Self::Future { fn call(&mut self, request: BCWriteRequest) -> Self::Future {
// Response channel we `.await` on. // Response channel we `.await` on.
let (response_sender, receiver) = oneshot::channel(); let (response_sender, receiver) = oneshot::channel();
@ -95,7 +95,7 @@ pub(super) struct DatabaseWriter {
/// Any caller can send some requests to this channel. /// Any caller can send some requests to this channel.
/// They send them alongside another `Response` channel, /// They send them alongside another `Response` channel,
/// which we will eventually send to. /// which we will eventually send to.
receiver: crossbeam::channel::Receiver<(WriteRequest, ResponseSender)>, receiver: crossbeam::channel::Receiver<(BCWriteRequest, ResponseSender)>,
/// Access to the database. /// Access to the database.
env: Arc<ConcreteEnv>, env: Arc<ConcreteEnv>,
@ -153,7 +153,7 @@ impl DatabaseWriter {
// FIXME: will there be more than 1 write request? // FIXME: will there be more than 1 write request?
// this won't have to be an enum. // this won't have to be an enum.
let response = match &request { let response = match &request {
WriteRequest::WriteBlock(block) => write_block(&self.env, block), BCWriteRequest::WriteBlock(block) => write_block(&self.env, block),
}; };
// If the database needs to resize, do so. // If the database needs to resize, do so.
@ -218,7 +218,7 @@ impl DatabaseWriter {
// Each function will return the [`Response`] that we // Each function will return the [`Response`] that we
// should send back to the caller in [`map_request()`]. // should send back to the caller in [`map_request()`].
/// [`WriteRequest::WriteBlock`]. /// [`BCWriteRequest::WriteBlock`].
#[inline] #[inline]
fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult { fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult {
let env_inner = env.env_inner(); let env_inner = env.env_inner();
@ -232,7 +232,7 @@ fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseR
match result { match result {
Ok(()) => { Ok(()) => {
TxRw::commit(tx_rw)?; TxRw::commit(tx_rw)?;
Ok(Response::WriteBlockOk) Ok(BCResponse::WriteBlockOk)
} }
Err(e) => { Err(e) => {
// INVARIANT: ensure database atomicity by aborting // INVARIANT: ensure database atomicity by aborting

View file

@ -6,13 +6,13 @@
)] )]
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use std::sync::{Arc, OnceLock}; use std::sync::OnceLock;
use hex_literal::hex; use hex_literal::hex;
use monero_serai::{block::Block, transaction::Transaction}; use monero_serai::{block::Block, transaction::Transaction};
use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_helper::map::combine_low_high_bits_to_u128;
use cuprate_types::{TransactionVerificationData, VerifiedBlockInformation}; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use crate::data::constants::{ use crate::data::constants::{
BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73, BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73,
@ -20,14 +20,14 @@ use crate::data::constants::{
}; };
//---------------------------------------------------------------------------------------------------- Conversion //---------------------------------------------------------------------------------------------------- Conversion
/// Converts `monero_serai`'s `Block` into a /// Converts [`monero_serai::Block`] into a
/// `cuprate_types::VerifiedBlockInformation` (superset). /// [`VerifiedBlockInformation`] (superset).
/// ///
/// To prevent pulling other code in order to actually calculate things /// To prevent pulling other code in order to actually calculate things
/// (e.g. `pow_hash`), some information must be provided statically, /// (e.g. `pow_hash`), some information must be provided statically,
/// this struct represents that data that must be provided. /// this struct represents that data that must be provided.
/// ///
/// Consider using `cuprate_test_utils::rpc` to get this data easily. /// Consider using [`cuprate_test_utils::rpc`] to get this data easily.
struct VerifiedBlockMap { struct VerifiedBlockMap {
block_blob: &'static [u8], block_blob: &'static [u8],
pow_hash: [u8; 32], pow_hash: [u8; 32],
@ -43,7 +43,7 @@ struct VerifiedBlockMap {
} }
impl VerifiedBlockMap { impl VerifiedBlockMap {
/// Turn the various static data bits in `self` into a `VerifiedBlockInformation`. /// Turn the various static data bits in `self` into a [`VerifiedBlockInformation`].
/// ///
/// Transactions are verified to at least match the block's, /// Transactions are verified to at least match the block's,
/// although the correctness of data (whether this block actually existed or not) /// although the correctness of data (whether this block actually existed or not)
@ -64,11 +64,7 @@ impl VerifiedBlockMap {
let block_blob = block_blob.to_vec(); let block_blob = block_blob.to_vec();
let block = Block::read(&mut block_blob.as_slice()).unwrap(); let block = Block::read(&mut block_blob.as_slice()).unwrap();
let txs: Vec<Arc<TransactionVerificationData>> = txs let txs = txs.iter().map(to_tx_verification_data).collect::<Vec<_>>();
.iter()
.map(to_tx_verification_data)
.map(Arc::new)
.collect();
assert_eq!( assert_eq!(
txs.len(), txs.len(),
@ -101,11 +97,11 @@ impl VerifiedBlockMap {
} }
} }
// Same as [`VerifiedBlockMap`] but for [`TransactionVerificationData`]. // Same as [`VerifiedBlockMap`] but for [`VerifiedTransactionInformation`].
fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> TransactionVerificationData { fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> VerifiedTransactionInformation {
let tx_blob = tx_blob.as_ref().to_vec(); let tx_blob = tx_blob.as_ref().to_vec();
let tx = Transaction::read(&mut tx_blob.as_slice()).unwrap(); let tx = Transaction::read(&mut tx_blob.as_slice()).unwrap();
TransactionVerificationData { VerifiedTransactionInformation {
tx_weight: tx.weight(), tx_weight: tx.weight(),
fee: tx.rct_signatures.base.fee, fee: tx.rct_signatures.base.fee,
tx_hash: tx.hash(), tx_hash: tx.hash(),
@ -239,7 +235,7 @@ verified_block_information_fn! {
//---------------------------------------------------------------------------------------------------- Transactions //---------------------------------------------------------------------------------------------------- Transactions
/// Generate a transaction accessor function with this signature: /// Generate a transaction accessor function with this signature:
/// `fn() -> &'static TransactionVerificationData` /// `fn() -> &'static VerifiedTransactionInformation`
/// ///
/// Same as [`verified_block_information_fn`] but for transactions. /// Same as [`verified_block_information_fn`] but for transactions.
macro_rules! transaction_verification_data_fn { macro_rules! transaction_verification_data_fn {
@ -249,7 +245,7 @@ macro_rules! transaction_verification_data_fn {
weight: $weight:literal, // Transaction weight weight: $weight:literal, // Transaction weight
hash: $hash:literal, // Transaction hash as a string literal hash: $hash:literal, // Transaction hash as a string literal
) => { ) => {
#[doc = concat!("Return [`", stringify!($tx_blob), "`] as a [`TransactionVerificationData`].")] #[doc = concat!("Return [`", stringify!($tx_blob), "`] as a [`VerifiedTransactionInformation`].")]
/// ///
/// ```rust /// ```rust
#[doc = "# use cuprate_test_utils::data::*;"] #[doc = "# use cuprate_test_utils::data::*;"]
@ -261,8 +257,8 @@ macro_rules! transaction_verification_data_fn {
#[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")] #[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")]
#[doc = "assert_eq!(tx.fee, tx.tx.rct_signatures.base.fee);"] #[doc = "assert_eq!(tx.fee, tx.tx.rct_signatures.base.fee);"]
/// ``` /// ```
pub fn $fn_name() -> &'static TransactionVerificationData { pub fn $fn_name() -> &'static VerifiedTransactionInformation {
static TX: OnceLock<TransactionVerificationData> = OnceLock::new(); static TX: OnceLock<VerifiedTransactionInformation> = OnceLock::new();
TX.get_or_init(|| to_tx_verification_data($tx_blob)) TX.get_or_init(|| to_tx_verification_data($tx_blob))
} }
}; };
@ -319,8 +315,8 @@ mod tests {
let mut txs = [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()] let mut txs = [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()]
.into_iter() .into_iter()
.flat_map(|block| block.txs.iter().map(|arc| (**arc).clone())) .flat_map(|block| block.txs.iter().cloned())
.collect::<Vec<TransactionVerificationData>>(); .collect::<Vec<VerifiedTransactionInformation>>();
txs.extend([ txs.extend([
tx_v1_sig0().clone(), tx_v1_sig0().clone(),
@ -333,7 +329,7 @@ mod tests {
let tx_rpc = rpc let tx_rpc = rpc
.get_transaction_verification_data(&[tx.tx_hash]) .get_transaction_verification_data(&[tx.tx_hash])
.await .await
.collect::<Vec<TransactionVerificationData>>() .collect::<Vec<VerifiedTransactionInformation>>()
.pop() .pop()
.unwrap(); .unwrap();
assert_eq!(tx, tx_rpc); assert_eq!(tx, tx_rpc);

View file

@ -19,10 +19,10 @@
//! The free functions provide access to typed data found in `cuprate_types`: //! The free functions provide access to typed data found in `cuprate_types`:
//! ```rust //! ```rust
//! # use cuprate_test_utils::data::*; //! # use cuprate_test_utils::data::*;
//! use cuprate_types::{VerifiedBlockInformation, TransactionVerificationData}; //! use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
//! //!
//! let block: VerifiedBlockInformation = block_v16_tx0().clone(); //! let block: VerifiedBlockInformation = block_v16_tx0().clone();
//! let tx: TransactionVerificationData = tx_v1_sig0().clone(); //! let tx: VerifiedTransactionInformation = tx_v1_sig0().clone();
//! ``` //! ```
mod constants; mod constants;

View file

@ -1,8 +1,6 @@
//! HTTP RPC client. //! HTTP RPC client.
//---------------------------------------------------------------------------------------------------- Use //---------------------------------------------------------------------------------------------------- Use
use std::sync::Arc;
use serde::Deserialize; use serde::Deserialize;
use serde_json::json; use serde_json::json;
use tokio::task::spawn_blocking; use tokio::task::spawn_blocking;
@ -12,7 +10,7 @@ use monero_serai::{
rpc::{HttpRpc, Rpc}, rpc::{HttpRpc, Rpc},
}; };
use cuprate_types::{TransactionVerificationData, VerifiedBlockInformation}; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
use crate::rpc::constants::LOCALHOST_RPC_URL; use crate::rpc::constants::LOCALHOST_RPC_URL;
@ -110,10 +108,9 @@ impl HttpRpcClient {
.await .await
.unwrap(); .unwrap();
let txs: Vec<Arc<TransactionVerificationData>> = self let txs: Vec<VerifiedTransactionInformation> = self
.get_transaction_verification_data(&block.txs) .get_transaction_verification_data(&block.txs)
.await .await
.map(Arc::new)
.collect(); .collect();
let block_header = result.block_header; let block_header = result.block_header;
@ -152,7 +149,7 @@ impl HttpRpcClient {
} }
} }
/// Request data and map the response to a [`TransactionVerificationData`]. /// Request data and map the response to a [`VerifiedTransactionInformation`].
/// ///
/// # Panics /// # Panics
/// This function will panic at any error point, e.g., /// This function will panic at any error point, e.g.,
@ -160,7 +157,7 @@ impl HttpRpcClient {
pub async fn get_transaction_verification_data<'a>( pub async fn get_transaction_verification_data<'a>(
&self, &self,
tx_hashes: &'a [[u8; 32]], tx_hashes: &'a [[u8; 32]],
) -> impl Iterator<Item = TransactionVerificationData> + 'a { ) -> impl Iterator<Item = VerifiedTransactionInformation> + 'a {
self.rpc self.rpc
.get_transactions(tx_hashes) .get_transactions(tx_hashes)
.await .await
@ -170,7 +167,7 @@ impl HttpRpcClient {
.map(|(i, tx)| { .map(|(i, tx)| {
let tx_hash = tx.hash(); let tx_hash = tx.hash();
assert_eq!(tx_hash, tx_hashes[i]); assert_eq!(tx_hash, tx_hashes[i]);
TransactionVerificationData { VerifiedTransactionInformation {
tx_blob: tx.serialize(), tx_blob: tx.serialize(),
tx_weight: tx.weight(), tx_weight: tx.weight(),
tx_hash, tx_hash,

View file

@ -9,14 +9,11 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/types"
keywords = ["cuprate", "types"] keywords = ["cuprate", "types"]
[features] [features]
default = ["service"] default = ["blockchain"]
service = [] blockchain = []
[dependencies] [dependencies]
borsh = { workspace = true, optional = true }
cfg-if = { workspace = true }
curve25519-dalek = { workspace = true } curve25519-dalek = { workspace = true }
monero-serai = { workspace = true } monero-serai = { workspace = true }
serde = { workspace = true, optional = true }
[dev-dependencies] [dev-dependencies]

View file

@ -1,10 +1,7 @@
//! Database [`ReadRequest`]s, [`WriteRequest`]s, and [`Response`]s. //! Database [`BCReadRequest`]s, [`BCWriteRequest`]s, and [`BCResponse`]s.
//!
//! See [`cuprate_database`](https://github.com/Cuprate/cuprate/blob/00c3692eac6b2669e74cfd8c9b41c7e704c779ad/database/src/service/mod.rs#L1-L59)'s
//! `service` module for more usage/documentation.
//! //!
//! Tests that assert particular requests lead to particular //! Tests that assert particular requests lead to particular
//! responses are also tested in `cuprate_database`. //! responses are also tested in Cuprate's blockchain database crate.
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use std::{ use std::{
@ -20,18 +17,16 @@ use serde::{Deserialize, Serialize};
use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}; use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation};
//---------------------------------------------------------------------------------------------------- ReadRequest //---------------------------------------------------------------------------------------------------- ReadRequest
/// A read request to the database. /// A read request to the blockchain database.
/// ///
/// This pairs with [`Response`], where each variant here /// This pairs with [`BCResponse`], where each variant here
/// matches in name with a `Response` variant. For example, /// matches in name with a [`BCResponse`] variant. For example,
/// the proper response for a [`ReadRequest::BlockHash`] /// the proper response for a [`BCReadRequest::BlockHash`]
/// would be a [`Response::BlockHash`]. /// would be a [`BCResponse::BlockHash`].
/// ///
/// See `Response` for the expected responses per `Request`. /// See `Response` for the expected responses per `Request`.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum BCReadRequest {
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub enum ReadRequest {
/// Request a block's extended header. /// Request a block's extended header.
/// ///
/// The input is the block's height. /// The input is the block's height.
@ -42,6 +37,11 @@ pub enum ReadRequest {
/// The input is the block's height. /// The input is the block's height.
BlockHash(u64), BlockHash(u64),
/// Removes the block hashes that are not in the _main_ chain.
///
/// This should filter (remove) hashes in alt-blocks as well.
FilterUnknownHashes(HashSet<[u8; 32]>),
/// Request a range of block extended headers. /// Request a range of block extended headers.
/// ///
/// The input is a range of block heights. /// The input is a range of block heights.
@ -86,18 +86,17 @@ pub enum ReadRequest {
/// Check that all key images within a set are not spent. /// Check that all key images within a set are not spent.
/// ///
/// Input is a set of key images. /// Input is a set of key images.
CheckKIsNotSpent(HashSet<[u8; 32]>), KeyImagesSpent(HashSet<[u8; 32]>),
} }
//---------------------------------------------------------------------------------------------------- WriteRequest //---------------------------------------------------------------------------------------------------- WriteRequest
/// A write request to the database. /// A write request to the blockchain database.
/// ///
/// There is currently only 1 write request to the database, /// There is currently only 1 write request to the database,
/// as such, the only valid [`Response`] to this request is /// as such, the only valid [`BCResponse`] to this request is
/// [`Response::WriteBlockOk`]. /// [`BCResponse::WriteBlockOk`].
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] pub enum BCWriteRequest {
pub enum WriteRequest {
/// Request that a block be written to the database. /// Request that a block be written to the database.
/// ///
/// Input is an already verified block. /// Input is an already verified block.
@ -109,60 +108,64 @@ pub enum WriteRequest {
/// ///
/// These are the data types returned when sending a `Request`. /// These are the data types returned when sending a `Request`.
/// ///
/// This pairs with [`ReadRequest`] and [`WriteRequest`], /// This pairs with [`BCReadRequest`] and [`BCWriteRequest`],
/// see those two for more info. /// see those two for more info.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] pub enum BCResponse {
pub enum Response {
//------------------------------------------------------ Reads //------------------------------------------------------ Reads
/// Response to [`ReadRequest::BlockExtendedHeader`]. /// Response to [`BCReadRequest::BlockExtendedHeader`].
/// ///
/// Inner value is the extended header of the requested block. /// Inner value is the extended header of the requested block.
BlockExtendedHeader(ExtendedBlockHeader), BlockExtendedHeader(ExtendedBlockHeader),
/// Response to [`ReadRequest::BlockHash`]. /// Response to [`BCReadRequest::BlockHash`].
/// ///
/// Inner value is the hash of the requested block. /// Inner value is the hash of the requested block.
BlockHash([u8; 32]), BlockHash([u8; 32]),
/// Response to [`ReadRequest::BlockExtendedHeaderInRange`]. /// Response to [`BCReadRequest::FilterUnknownHashes`].
///
/// Inner value is the list of hashes that were in the main chain.
FilterUnknownHashes(HashSet<[u8; 32]>),
/// Response to [`BCReadRequest::BlockExtendedHeaderInRange`].
/// ///
/// Inner value is the list of extended header(s) of the requested block(s). /// Inner value is the list of extended header(s) of the requested block(s).
BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>), BlockExtendedHeaderInRange(Vec<ExtendedBlockHeader>),
/// Response to [`ReadRequest::ChainHeight`]. /// Response to [`BCReadRequest::ChainHeight`].
/// ///
/// Inner value is the chain height, and the top block's hash. /// Inner value is the chain height, and the top block's hash.
ChainHeight(u64, [u8; 32]), ChainHeight(u64, [u8; 32]),
/// Response to [`ReadRequest::GeneratedCoins`]. /// Response to [`BCReadRequest::GeneratedCoins`].
/// ///
/// Inner value is the total amount of generated coins so far, in atomic units. /// Inner value is the total amount of generated coins so far, in atomic units.
GeneratedCoins(u64), GeneratedCoins(u64),
/// Response to [`ReadRequest::Outputs`]. /// Response to [`BCReadRequest::Outputs`].
/// ///
/// Inner value is all the outputs requested, /// Inner value is all the outputs requested,
/// associated with their amount and amount index. /// associated with their amount and amount index.
Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>), Outputs(HashMap<u64, HashMap<u64, OutputOnChain>>),
/// Response to [`ReadRequest::NumberOutputsWithAmount`]. /// Response to [`BCReadRequest::NumberOutputsWithAmount`].
/// ///
/// Inner value is a `HashMap` of all the outputs requested where: /// Inner value is a `HashMap` of all the outputs requested where:
/// - Key = output amount /// - Key = output amount
/// - Value = count of outputs with the same amount /// - Value = count of outputs with the same amount
NumberOutputsWithAmount(HashMap<u64, usize>), NumberOutputsWithAmount(HashMap<u64, usize>),
/// Response to [`ReadRequest::CheckKIsNotSpent`]. /// Response to [`BCReadRequest::KeyImagesSpent`].
/// ///
/// The inner value is `true` if _any_ of the key images /// The inner value is `true` if _any_ of the key images
/// were spent (exited in the database already). /// were spent (existed in the database already).
/// ///
/// The inner value is `false` if _none_ of the key images were spent. /// The inner value is `false` if _none_ of the key images were spent.
CheckKIsNotSpent(bool), KeyImagesSpent(bool),
//------------------------------------------------------ Writes //------------------------------------------------------ Writes
/// Response to [`WriteRequest::WriteBlock`]. /// Response to [`BCWriteRequest::WriteBlock`].
/// ///
/// This response indicates that the requested block has /// This response indicates that the requested block has
/// successfully been written to the database without error. /// successfully been written to the database without error.
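As the doc comments above spell out, every `BCReadRequest` variant pairs by name with a `BCResponse` variant. A minimal sketch of how a caller leans on that convention (trimmed stand-in enums; the real ones live in `cuprate_types::blockchain`):

```rust
// Trimmed stand-ins for the real enums.
enum BCReadRequest {
    BlockHash(u64),
    ChainHeight,
}

enum BCResponse {
    BlockHash([u8; 32]),
    ChainHeight(u64, [u8; 32]),
}

/// A caller matches on the response variant named after its request;
/// any other variant would indicate a protocol bug.
fn expect_block_hash(response: BCResponse) -> [u8; 32] {
    match response {
        BCResponse::BlockHash(hash) => hash,
        _ => unreachable!("response variant must match the request variant"),
    }
}

fn main() {
    let _request = BCReadRequest::BlockHash(0);
    assert_eq!(expect_block_hash(BCResponse::BlockHash([0; 32])), [0; 32]);
}
```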

View file

@ -3,8 +3,8 @@
//! This crate is a kitchen-sink for data types that are shared across `Cuprate`. //! This crate is a kitchen-sink for data types that are shared across `Cuprate`.
//! //!
//! # Features flags //! # Features flags
//! The `service` module, containing `cuprate_database` request/response //! The [`blockchain`] module, containing the blockchain database request/response
//! types, must be enabled with the `service` feature (on by default). //! types, must be enabled with the `blockchain` feature (on by default).
//---------------------------------------------------------------------------------------------------- Lints //---------------------------------------------------------------------------------------------------- Lints
// Forbid lints. // Forbid lints.
@ -88,14 +88,11 @@
mod types; mod types;
pub use types::{ pub use types::{
ExtendedBlockHeader, OutputOnChain, TransactionVerificationData, VerifiedBlockInformation, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation, VerifiedTransactionInformation,
}; };
//---------------------------------------------------------------------------------------------------- Feature-gated //---------------------------------------------------------------------------------------------------- Feature-gated
cfg_if::cfg_if! { #[cfg(feature = "blockchain")]
if #[cfg(feature = "service")] { pub mod blockchain;
pub mod service;
}
}
//---------------------------------------------------------------------------------------------------- Private //---------------------------------------------------------------------------------------------------- Private

View file

@ -1,28 +1,17 @@
//! Various shared data types in Cuprate. //! Various shared data types in Cuprate.
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use std::sync::Arc;
use curve25519_dalek::edwards::EdwardsPoint; use curve25519_dalek::edwards::EdwardsPoint;
use monero_serai::{ use monero_serai::{
block::Block, block::Block,
transaction::{Timelock, Transaction}, transaction::{Timelock, Transaction},
}; };
#[cfg(feature = "borsh")]
use borsh::{BorshDeserialize, BorshSerialize};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
//---------------------------------------------------------------------------------------------------- ExtendedBlockHeader //---------------------------------------------------------------------------------------------------- ExtendedBlockHeader
/// Extended header data of a block. /// Extended header data of a block.
/// ///
/// This contains various metadata of a block, but not the block blob itself. /// This contains various metadata of a block, but not the block blob itself.
///
/// For more definitions, see also: <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_last_block_header>.
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub struct ExtendedBlockHeader { pub struct ExtendedBlockHeader {
/// The block's major version. /// The block's major version.
/// ///
@ -46,15 +35,12 @@ pub struct ExtendedBlockHeader {
pub long_term_weight: usize, pub long_term_weight: usize,
} }
//---------------------------------------------------------------------------------------------------- TransactionVerificationData //---------------------------------------------------------------------------------------------------- VerifiedTransactionInformation
/// Data needed to verify a transaction. /// Verified information of a transaction.
/// ///
/// This represents data that allows verification of a transaction, /// This represents a transaction in a valid block.
/// although it doesn't mean it _has_ been verified.
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai pub struct VerifiedTransactionInformation {
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub struct TransactionVerificationData {
/// The transaction itself. /// The transaction itself.
pub tx: Transaction, pub tx: Transaction,
/// The serialized byte form of [`Self::tx`]. /// The serialized byte form of [`Self::tx`].
@ -77,11 +63,7 @@ pub struct TransactionVerificationData {
/// Verified information of a block. /// Verified information of a block.
/// ///
/// This represents a block that has already been verified to be correct. /// This represents a block that has already been verified to be correct.
///
/// For more definitions, see also: <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_block>.
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub struct VerifiedBlockInformation { pub struct VerifiedBlockInformation {
/// The block itself. /// The block itself.
pub block: Block, pub block: Block,
@ -90,7 +72,7 @@ pub struct VerifiedBlockInformation {
/// [`Block::serialize`]. /// [`Block::serialize`].
pub block_blob: Vec<u8>, pub block_blob: Vec<u8>,
/// All the transactions in the block, excluding the [`Block::miner_tx`]. /// All the transactions in the block, excluding the [`Block::miner_tx`].
pub txs: Vec<Arc<TransactionVerificationData>>, pub txs: Vec<VerifiedTransactionInformation>,
/// The block's hash. /// The block's hash.
/// ///
/// [`Block::hash`]. /// [`Block::hash`].
@ -111,9 +93,7 @@ pub struct VerifiedBlockInformation {
//---------------------------------------------------------------------------------------------------- OutputOnChain //---------------------------------------------------------------------------------------------------- OutputOnChain
/// An already existing transaction output. /// An already existing transaction output.
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Copy, Debug, PartialEq, Eq)]
// #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] // FIXME: monero_serai
// #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
pub struct OutputOnChain { pub struct OutputOnChain {
/// The block height this output belongs to. /// The block height this output belongs to.
pub height: u64, pub height: u64,
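`OutputOnChain` also gains `Copy` above, which works because every field (the height, the timelock, and the curve points) is itself `Copy`. A tiny stand-in sketch of what the derive buys callers:

```rust
// Trimmed stand-in: only the field visible above.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct OutputOnChain {
    height: u64,
}

fn main() {
    let a = OutputOnChain { height: 7 };
    let b = a; // copied, not moved
    assert_eq!(a, b); // `a` remains usable afterwards
}
```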