mirror of https://github.com/Cuprate/cuprate.git
synced 2024-12-23 03:59:31 +00:00

Merge branch 'main' into cuprated-blockchain

This commit is contained in: commit 20033eedb0

146 changed files with 3898 additions and 2161 deletions

267 Cargo.lock (generated)
@@ -17,6 +17,12 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

[[package]]
name = "adler2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"

[[package]]
name = "ahash"
version = "0.8.11"

@@ -44,21 +50,6 @@ dependencies = [
 "libc",
]

[[package]]
name = "anstream"
version = "0.6.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b"
dependencies = [
 "anstyle",
 "anstyle-parse",
 "anstyle-query",
 "anstyle-wincon",
 "colorchoice",
 "is_terminal_polyfill",
 "utf8parse",
]

[[package]]
name = "anstyle"
version = "1.0.7"

@@ -66,32 +57,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b"

[[package]]
name = "anstyle-parse"
version = "0.2.4"
name = "anyhow"
version = "1.0.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4"
dependencies = [
 "utf8parse",
]

[[package]]
name = "anstyle-query"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391"
dependencies = [
 "windows-sys 0.52.0",
]

[[package]]
name = "anstyle-wincon"
version = "3.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19"
dependencies = [
 "anstyle",
 "windows-sys 0.52.0",
]
checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8"

[[package]]
name = "async-stream"

@@ -203,7 +172,7 @@ dependencies = [
 "cc",
 "cfg-if",
 "libc",
 "miniz_oxide",
 "miniz_oxide 0.7.3",
 "object",
 "rustc-demangle",
]

@@ -395,10 +364,8 @@ version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f"
dependencies = [
 "anstream",
 "anstyle",
 "clap_lex",
 "strsim",
]

[[package]]

@@ -419,12 +386,6 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70"

[[package]]
name = "colorchoice"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422"

[[package]]
name = "core-foundation"
version = "0.9.4"

@@ -697,6 +658,7 @@ version = "0.5.0"
dependencies = [
 "bytes",
 "cuprate-fixed-bytes",
 "cuprate-helper",
 "hex",
 "paste",
 "ref-cast",

@@ -764,6 +726,7 @@ version = "0.1.0"
dependencies = [
 "bitflags 2.5.0",
 "bytes",
 "cuprate-helper",
 "futures",
 "proptest",
 "rand",

@@ -842,11 +805,13 @@ dependencies = [
name = "cuprate-rpc-interface"
version = "0.0.0"
dependencies = [
 "anyhow",
 "axum",
 "cuprate-epee-encoding",
 "cuprate-helper",
 "cuprate-json-rpc",
 "cuprate-rpc-types",
 "cuprate-test-utils",
 "futures",
 "paste",
 "serde",

@@ -862,12 +827,9 @@ version = "0.0.0"
dependencies = [
 "cuprate-epee-encoding",
 "cuprate-fixed-bytes",
 "cuprate-json-rpc",
 "cuprate-test-utils",
 "cuprate-types",
 "monero-serai",
 "paste",
 "pretty_assertions",
 "serde",
 "serde_json",
]

@@ -901,12 +863,29 @@ dependencies = [
[[package]]
name = "cuprate-txpool"
version = "0.0.0"
dependencies = [
 "bitflags 2.5.0",
 "bytemuck",
 "cuprate-database",
 "cuprate-database-service",
 "cuprate-helper",
 "cuprate-test-utils",
 "cuprate-types",
 "hex",
 "hex-literal",
 "monero-serai",
 "rayon",
 "serde",
 "tempfile",
 "thiserror",
 "tokio",
 "tower",
]

[[package]]
name = "cuprate-types"
version = "0.0.0"
dependencies = [
 "borsh",
 "bytes",
 "cuprate-epee-encoding",
 "cuprate-fixed-bytes",

@@ -926,6 +905,7 @@ dependencies = [
 "bytes",
 "cuprate-epee-encoding",
 "cuprate-fixed-bytes",
 "cuprate-helper",
 "cuprate-levin",
 "cuprate-types",
 "hex",

@@ -936,18 +916,63 @@ dependencies = [
name = "cuprated"
version = "0.1.0"
dependencies = [
 "anyhow",
 "async-trait",
 "bitflags 2.5.0",
 "borsh",
 "bytemuck",
 "bytes",
 "cfg-if",
 "chrono",
 "clap",
 "crossbeam",
 "crypto-bigint",
 "cuprate-address-book",
 "cuprate-async-buffer",
 "cuprate-blockchain",
 "cuprate-consensus",
 "cuprate-consensus-rules",
 "cuprate-cryptonight",
 "cuprate-dandelion-tower",
 "cuprate-database",
 "cuprate-database-service",
 "cuprate-epee-encoding",
 "cuprate-fast-sync",
 "cuprate-fixed-bytes",
 "cuprate-helper",
 "cuprate-json-rpc",
 "cuprate-levin",
 "cuprate-p2p",
 "cuprate-p2p-core",
 "cuprate-pruning",
 "cuprate-rpc-interface",
 "cuprate-rpc-types",
 "cuprate-test-utils",
 "cuprate-txpool",
 "cuprate-types",
 "cuprate-wire",
 "curve25519-dalek",
 "dashmap",
 "dirs",
 "futures",
 "hex",
 "hex-literal",
 "indexmap",
 "monero-serai",
 "paste",
 "pin-project",
 "rand",
 "rand_distr",
 "randomx-rs",
 "rayon",
 "serde",
 "serde_bytes",
 "serde_json",
 "thiserror",
 "thread_local",
 "tokio",
 "tokio-stream",
 "tokio-util",
 "tower",
 "tracing",
 "tracing-subscriber",

@@ -985,7 +1010,7 @@ dependencies = [
[[package]]
name = "dalek-ff-group"
version = "0.4.1"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "crypto-bigint",
 "curve25519-dalek",

@@ -1129,18 +1154,18 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"

[[package]]
name = "flate2"
version = "1.0.30"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae"
checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253"
dependencies = [
 "crc32fast",
 "miniz_oxide",
 "miniz_oxide 0.8.0",
]

[[package]]
name = "flexible-transcript"
version = "0.3.2"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "blake2",
 "digest",

@@ -1288,9 +1313,9 @@ dependencies = [

[[package]]
name = "h2"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab"
checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205"
dependencies = [
 "atomic-waker",
 "bytes",

@@ -1644,12 +1669,6 @@ dependencies = [
 "hashbrown",
]

[[package]]
name = "is_terminal_polyfill"
version = "1.70.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800"

[[package]]
name = "itoa"
version = "1.0.11"

@@ -1790,6 +1809,15 @@ dependencies = [
 "adler",
]

[[package]]
name = "miniz_oxide"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
dependencies = [
 "adler2",
]

[[package]]
name = "mio"
version = "0.8.11"

@@ -1804,7 +1832,7 @@ dependencies = [
[[package]]
name = "monero-address"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "monero-io",

@@ -1817,7 +1845,7 @@ dependencies = [
[[package]]
name = "monero-borromean"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "monero-generators",

@@ -1830,7 +1858,7 @@ dependencies = [
[[package]]
name = "monero-bulletproofs"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "monero-generators",

@@ -1845,7 +1873,7 @@ dependencies = [
[[package]]
name = "monero-clsag"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "dalek-ff-group",

@@ -1865,7 +1893,7 @@ dependencies = [
[[package]]
name = "monero-generators"
version = "0.4.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "dalek-ff-group",

@@ -1879,7 +1907,7 @@ dependencies = [
[[package]]
name = "monero-io"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "std-shims",

@@ -1888,7 +1916,7 @@ dependencies = [
[[package]]
name = "monero-mlsag"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "monero-generators",

@@ -1902,7 +1930,7 @@ dependencies = [
[[package]]
name = "monero-primitives"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "monero-generators",

@@ -1915,7 +1943,7 @@ dependencies = [
[[package]]
name = "monero-rpc"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "async-trait",
 "curve25519-dalek",

@@ -1932,7 +1960,7 @@ dependencies = [
[[package]]
name = "monero-serai"
version = "0.1.4-alpha"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "curve25519-dalek",
 "hex-literal",

@@ -1950,7 +1978,7 @@ dependencies = [
[[package]]
name = "monero-simple-request-rpc"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "async-trait",
 "digest_auth",

@@ -1960,16 +1988,6 @@ dependencies = [
 "tokio",
]

[[package]]
name = "nu-ansi-term"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
dependencies = [
 "overload",
 "winapi",
]

[[package]]
name = "num-traits"
version = "0.2.19"

@@ -2017,12 +2035,6 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"

[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"

[[package]]
name = "page_size"
version = "0.6.0"

@@ -2468,9 +2480,9 @@ dependencies = [

[[package]]
name = "rustls-pki-types"
version = "1.7.0"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d"
checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0"

[[package]]
name = "rustls-webpki"

@@ -2560,6 +2572,15 @@ dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_bytes"
version = "0.11.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a"
dependencies = [
 "serde",
]

[[package]]
name = "serde_derive"
version = "1.0.203"

@@ -2625,15 +2646,6 @@ dependencies = [
 "keccak",
]

[[package]]
name = "sharded-slab"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
dependencies = [
 "lazy_static",
]

[[package]]
name = "signal-hook-registry"
version = "1.4.2"

@@ -2646,7 +2658,7 @@ dependencies = [
[[package]]
name = "simple-request"
version = "0.1.0"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "http-body-util",
 "hyper",

@@ -2702,18 +2714,12 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "std-shims"
version = "0.1.1"
source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8"
source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79"
dependencies = [
 "hashbrown",
 "spin",
]

[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"

[[package]]
name = "subtle"
version = "2.5.0"

@@ -3001,18 +3007,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
dependencies = [
 "once_cell",
 "valuable",
]

[[package]]
name = "tracing-log"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
dependencies = [
 "log",
 "once_cell",
 "tracing-core",
]

[[package]]

@@ -3021,12 +3015,7 @@ version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
dependencies = [
 "nu-ansi-term",
 "sharded-slab",
 "smallvec",
 "thread_local",
 "tracing-core",
 "tracing-log",
]

[[package]]

@@ -3061,9 +3050,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"

[[package]]
name = "ureq"
version = "2.10.0"
version = "2.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea"
checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a"
dependencies = [
 "base64",
 "flate2",

@@ -3100,18 +3089,6 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"

[[package]]
name = "utf8parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"

[[package]]
name = "valuable"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"

[[package]]
name = "version_check"
version = "0.9.4"

@@ -3198,9 +3175,9 @@ checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"

[[package]]
name = "webpki-roots"
version = "0.26.3"
version = "0.26.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd"
checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a"
dependencies = [
 "rustls-pki-types",
]
11 Cargo.toml

@@ -48,6 +48,7 @@ opt-level = 1
opt-level = 3

[workspace.dependencies]
anyhow = { version = "1.0.87", default-features = false }
async-trait = { version = "0.1.74", default-features = false }
bitflags = { version = "2.4.2", default-features = false }
borsh = { version = "1.2.1", default-features = false }

@@ -76,7 +77,7 @@ serde_bytes = { version = "0.11.12", default-features = false }
serde_json = { version = "1.0.108", default-features = false }
serde = { version = "1.0.190", default-features = false }
thiserror = { version = "1.0.50", default-features = false }
thread_local = { version = "1.1.7", default-features = false }
thread_local = { version = "1.1.7", default-features = false }
tokio-util = { version = "0.7.10", default-features = false }
tokio-stream = { version = "0.1.14", default-features = false }
tokio = { version = "1.33.0", default-features = false }

@@ -262,6 +263,7 @@ empty_structs_with_brackets = "deny"
empty_enum_variants_with_brackets = "deny"
empty_drop = "deny"
clone_on_ref_ptr = "deny"
upper_case_acronyms = "deny"

# Hot
# inline_always = "deny"

@@ -278,13 +280,15 @@ clone_on_ref_ptr = "deny"
# allow_attributes_without_reason = "deny"
# missing_assert_message = "deny"
# missing_docs_in_private_items = "deny"
# undocumented_unsafe_blocks = "deny"
undocumented_unsafe_blocks = "deny"
# multiple_unsafe_ops_per_block = "deny"
# single_char_lifetime_names = "deny"
# wildcard_enum_match_arm = "deny"

[workspace.lints.rust]
# Cold
future_incompatible = { level = "deny", priority = -1 }
nonstandard_style = { level = "deny", priority = -1 }
absolute_paths_not_starting_with_crate = "deny"
explicit_outlives_requirements = "deny"
keyword_idents_2018 = "deny"

@@ -305,10 +309,11 @@ ambiguous_glob_imports = "deny"
unused_unsafe = "deny"

# Warm
let_underscore_drop = "deny"
let_underscore = { level = "deny", priority = -1 }
unreachable_pub = "deny"
unused_qualifications = "deny"
variant_size_differences = "deny"
non_camel_case_types = "deny"

# Hot
# unused_results = "deny"
@@ -8,23 +8,69 @@ authors = ["Boog900", "hinto-janai", "SyntheticBird45"]
repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated"

[dependencies]
cuprate-consensus = { path = "../../consensus" }
cuprate-blockchain = { path = "../../storage/blockchain" }
cuprate-p2p = { path = "../../p2p/p2p" }
cuprate-p2p-core = { path = "../../p2p/p2p-core" }
cuprate-types = { path = "../../types" }
cuprate-cryptonight = { path = "../../cryptonight" }
# TODO: after v1.0.0, remove unneeded dependencies.
cuprate-consensus = { path = "../../consensus" }
cuprate-fast-sync = { path = "../../consensus/fast-sync" }
cuprate-consensus-rules = { path = "../../consensus/rules" }
cuprate-cryptonight = { path = "../../cryptonight" }
cuprate-helper = { path = "../../helper" }
cuprate-epee-encoding = { path = "../../net/epee-encoding" }
cuprate-fixed-bytes = { path = "../../net/fixed-bytes" }
cuprate-levin = { path = "../../net/levin" }
cuprate-wire = { path = "../../net/wire" }
cuprate-p2p = { path = "../../p2p/p2p" }
cuprate-p2p-core = { path = "../../p2p/p2p-core" }
cuprate-dandelion-tower = { path = "../../p2p/dandelion-tower" }
cuprate-async-buffer = { path = "../../p2p/async-buffer" }
cuprate-address-book = { path = "../../p2p/address-book" }
cuprate-blockchain = { path = "../../storage/blockchain" }
cuprate-database-service = { path = "../../storage/service" }
cuprate-txpool = { path = "../../storage/txpool" }
cuprate-database = { path = "../../storage/database" }
cuprate-pruning = { path = "../../pruning" }
cuprate-test-utils = { path = "../../test-utils" }
cuprate-types = { path = "../../types" }
cuprate-json-rpc = { path = "../../rpc/json-rpc" }
cuprate-rpc-interface = { path = "../../rpc/interface" }
cuprate-rpc-types = { path = "../../rpc/types" }

rayon = { workspace = true }
futures = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
tower = { workspace = true }
tracing = { workspace = true }
thiserror = { workspace = true }
hex = { workspace = true }

clap = { workspace = true, features = ["default", "derive"] }
tracing-subscriber = { workspace = true, features = ["default"] }
# TODO: after v1.0.0, remove unneeded dependencies.
anyhow = { workspace = true }
async-trait = { workspace = true }
bitflags = { workspace = true }
borsh = { workspace = true }
bytemuck = { workspace = true }
bytes = { workspace = true }
cfg-if = { workspace = true }
clap = { workspace = true }
chrono = { workspace = true }
crypto-bigint = { workspace = true }
crossbeam = { workspace = true }
curve25519-dalek = { workspace = true }
dashmap = { workspace = true }
dirs = { workspace = true }
futures = { workspace = true }
hex = { workspace = true }
hex-literal = { workspace = true }
indexmap = { workspace = true }
monero-serai = { workspace = true }
paste = { workspace = true }
pin-project = { workspace = true }
randomx-rs = { workspace = true }
rand = { workspace = true }
rand_distr = { workspace = true }
rayon = { workspace = true }
serde_bytes = { workspace = true }
serde_json = { workspace = true }
serde = { workspace = true }
thiserror = { workspace = true }
thread_local = { workspace = true }
tokio-util = { workspace = true }
tokio-stream = { workspace = true }
tokio = { workspace = true }
tower = { workspace = true }
tracing-subscriber = { workspace = true }
tracing = { workspace = true }

#[lints]
#workspace = true
2 binaries/cuprated/README.md (new file)

@@ -0,0 +1,2 @@
# `cuprated`
TODO
@@ -1,3 +1,16 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(
    unused_imports,
    unreachable_pub,
    unused_crate_dependencies,
    dead_code,
    unused_variables,
    clippy::needless_pass_by_value,
    clippy::unused_async,
    reason = "TODO: remove after v1.0.0"
)]

use tokio::runtime::Runtime;
use tracing::Level;
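[Editor's note] The hunk above imports `tokio::runtime::Runtime`, matching the `rt-multi-thread` and `macros` features enabled for `tokio` in the manifest. A minimal sketch of an entry point built on those pieces, for orientation only (this commit does not show the actual startup body):

use tokio::runtime::Runtime;

fn main() {
    // `Runtime::new()` builds the multi-threaded runtime that the
    // `rt-multi-thread` feature enables.
    let rt = Runtime::new().expect("failed to build the Tokio runtime");

    // Hypothetical: block the main thread on the daemon's async entry point;
    // P2P, RPC, and database services would be spawned inside.
    rt.block_on(async {
        // ...
    });
}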
@@ -2,4 +2,9 @@
//!
//! Will contain the code to initiate the RPC and a request handler.

mod request_handler;
mod bin;
mod handler;
mod json;
mod other;

pub use handler::{CupratedRpcHandler, CupratedRpcHandlerState};
85 binaries/cuprated/src/rpc/bin.rs (new file)

@@ -0,0 +1,85 @@
use anyhow::Error;

use cuprate_rpc_types::{
    bin::{
        BinRequest, BinResponse, GetBlocksByHeightRequest, GetBlocksByHeightResponse,
        GetBlocksRequest, GetBlocksResponse, GetHashesRequest, GetHashesResponse,
        GetOutputIndexesRequest, GetOutputIndexesResponse, GetOutsRequest, GetOutsResponse,
        GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse,
    },
    json::{GetOutputDistributionRequest, GetOutputDistributionResponse},
};

use crate::rpc::CupratedRpcHandlerState;

/// Map a [`BinRequest`] to the function that will lead to a [`BinResponse`].
pub(super) async fn map_request(
    state: CupratedRpcHandlerState,
    request: BinRequest,
) -> Result<BinResponse, Error> {
    use BinRequest as Req;
    use BinResponse as Resp;

    Ok(match request {
        Req::GetBlocks(r) => Resp::GetBlocks(get_blocks(state, r).await?),
        Req::GetBlocksByHeight(r) => Resp::GetBlocksByHeight(get_blocks_by_height(state, r).await?),
        Req::GetHashes(r) => Resp::GetHashes(get_hashes(state, r).await?),
        Req::GetOutputIndexes(r) => Resp::GetOutputIndexes(get_output_indexes(state, r).await?),
        Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?),
        Req::GetTransactionPoolHashes(r) => {
            Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?)
        }
        Req::GetOutputDistribution(r) => {
            Resp::GetOutputDistribution(get_output_distribution(state, r).await?)
        }
    })
}

async fn get_blocks(
    state: CupratedRpcHandlerState,
    request: GetBlocksRequest,
) -> Result<GetBlocksResponse, Error> {
    todo!()
}

async fn get_blocks_by_height(
    state: CupratedRpcHandlerState,
    request: GetBlocksByHeightRequest,
) -> Result<GetBlocksByHeightResponse, Error> {
    todo!()
}

async fn get_hashes(
    state: CupratedRpcHandlerState,
    request: GetHashesRequest,
) -> Result<GetHashesResponse, Error> {
    todo!()
}

async fn get_output_indexes(
    state: CupratedRpcHandlerState,
    request: GetOutputIndexesRequest,
) -> Result<GetOutputIndexesResponse, Error> {
    todo!()
}

async fn get_outs(
    state: CupratedRpcHandlerState,
    request: GetOutsRequest,
) -> Result<GetOutsResponse, Error> {
    todo!()
}

async fn get_transaction_pool_hashes(
    state: CupratedRpcHandlerState,
    request: GetTransactionPoolHashesRequest,
) -> Result<GetTransactionPoolHashesResponse, Error> {
    todo!()
}

async fn get_output_distribution(
    state: CupratedRpcHandlerState,
    request: GetOutputDistributionRequest,
) -> Result<GetOutputDistributionResponse, Error> {
    todo!()
}
103 binaries/cuprated/src/rpc/handler.rs (new file)

@@ -0,0 +1,103 @@
//! Dummy implementation of [`RpcHandler`].

use std::task::{Context, Poll};

use anyhow::Error;
use futures::{channel::oneshot::channel, future::BoxFuture};
use serde::{Deserialize, Serialize};
use tower::Service;

use cuprate_blockchain::service::BlockchainReadHandle;
use cuprate_helper::asynch::InfallibleOneshotReceiver;
use cuprate_json_rpc::Id;
use cuprate_rpc_interface::RpcHandler;
use cuprate_rpc_types::{
    bin::{BinRequest, BinResponse},
    json::{JsonRpcRequest, JsonRpcResponse},
    other::{OtherRequest, OtherResponse},
};
use cuprate_txpool::service::TxpoolReadHandle;

use crate::rpc::{bin, json, other};

/// TODO
#[derive(Clone)]
pub struct CupratedRpcHandler {
    /// Should this RPC server be [restricted](RpcHandler::restricted)?
    //
    // INVARIANT:
    // We don't need to include this in `state` and check for
    // `self.is_restricted()` because `cuprate-rpc-interface` handles that.
    pub restricted: bool,

    /// State needed for request -> response mapping.
    pub state: CupratedRpcHandlerState,
}

/// TODO
#[derive(Clone)]
pub struct CupratedRpcHandlerState {
    /// Read handle to the blockchain database.
    pub blockchain: BlockchainReadHandle,

    /// Read handle to the transaction pool database.
    pub txpool: TxpoolReadHandle,
}

impl CupratedRpcHandler {
    /// TODO
    pub fn init() {
        todo!()
    }
}

impl RpcHandler for CupratedRpcHandler {
    fn restricted(&self) -> bool {
        self.restricted
    }
}

impl Service<JsonRpcRequest> for CupratedRpcHandler {
    type Response = JsonRpcResponse;
    type Error = Error;
    type Future = BoxFuture<'static, Result<JsonRpcResponse, Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: JsonRpcRequest) -> Self::Future {
        let state = CupratedRpcHandlerState::clone(&self.state);
        Box::pin(json::map_request(state, request))
    }
}

impl Service<BinRequest> for CupratedRpcHandler {
    type Response = BinResponse;
    type Error = Error;
    type Future = BoxFuture<'static, Result<BinResponse, Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: BinRequest) -> Self::Future {
        let state = CupratedRpcHandlerState::clone(&self.state);
        Box::pin(bin::map_request(state, request))
    }
}

impl Service<OtherRequest> for CupratedRpcHandler {
    type Response = OtherResponse;
    type Error = Error;
    type Future = BoxFuture<'static, Result<OtherResponse, Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: OtherRequest) -> Self::Future {
        let state = CupratedRpcHandlerState::clone(&self.state);
        Box::pin(other::map_request(state, request))
    }
}
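[Editor's note] Because `CupratedRpcHandler` implements `tower::Service` for all three request families (JSON-RPC, binary, and "other" endpoints), callers can drive it like any other Tower service. A minimal sketch, assuming a constructed `handler` and some `request` (not code from this commit):

use tower::ServiceExt; // provides `oneshot`

// Hypothetical helper: send one JSON-RPC request through the handler.
async fn handle_one(
    handler: CupratedRpcHandler,
    request: JsonRpcRequest,
) -> Result<JsonRpcResponse, anyhow::Error> {
    // `oneshot` drives `poll_ready` (always `Ready` in this dummy
    // implementation) and then `call`, which clones the shared state
    // and dispatches via `json::map_request`.
    handler.oneshot(request).await
}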
294 binaries/cuprated/src/rpc/json.rs (new file)

@@ -0,0 +1,294 @@
use std::sync::Arc;

use anyhow::Error;
use tower::ServiceExt;

use cuprate_rpc_types::json::{
    AddAuxPowRequest, AddAuxPowResponse, BannedRequest, BannedResponse, CalcPowRequest,
    CalcPowResponse, FlushCacheRequest, FlushCacheResponse, FlushTransactionPoolRequest,
    FlushTransactionPoolResponse, GenerateBlocksRequest, GenerateBlocksResponse,
    GetAlternateChainsRequest, GetAlternateChainsResponse, GetBansRequest, GetBansResponse,
    GetBlockCountRequest, GetBlockCountResponse, GetBlockHeaderByHashRequest,
    GetBlockHeaderByHashResponse, GetBlockHeaderByHeightRequest, GetBlockHeaderByHeightResponse,
    GetBlockHeadersRangeRequest, GetBlockHeadersRangeResponse, GetBlockRequest, GetBlockResponse,
    GetCoinbaseTxSumRequest, GetCoinbaseTxSumResponse, GetConnectionsRequest,
    GetConnectionsResponse, GetFeeEstimateRequest, GetFeeEstimateResponse, GetInfoRequest,
    GetInfoResponse, GetLastBlockHeaderRequest, GetLastBlockHeaderResponse, GetMinerDataRequest,
    GetMinerDataResponse, GetOutputHistogramRequest, GetOutputHistogramResponse,
    GetTransactionPoolBacklogRequest, GetTransactionPoolBacklogResponse, GetTxIdsLooseRequest,
    GetTxIdsLooseResponse, GetVersionRequest, GetVersionResponse, HardForkInfoRequest,
    HardForkInfoResponse, JsonRpcRequest, JsonRpcResponse, OnGetBlockHashRequest,
    OnGetBlockHashResponse, PruneBlockchainRequest, PruneBlockchainResponse, RelayTxRequest,
    RelayTxResponse, SetBansRequest, SetBansResponse, SubmitBlockRequest, SubmitBlockResponse,
    SyncInfoRequest, SyncInfoResponse,
};

use crate::rpc::CupratedRpcHandlerState;

/// Map a [`JsonRpcRequest`] to the function that will lead to a [`JsonRpcResponse`].
pub(super) async fn map_request(
    state: CupratedRpcHandlerState,
    request: JsonRpcRequest,
) -> Result<JsonRpcResponse, Error> {
    use JsonRpcRequest as Req;
    use JsonRpcResponse as Resp;

    Ok(match request {
        Req::GetBlockCount(r) => Resp::GetBlockCount(get_block_count(state, r).await?),
        Req::OnGetBlockHash(r) => Resp::OnGetBlockHash(on_get_block_hash(state, r).await?),
        Req::SubmitBlock(r) => Resp::SubmitBlock(submit_block(state, r).await?),
        Req::GenerateBlocks(r) => Resp::GenerateBlocks(generate_blocks(state, r).await?),
        Req::GetLastBlockHeader(r) => {
            Resp::GetLastBlockHeader(get_last_block_header(state, r).await?)
        }
        Req::GetBlockHeaderByHash(r) => {
            Resp::GetBlockHeaderByHash(get_block_header_by_hash(state, r).await?)
        }
        Req::GetBlockHeaderByHeight(r) => {
            Resp::GetBlockHeaderByHeight(get_block_header_by_height(state, r).await?)
        }
        Req::GetBlockHeadersRange(r) => {
            Resp::GetBlockHeadersRange(get_block_headers_range(state, r).await?)
        }
        Req::GetBlock(r) => Resp::GetBlock(get_block(state, r).await?),
        Req::GetConnections(r) => Resp::GetConnections(get_connections(state, r).await?),
        Req::GetInfo(r) => Resp::GetInfo(get_info(state, r).await?),
        Req::HardForkInfo(r) => Resp::HardForkInfo(hard_fork_info(state, r).await?),
        Req::SetBans(r) => Resp::SetBans(set_bans(state, r).await?),
        Req::GetBans(r) => Resp::GetBans(get_bans(state, r).await?),
        Req::Banned(r) => Resp::Banned(banned(state, r).await?),
        Req::FlushTransactionPool(r) => {
            Resp::FlushTransactionPool(flush_transaction_pool(state, r).await?)
        }
        Req::GetOutputHistogram(r) => {
            Resp::GetOutputHistogram(get_output_histogram(state, r).await?)
        }
        Req::GetCoinbaseTxSum(r) => Resp::GetCoinbaseTxSum(get_coinbase_tx_sum(state, r).await?),
        Req::GetVersion(r) => Resp::GetVersion(get_version(state, r).await?),
        Req::GetFeeEstimate(r) => Resp::GetFeeEstimate(get_fee_estimate(state, r).await?),
        Req::GetAlternateChains(r) => {
            Resp::GetAlternateChains(get_alternate_chains(state, r).await?)
        }
        Req::RelayTx(r) => Resp::RelayTx(relay_tx(state, r).await?),
        Req::SyncInfo(r) => Resp::SyncInfo(sync_info(state, r).await?),
        Req::GetTransactionPoolBacklog(r) => {
            Resp::GetTransactionPoolBacklog(get_transaction_pool_backlog(state, r).await?)
        }
        Req::GetMinerData(r) => Resp::GetMinerData(get_miner_data(state, r).await?),
        Req::PruneBlockchain(r) => Resp::PruneBlockchain(prune_blockchain(state, r).await?),
        Req::CalcPow(r) => Resp::CalcPow(calc_pow(state, r).await?),
        Req::FlushCache(r) => Resp::FlushCache(flush_cache(state, r).await?),
        Req::AddAuxPow(r) => Resp::AddAuxPow(add_aux_pow(state, r).await?),
        Req::GetTxIdsLoose(r) => Resp::GetTxIdsLoose(get_tx_ids_loose(state, r).await?),
    })
}

async fn get_block_count(
    state: CupratedRpcHandlerState,
    request: GetBlockCountRequest,
) -> Result<GetBlockCountResponse, Error> {
    todo!()
}

async fn on_get_block_hash(
    state: CupratedRpcHandlerState,
    request: OnGetBlockHashRequest,
) -> Result<OnGetBlockHashResponse, Error> {
    todo!()
}

async fn submit_block(
    state: CupratedRpcHandlerState,
    request: SubmitBlockRequest,
) -> Result<SubmitBlockResponse, Error> {
    todo!()
}

async fn generate_blocks(
    state: CupratedRpcHandlerState,
    request: GenerateBlocksRequest,
) -> Result<GenerateBlocksResponse, Error> {
    todo!()
}

async fn get_last_block_header(
    state: CupratedRpcHandlerState,
    request: GetLastBlockHeaderRequest,
) -> Result<GetLastBlockHeaderResponse, Error> {
    todo!()
}

async fn get_block_header_by_hash(
    state: CupratedRpcHandlerState,
    request: GetBlockHeaderByHashRequest,
) -> Result<GetBlockHeaderByHashResponse, Error> {
    todo!()
}

async fn get_block_header_by_height(
    state: CupratedRpcHandlerState,
    request: GetBlockHeaderByHeightRequest,
) -> Result<GetBlockHeaderByHeightResponse, Error> {
    todo!()
}

async fn get_block_headers_range(
    state: CupratedRpcHandlerState,
    request: GetBlockHeadersRangeRequest,
) -> Result<GetBlockHeadersRangeResponse, Error> {
    todo!()
}

async fn get_block(
    state: CupratedRpcHandlerState,
    request: GetBlockRequest,
) -> Result<GetBlockResponse, Error> {
    todo!()
}

async fn get_connections(
    state: CupratedRpcHandlerState,
    request: GetConnectionsRequest,
) -> Result<GetConnectionsResponse, Error> {
    todo!()
}

async fn get_info(
    state: CupratedRpcHandlerState,
    request: GetInfoRequest,
) -> Result<GetInfoResponse, Error> {
    todo!()
}

async fn hard_fork_info(
    state: CupratedRpcHandlerState,
    request: HardForkInfoRequest,
) -> Result<HardForkInfoResponse, Error> {
    todo!()
}

async fn set_bans(
    state: CupratedRpcHandlerState,
    request: SetBansRequest,
) -> Result<SetBansResponse, Error> {
    todo!()
}

async fn get_bans(
    state: CupratedRpcHandlerState,
    request: GetBansRequest,
) -> Result<GetBansResponse, Error> {
    todo!()
}

async fn banned(
    state: CupratedRpcHandlerState,
    request: BannedRequest,
) -> Result<BannedResponse, Error> {
    todo!()
}

async fn flush_transaction_pool(
    state: CupratedRpcHandlerState,
    request: FlushTransactionPoolRequest,
) -> Result<FlushTransactionPoolResponse, Error> {
    todo!()
}

async fn get_output_histogram(
    state: CupratedRpcHandlerState,
    request: GetOutputHistogramRequest,
) -> Result<GetOutputHistogramResponse, Error> {
    todo!()
}

async fn get_coinbase_tx_sum(
    state: CupratedRpcHandlerState,
    request: GetCoinbaseTxSumRequest,
) -> Result<GetCoinbaseTxSumResponse, Error> {
    todo!()
}

async fn get_version(
    state: CupratedRpcHandlerState,
    request: GetVersionRequest,
) -> Result<GetVersionResponse, Error> {
    todo!()
}

async fn get_fee_estimate(
    state: CupratedRpcHandlerState,
    request: GetFeeEstimateRequest,
) -> Result<GetFeeEstimateResponse, Error> {
    todo!()
}

async fn get_alternate_chains(
    state: CupratedRpcHandlerState,
    request: GetAlternateChainsRequest,
) -> Result<GetAlternateChainsResponse, Error> {
    todo!()
}

async fn relay_tx(
    state: CupratedRpcHandlerState,
    request: RelayTxRequest,
) -> Result<RelayTxResponse, Error> {
    todo!()
}

async fn sync_info(
    state: CupratedRpcHandlerState,
    request: SyncInfoRequest,
) -> Result<SyncInfoResponse, Error> {
    todo!()
}

async fn get_transaction_pool_backlog(
    state: CupratedRpcHandlerState,
    request: GetTransactionPoolBacklogRequest,
) -> Result<GetTransactionPoolBacklogResponse, Error> {
    todo!()
}

async fn get_miner_data(
    state: CupratedRpcHandlerState,
    request: GetMinerDataRequest,
) -> Result<GetMinerDataResponse, Error> {
    todo!()
}

async fn prune_blockchain(
    state: CupratedRpcHandlerState,
    request: PruneBlockchainRequest,
) -> Result<PruneBlockchainResponse, Error> {
    todo!()
}

async fn calc_pow(
    state: CupratedRpcHandlerState,
    request: CalcPowRequest,
) -> Result<CalcPowResponse, Error> {
    todo!()
}

async fn flush_cache(
    state: CupratedRpcHandlerState,
    request: FlushCacheRequest,
) -> Result<FlushCacheResponse, Error> {
    todo!()
}

async fn add_aux_pow(
    state: CupratedRpcHandlerState,
    request: AddAuxPowRequest,
) -> Result<AddAuxPowResponse, Error> {
    todo!()
}

async fn get_tx_ids_loose(
    state: CupratedRpcHandlerState,
    request: GetTxIdsLooseRequest,
) -> Result<GetTxIdsLooseResponse, Error> {
    todo!()
}
260 binaries/cuprated/src/rpc/other.rs (new file)

@@ -0,0 +1,260 @@
use anyhow::Error;

use cuprate_rpc_types::other::{
    GetAltBlocksHashesRequest, GetAltBlocksHashesResponse, GetHeightRequest, GetHeightResponse,
    GetLimitRequest, GetLimitResponse, GetNetStatsRequest, GetNetStatsResponse, GetOutsRequest,
    GetOutsResponse, GetPeerListRequest, GetPeerListResponse, GetPublicNodesRequest,
    GetPublicNodesResponse, GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse,
    GetTransactionPoolRequest, GetTransactionPoolResponse, GetTransactionPoolStatsRequest,
    GetTransactionPoolStatsResponse, GetTransactionsRequest, GetTransactionsResponse,
    InPeersRequest, InPeersResponse, IsKeyImageSpentRequest, IsKeyImageSpentResponse,
    MiningStatusRequest, MiningStatusResponse, OtherRequest, OtherResponse, OutPeersRequest,
    OutPeersResponse, PopBlocksRequest, PopBlocksResponse, SaveBcRequest, SaveBcResponse,
    SendRawTransactionRequest, SendRawTransactionResponse, SetBootstrapDaemonRequest,
    SetBootstrapDaemonResponse, SetLimitRequest, SetLimitResponse, SetLogCategoriesRequest,
    SetLogCategoriesResponse, SetLogHashRateRequest, SetLogHashRateResponse, SetLogLevelRequest,
    SetLogLevelResponse, StartMiningRequest, StartMiningResponse, StopDaemonRequest,
    StopDaemonResponse, StopMiningRequest, StopMiningResponse, UpdateRequest, UpdateResponse,
};

use crate::rpc::CupratedRpcHandlerState;

/// Map a [`OtherRequest`] to the function that will lead to a [`OtherResponse`].
pub(super) async fn map_request(
    state: CupratedRpcHandlerState,
    request: OtherRequest,
) -> Result<OtherResponse, Error> {
    use OtherRequest as Req;
    use OtherResponse as Resp;

    Ok(match request {
        Req::GetHeight(r) => Resp::GetHeight(get_height(state, r).await?),
        Req::GetTransactions(r) => Resp::GetTransactions(get_transactions(state, r).await?),
        Req::GetAltBlocksHashes(r) => {
            Resp::GetAltBlocksHashes(get_alt_blocks_hashes(state, r).await?)
        }
        Req::IsKeyImageSpent(r) => Resp::IsKeyImageSpent(is_key_image_spent(state, r).await?),
        Req::SendRawTransaction(r) => {
            Resp::SendRawTransaction(send_raw_transaction(state, r).await?)
        }
        Req::StartMining(r) => Resp::StartMining(start_mining(state, r).await?),
        Req::StopMining(r) => Resp::StopMining(stop_mining(state, r).await?),
        Req::MiningStatus(r) => Resp::MiningStatus(mining_status(state, r).await?),
        Req::SaveBc(r) => Resp::SaveBc(save_bc(state, r).await?),
        Req::GetPeerList(r) => Resp::GetPeerList(get_peer_list(state, r).await?),
        Req::SetLogHashRate(r) => Resp::SetLogHashRate(set_log_hash_rate(state, r).await?),
        Req::SetLogLevel(r) => Resp::SetLogLevel(set_log_level(state, r).await?),
        Req::SetLogCategories(r) => Resp::SetLogCategories(set_log_categories(state, r).await?),
        Req::SetBootstrapDaemon(r) => {
            Resp::SetBootstrapDaemon(set_bootstrap_daemon(state, r).await?)
        }
        Req::GetTransactionPool(r) => {
            Resp::GetTransactionPool(get_transaction_pool(state, r).await?)
        }
        Req::GetTransactionPoolStats(r) => {
            Resp::GetTransactionPoolStats(get_transaction_pool_stats(state, r).await?)
        }
        Req::StopDaemon(r) => Resp::StopDaemon(stop_daemon(state, r).await?),
        Req::GetLimit(r) => Resp::GetLimit(get_limit(state, r).await?),
        Req::SetLimit(r) => Resp::SetLimit(set_limit(state, r).await?),
        Req::OutPeers(r) => Resp::OutPeers(out_peers(state, r).await?),
        Req::InPeers(r) => Resp::InPeers(in_peers(state, r).await?),
        Req::GetNetStats(r) => Resp::GetNetStats(get_net_stats(state, r).await?),
        Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?),
        Req::Update(r) => Resp::Update(update(state, r).await?),
        Req::PopBlocks(r) => Resp::PopBlocks(pop_blocks(state, r).await?),
        Req::GetTransactionPoolHashes(r) => {
            Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?)
        }
        Req::GetPublicNodes(r) => Resp::GetPublicNodes(get_public_nodes(state, r).await?),
    })
}

async fn get_height(
    state: CupratedRpcHandlerState,
    request: GetHeightRequest,
) -> Result<GetHeightResponse, Error> {
    todo!()
}

async fn get_transactions(
    state: CupratedRpcHandlerState,
    request: GetTransactionsRequest,
) -> Result<GetTransactionsResponse, Error> {
    todo!()
}

async fn get_alt_blocks_hashes(
    state: CupratedRpcHandlerState,
    request: GetAltBlocksHashesRequest,
) -> Result<GetAltBlocksHashesResponse, Error> {
    todo!()
}

async fn is_key_image_spent(
    state: CupratedRpcHandlerState,
    request: IsKeyImageSpentRequest,
) -> Result<IsKeyImageSpentResponse, Error> {
    todo!()
}

async fn send_raw_transaction(
    state: CupratedRpcHandlerState,
    request: SendRawTransactionRequest,
) -> Result<SendRawTransactionResponse, Error> {
    todo!()
}

async fn start_mining(
    state: CupratedRpcHandlerState,
    request: StartMiningRequest,
) -> Result<StartMiningResponse, Error> {
    todo!()
}

async fn stop_mining(
    state: CupratedRpcHandlerState,
    request: StopMiningRequest,
) -> Result<StopMiningResponse, Error> {
    todo!()
}

async fn mining_status(
    state: CupratedRpcHandlerState,
    request: MiningStatusRequest,
) -> Result<MiningStatusResponse, Error> {
    todo!()
}

async fn save_bc(
    state: CupratedRpcHandlerState,
    request: SaveBcRequest,
) -> Result<SaveBcResponse, Error> {
    todo!()
}

async fn get_peer_list(
    state: CupratedRpcHandlerState,
    request: GetPeerListRequest,
) -> Result<GetPeerListResponse, Error> {
    todo!()
}

async fn set_log_hash_rate(
    state: CupratedRpcHandlerState,
    request: SetLogHashRateRequest,
) -> Result<SetLogHashRateResponse, Error> {
    todo!()
}

async fn set_log_level(
    state: CupratedRpcHandlerState,
    request: SetLogLevelRequest,
) -> Result<SetLogLevelResponse, Error> {
    todo!()
}

async fn set_log_categories(
    state: CupratedRpcHandlerState,
    request: SetLogCategoriesRequest,
) -> Result<SetLogCategoriesResponse, Error> {
    todo!()
}

async fn set_bootstrap_daemon(
    state: CupratedRpcHandlerState,
    request: SetBootstrapDaemonRequest,
) -> Result<SetBootstrapDaemonResponse, Error> {
    todo!()
}

async fn get_transaction_pool(
    state: CupratedRpcHandlerState,
    request: GetTransactionPoolRequest,
) -> Result<GetTransactionPoolResponse, Error> {
    todo!()
}

async fn get_transaction_pool_stats(
    state: CupratedRpcHandlerState,
    request: GetTransactionPoolStatsRequest,
) -> Result<GetTransactionPoolStatsResponse, Error> {
    todo!()
}

async fn stop_daemon(
    state: CupratedRpcHandlerState,
    request: StopDaemonRequest,
) -> Result<StopDaemonResponse, Error> {
    todo!()
}

async fn get_limit(
    state: CupratedRpcHandlerState,
    request: GetLimitRequest,
) -> Result<GetLimitResponse, Error> {
    todo!()
}

async fn set_limit(
    state: CupratedRpcHandlerState,
    request: SetLimitRequest,
) -> Result<SetLimitResponse, Error> {
    todo!()
}

async fn out_peers(
    state: CupratedRpcHandlerState,
    request: OutPeersRequest,
) -> Result<OutPeersResponse, Error> {
    todo!()
}

async fn in_peers(
    state: CupratedRpcHandlerState,
    request: InPeersRequest,
) -> Result<InPeersResponse, Error> {
    todo!()
}

async fn get_net_stats(
    state: CupratedRpcHandlerState,
    request: GetNetStatsRequest,
) -> Result<GetNetStatsResponse, Error> {
    todo!()
}

async fn get_outs(
    state: CupratedRpcHandlerState,
    request: GetOutsRequest,
) -> Result<GetOutsResponse, Error> {
    todo!()
}

async fn update(
    state: CupratedRpcHandlerState,
    request: UpdateRequest,
) -> Result<UpdateResponse, Error> {
    todo!()
}

async fn pop_blocks(
    state: CupratedRpcHandlerState,
    request: PopBlocksRequest,
) -> Result<PopBlocksResponse, Error> {
    todo!()
}

async fn get_transaction_pool_hashes(
    state: CupratedRpcHandlerState,
    request: GetTransactionPoolHashesRequest,
) -> Result<GetTransactionPoolHashesResponse, Error> {
    todo!()
}

async fn get_public_nodes(
    state: CupratedRpcHandlerState,
    request: GetPublicNodesRequest,
) -> Result<GetPublicNodesResponse, Error> {
    todo!()
}
@@ -93,17 +93,20 @@

---

- [⚪️ Resource model](resource-model/intro.md)
    - [⚪️ File system](resource-model/file-system.md)
    - [⚪️ Sockets](resource-model/sockets.md)
    - [⚪️ Memory](resource-model/memory.md)
    - [🟡 Concurrency and parallelism](resource-model/concurrency-and-parallelism/intro.md)
        - [⚪️ Map](resource-model/concurrency-and-parallelism/map.md)
        - [⚪️ The RPC server](resource-model/concurrency-and-parallelism/the-rpc-server.md)
        - [⚪️ The database](resource-model/concurrency-and-parallelism/the-database.md)
        - [⚪️ The block downloader](resource-model/concurrency-and-parallelism/the-block-downloader.md)
        - [⚪️ The verifier](resource-model/concurrency-and-parallelism/the-verifier.md)
        - [⚪️ Thread exit](resource-model/concurrency-and-parallelism/thread-exit.md)
- [⚪️ Resources](resources/intro.md)
    - [⚪️ File system](resources/fs/intro.md)
        - [🟡 Index of PATHs](resources/fs/paths.md)
    - [⚪️ Sockets](resources/sockets/index.md)
        - [🔴 Index of ports](resources/sockets/ports.md)
    - [⚪️ Memory](resources/memory.md)
    - [🟡 Concurrency and parallelism](resources/cap/intro.md)
        - [⚪️ Map](resources/cap/map.md)
        - [⚪️ The RPC server](resources/cap/the-rpc-server.md)
        - [⚪️ The database](resources/cap/the-database.md)
        - [⚪️ The block downloader](resources/cap/the-block-downloader.md)
        - [⚪️ The verifier](resources/cap/the-verifier.md)
        - [⚪️ Thread exit](resources/cap/thread-exit.md)
        - [🔴 Index of threads](resources/cap/threads.md)

---
@@ -1 +0,0 @@
# ⚪️ Resource model

@@ -1 +0,0 @@
# ⚪️ Sockets
2 books/architecture/src/resources/cap/threads.md (new file)

@@ -0,0 +1,2 @@
# Index of threads
This is an index of all of the system threads Cuprate actively uses.
87 books/architecture/src/resources/fs/paths.md (new file)

@@ -0,0 +1,87 @@
# Index of PATHs
This is an index of all of the filesystem PATHs Cuprate actively uses.

The [`cuprate_helper::fs`](https://doc.cuprate.org/cuprate_helper/fs/index.html)
module defines the general locations used throughout Cuprate.

[`dirs`](https://docs.rs/dirs) is used internally, which follows
the PATH standards/conventions on each OS Cuprate supports, i.e.:
- the [XDG base directory](https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html) and the [XDG user directory](https://www.freedesktop.org/wiki/Software/xdg-user-dirs/) specifications on Linux
- the [Known Folder](https://msdn.microsoft.com/en-us/library/windows/desktop/bb776911(v=vs.85).aspx) system on Windows
- the [Standard Directories](https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html#//apple_ref/doc/uid/TP40010672-CH2-SW6) on macOS
|
||||
## Cache
|
||||
Cuprate's cache directory.
|
||||
|
||||
| OS | PATH |
|
||||
|---------|-----------------------------------------|
|
||||
| Windows | `C:\Users\Alice\AppData\Local\Cuprate\` |
|
||||
| macOS | `/Users/Alice/Library/Caches/Cuprate/` |
|
||||
| Linux | `/home/alice/.cache/cuprate/` |
|
||||
|
||||
## Config
|
||||
Cuprate's config directory.
|
||||
|
||||
| OS | PATH |
|
||||
|---------|-----------------------------------------------------|
|
||||
| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` |
|
||||
| macOS | `/Users/Alice/Library/Application Support/Cuprate/` |
|
||||
| Linux | `/home/alice/.config/cuprate/` |
|
||||
|
||||
## Data
|
||||
Cuprate's data directory.
|
||||
|
||||
| OS | PATH |
|
||||
|---------|-----------------------------------------------------|
|
||||
| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` |
|
||||
| macOS | `/Users/Alice/Library/Application Support/Cuprate/` |
|
||||
| Linux | `/home/alice/.local/share/cuprate/` |
|
||||
|
||||
## Blockchain
|
||||
Cuprate's blockchain directory.
|
||||
|
||||
| OS | PATH |
|
||||
|---------|----------------------------------------------------------------|
|
||||
| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\` |
|
||||
| macOS | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` |
|
||||
| Linux | `/home/alice/.local/share/cuprate/blockchain/` |
|
||||
|
||||
## Transaction pool
|
||||
Cuprate's transaction pool directory.
|
||||
|
||||
| OS | PATH |
|
||||
|---------|------------------------------------------------------------|
|
||||
| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\` |
|
||||
| macOS | `/Users/Alice/Library/Application Support/Cuprate/txpool/` |
|
||||
| Linux | `/home/alice/.local/share/cuprate/txpool/` |
|
||||
|
||||
## Database
|
||||
Cuprate's database location/filenames depend on:
|
||||
|
||||
- Which database it is
|
||||
- Which backend is being used
|
||||
|
||||
---
|
||||
|
||||
`cuprate_blockchain` files are in the above mentioned `blockchain` folder.
|
||||
|
||||
`cuprate_txpool` files are in the above mentioned `txpool` folder.
|
||||
|
||||
---
|
||||
|
||||
If the `heed` backend is being used, these files will be created:
|
||||
|
||||
| Filename | Purpose |
|
||||
|------------|--------------------|
|
||||
| `data.mdb` | Main data file |
|
||||
| `lock.mdb` | Database lock file |
|
||||
|
||||
For example: `/home/alice/.local/share/cuprate/blockchain/lock.mdb`.
|
||||
|
||||
If the `redb` backend is being used, these files will be created:
|
||||
|
||||
| Filename | Purpose |
|
||||
|-------------|--------------------|
|
||||
| `data.redb` | Main data file |
|
||||
|
||||
For example: `/home/alice/.local/share/cuprate/txpool/data.redb`.
|
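To make the layout concrete, here is a minimal sketch of resolving a backend file under these directories, using the `CUPRATE_BLOCKCHAIN_DIR` static that this same commit adds to `cuprate_helper::fs` (the exact filename depends on the backend, as the tables above show):

```rust
use cuprate_helper::fs::CUPRATE_BLOCKCHAIN_DIR;

// With the `heed` backend, the blockchain's main data file lives here,
// e.g. `/home/alice/.local/share/cuprate/blockchain/data.mdb` on Linux.
let data_file = CUPRATE_BLOCKCHAIN_DIR.join("data.mdb");
println!("{}", data_file.display());
```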
1 books/architecture/src/resources/intro.md Normal file

@@ -0,0 +1 @@
# Resources

1 books/architecture/src/resources/sockets/index.md Normal file

@@ -0,0 +1 @@
# Sockets
2 books/architecture/src/resources/sockets/ports.md Normal file

@@ -0,0 +1,2 @@
# Index of ports
This is an index of all of the network sockets Cuprate actively uses.
1 clippy.toml Normal file

@@ -0,0 +1 @@
upper-case-acronyms-aggressive = true
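This one-line configuration is what drives the many acronym renames later in this diff (`RandomXVM` → `RandomXVm`, `VMInner` → `VmInner`, and so on). A minimal sketch of what clippy's `upper_case_acronyms` lint flags in its aggressive mode:

```rust
// With `upper-case-acronyms-aggressive = true`, the lint also fires on
// acronyms embedded in otherwise-camel-case names:
struct RandomXVM; // flagged: `VM` should be written `Vm`

// The accepted form treats the acronym like an ordinary word:
struct RandomXVm;
```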
@@ -11,7 +11,7 @@ proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"]
rayon = ["dep:rayon"]

[dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["std"] }
cuprate-helper = { path = "../../helper", default-features = false, features = ["std", "cast"] }
cuprate-types = { path = "../../types", default-features = false }
cuprate-cryptonight = {path = "../../cryptonight"}

@@ -1,36 +1,27 @@
use std::sync::OnceLock;

/// Decomposed amount table.
///
static DECOMPOSED_AMOUNTS: OnceLock<[u64; 172]> = OnceLock::new();

#[rustfmt::skip]
pub fn decomposed_amounts() -> &'static [u64; 172] {
    DECOMPOSED_AMOUNTS.get_or_init(|| {
        [
            1, 2, 3, 4, 5, 6, 7, 8, 9,
            10, 20, 30, 40, 50, 60, 70, 80, 90,
            100, 200, 300, 400, 500, 600, 700, 800, 900,
            1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000,
            10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
            100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000,
            1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000,
            10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000,
            100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000,
            1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000,
            10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000,
            100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000,
            1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000,
            10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000,
            100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000,
            1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000,
            10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000,
            100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000,
            1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000,
            10000000000000000000
        ]
    })
}
/// Decomposed amount table.
pub static DECOMPOSED_AMOUNTS: [u64; 172] = [
    1, 2, 3, 4, 5, 6, 7, 8, 9,
    10, 20, 30, 40, 50, 60, 70, 80, 90,
    100, 200, 300, 400, 500, 600, 700, 800, 900,
    1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000,
    10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
    100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000,
    1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000,
    10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000,
    100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000,
    1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000,
    10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000,
    100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000,
    1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000,
    10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000,
    100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000,
    1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000,
    10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000,
    100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000,
    1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000,
    10000000000000000000
];

/// Checks that an output amount is decomposed.
///

@@ -40,7 +31,7 @@ pub fn decomposed_amounts() -> &'static [u64; 172] {
/// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#output-amounts>
#[inline]
pub fn is_decomposed_amount(amount: &u64) -> bool {
    decomposed_amounts().binary_search(amount).is_ok()
    DECOMPOSED_AMOUNTS.binary_search(amount).is_ok()
}

#[cfg(test)]

@@ -49,7 +40,7 @@ mod tests {

    #[test]
    fn decomposed_amounts_return_decomposed() {
        for amount in decomposed_amounts() {
        for amount in DECOMPOSED_AMOUNTS.iter() {
            assert!(is_decomposed_amount(amount))
        }
    }

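Since the table is sorted ascending, membership is a plain binary search. A short usage sketch (the module path is assumed from the imports shown below):

```rust
use crate::decomposed_amount::{is_decomposed_amount, DECOMPOSED_AMOUNTS};

// Every entry of the table is, by definition, decomposed...
assert!(is_decomposed_amount(&500_000));
// ...while values that fall between entries are not.
assert!(!is_decomposed_amount(&500_001));
assert_eq!(DECOMPOSED_AMOUNTS.len(), 172);
```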
@@ -9,12 +9,14 @@ use proptest::{collection::vec, prelude::*};

use monero_serai::transaction::Output;

use cuprate_helper::cast::u64_to_usize;

use super::*;
use crate::decomposed_amount::decomposed_amounts;
use crate::decomposed_amount::DECOMPOSED_AMOUNTS;

#[test]
fn test_check_output_amount_v1() {
    for amount in decomposed_amounts() {
    for amount in DECOMPOSED_AMOUNTS.iter() {
        assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok())
    }

@@ -164,7 +166,7 @@ prop_compose! {
        if timebased || lock_height > 500_000_000 {
            Timelock::Time(time_for_time_lock)
        } else {
            Timelock::Block(usize::try_from(lock_height).unwrap())
            Timelock::Block(u64_to_usize(lock_height))
        }
    }
}

@@ -179,7 +181,7 @@ prop_compose! {
        match ty {
            0 => Timelock::None,
            1 => Timelock::Time(time_for_time_lock),
            _ => Timelock::Block(usize::try_from(lock_height).unwrap())
            _ => Timelock::Block(u64_to_usize(lock_height))
        }
    }
}

@@ -14,7 +14,7 @@ use cuprate_consensus_rules::{
    miner_tx::MinerTxError,
    ConsensusError,
};
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_helper::{asynch::rayon_spawn_async, cast::u64_to_usize};
use cuprate_types::{
    AltBlockInformation, Chain, ChainId, TransactionVerificationData,
    VerifiedTransactionInformation,

@@ -24,7 +24,7 @@ use crate::{
    block::{free::pull_ordered_transactions, PreparedBlock},
    context::{
        difficulty::DifficultyCache,
        rx_vms::RandomXVM,
        rx_vms::RandomXVm,
        weight::{self, BlockWeightsCache},
        AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW,
    },

@@ -101,7 +101,7 @@ where

    // Check the alt block timestamp is in the correct range.
    if let Some(median_timestamp) =
        difficulty_cache.median_timestamp(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.try_into().unwrap())
        difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW))
    {
        check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?
    };

@@ -195,7 +195,7 @@ async fn alt_rx_vm<C>(
    parent_chain: Chain,
    alt_chain_context: &mut AltChainContextCache,
    context_svc: C,
) -> Result<Option<Arc<RandomXVM>>, ExtendedConsensusError>
) -> Result<Option<Arc<RandomXVm>>, ExtendedConsensusError>
where
    C: Service<
        BlockChainContextRequest,

@@ -15,7 +15,7 @@ use cuprate_helper::asynch::rayon_spawn_async;

use crate::{
    block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow},
    context::rx_vms::RandomXVM,
    context::rx_vms::RandomXVm,
    transactions::new_tx_verification_data,
    BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError,
    VerifyBlockResponse,

@@ -148,7 +148,7 @@ where
        tracing::debug!("New randomX seed in batch, initialising VM");

        let new_vm = rayon_spawn_async(move || {
            Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
            Arc::new(RandomXVm::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
        })
        .await;

@@ -33,7 +33,7 @@ mod tokens;

use cuprate_types::Chain;
use difficulty::DifficultyCache;
use rx_vms::RandomXVM;
use rx_vms::RandomXVm;
use weight::BlockWeightsCache;

pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache};

@@ -236,7 +236,7 @@ pub enum BlockChainContextRequest {
    /// seed.
    ///
    /// This should include the seed used to init this VM and the VM.
    NewRXVM(([u8; 32], Arc<RandomXVM>)),
    NewRXVM(([u8; 32], Arc<RandomXVm>)),
    /// A request to add a new block to the cache.
    Update(NewBlockData),
    /// Pop blocks from the cache to the specified height.

@@ -313,7 +313,7 @@ pub enum BlockChainContextResponse {
    /// Blockchain context response.
    Context(BlockChainContext),
    /// A map of seed height to RandomX VMs.
    RxVms(HashMap<usize, Arc<RandomXVM>>),
    RxVms(HashMap<usize, Arc<RandomXVm>>),
    /// A list of difficulties.
    BatchDifficulties(Vec<u128>),
    /// An alt chain context cache.

@@ -321,7 +321,7 @@ pub enum BlockChainContextResponse {
    /// A difficulty cache for an alt chain.
    AltChainDifficultyCache(DifficultyCache),
    /// A randomX VM for an alt chain.
    AltChainRxVM(Arc<RandomXVM>),
    AltChainRxVM(Arc<RandomXVm>),
    /// A weight cache for an alt chain
    AltChainWeightCache(BlockWeightsCache),
    /// A generic Ok response.

@@ -11,7 +11,7 @@ use cuprate_types::{
use crate::{
    ExtendedConsensusError,
    __private::Database,
    context::{difficulty::DifficultyCache, rx_vms::RandomXVM, weight::BlockWeightsCache},
    context::{difficulty::DifficultyCache, rx_vms::RandomXVm, weight::BlockWeightsCache},
};

pub(crate) mod sealed {

@@ -32,7 +32,7 @@ pub struct AltChainContextCache {
    pub difficulty_cache: Option<DifficultyCache>,

    /// A cached RX VM.
    pub cached_rx_vm: Option<(usize, Arc<RandomXVM>)>,
    pub cached_rx_vm: Option<(usize, Arc<RandomXVm>)>,

    /// The chain height of the alt chain.
    pub chain_height: usize,

@@ -9,7 +9,7 @@ use std::{
};

use futures::{stream::FuturesOrdered, StreamExt};
use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner};
use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VmInner};
use rayon::prelude::*;
use thread_local::ThreadLocal;
use tower::ServiceExt;

@@ -33,16 +33,16 @@ const RX_SEEDS_CACHED: usize = 2;

/// A multithreaded randomX VM.
#[derive(Debug)]
pub struct RandomXVM {
pub struct RandomXVm {
    /// These RandomX VMs all share the same cache.
    vms: ThreadLocal<VMInner>,
    vms: ThreadLocal<VmInner>,
    /// The RandomX cache.
    cache: RandomXCache,
    /// The flags used to start the RandomX VMs.
    flags: RandomXFlag,
}

impl RandomXVM {
impl RandomXVm {
    /// Create a new multithreaded randomX VM with the provided seed.
    pub fn new(seed: &[u8; 32]) -> Result<Self, RandomXError> {
        // TODO: allow passing in flags.

@@ -50,7 +50,7 @@ impl RandomXVM {

        let cache = RandomXCache::new(flags, seed.as_slice())?;

        Ok(RandomXVM {
        Ok(RandomXVm {
            vms: ThreadLocal::new(),
            cache,
            flags,

@@ -58,12 +58,12 @@ impl RandomXVM {
    }
}

impl RandomX for RandomXVM {
impl RandomX for RandomXVm {
    type Error = RandomXError;

    fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> {
        self.vms
            .get_or_try(|| VMInner::new(self.flags, Some(self.cache.clone()), None))?
            .get_or_try(|| VmInner::new(self.flags, Some(self.cache.clone()), None))?
            .calculate_hash(buf)
            .map(|out| out.try_into().unwrap())
    }

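The thread-local design means each calling thread lazily builds its own `VmInner` from the shared `RandomXCache` on first use, so hashing scales across rayon threads without locking. A hedged usage sketch (`seed` and `block_blob` are hypothetical inputs, not values from this codebase):

```rust
// Hypothetical inputs for illustration.
let seed = [0u8; 32];
let block_blob = b"example block hashing blob";

let vm = RandomXVm::new(&seed).expect("failed to create RandomX VM");
// The first call on a given thread constructs that thread's inner VM;
// later calls on the same thread reuse it.
let pow_hash: [u8; 32] = vm.calculate_hash(block_blob).expect("hashing failed");
```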
@@ -72,17 +72,17 @@ impl RandomX for RandomXVM {
/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a
/// couple more around this VM.
#[derive(Clone, Debug)]
pub struct RandomXVMCache {
pub struct RandomXVmCache {
    /// The top [`RX_SEEDS_CACHED`] RX seeds.
    pub(crate) seeds: VecDeque<(usize, [u8; 32])>,
    /// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
    pub(crate) vms: HashMap<usize, Arc<RandomXVM>>,
    pub(crate) vms: HashMap<usize, Arc<RandomXVm>>,

    /// A single cached VM that was given to us from a part of Cuprate.
    pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVM>)>,
    pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVm>)>,
}

impl RandomXVMCache {
impl RandomXVmCache {
    #[instrument(name = "init_rx_vm_cache", level = "info", skip(database))]
    pub async fn init_from_chain_height<D: Database + Clone>(
        chain_height: usize,

@@ -106,7 +106,7 @@ impl RandomXVMCache {
                .map(|(height, seed)| {
                    (
                        *height,
                        Arc::new(RandomXVM::new(seed).expect("Failed to create RandomX VM!")),
                        Arc::new(RandomXVm::new(seed).expect("Failed to create RandomX VM!")),
                    )
                })
                .collect()

@@ -117,7 +117,7 @@ impl RandomXVMCache {
            HashMap::new()
        };

        Ok(RandomXVMCache {
        Ok(RandomXVmCache {
            seeds,
            vms,
            cached_vm: None,

@@ -125,7 +125,7 @@ impl RandomXVMCache {
    }

    /// Add a randomX VM to the cache, with the seed it was created with.
    pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVM>)) {
    pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVm>)) {
        self.cached_vm.replace(vm);
    }

@@ -136,7 +136,7 @@ impl RandomXVMCache {
        height: usize,
        chain: Chain,
        database: D,
    ) -> Result<Arc<RandomXVM>, ExtendedConsensusError> {
    ) -> Result<Arc<RandomXVm>, ExtendedConsensusError> {
        let seed_height = randomx_seed_height(height);

        let BlockchainResponse::BlockHash(seed_hash) = database

@@ -156,13 +156,13 @@ impl RandomXVMCache {
            }
        }

        let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVM::new(&seed_hash).unwrap())).await;
        let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVm::new(&seed_hash).unwrap())).await;

        Ok(alt_vm)
    }

    /// Get the main-chain RandomX VMs.
    pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVM>> {
    pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVm>> {
        match self.seeds.len().checked_sub(self.vms.len()) {
            // No difference in the amount of seeds to VMs.
            Some(0) => (),

@@ -184,7 +184,7 @@ impl RandomXVMCache {
            }
        };

        rayon_spawn_async(move || Arc::new(RandomXVM::new(&next_seed_hash).unwrap()))
        rayon_spawn_async(move || Arc::new(RandomXVm::new(&next_seed_hash).unwrap()))
            .await
    };

@@ -200,7 +200,7 @@ impl RandomXVMCache {
        seeds_clone
            .par_iter()
            .map(|(height, seed)| {
                let vm = RandomXVM::new(seed).expect("Failed to create RandomX VM!");
                let vm = RandomXVm::new(seed).expect("Failed to create RandomX VM!");
                let vm = Arc::new(vm);
                (*height, vm)
            })

@@ -9,6 +9,7 @@ use tower::ServiceExt;
use tracing::Instrument;

use cuprate_consensus_rules::blocks::ContextToVerifyBlock;
use cuprate_helper::cast::u64_to_usize;
use cuprate_types::{
    blockchain::{BlockchainReadRequest, BlockchainResponse},
    Chain,

@@ -45,7 +46,7 @@ pub struct ContextTask<D: Database> {
    /// The weight cache.
    weight_cache: weight::BlockWeightsCache,
    /// The RX VM cache.
    rx_vm_cache: rx_vms::RandomXVMCache,
    rx_vm_cache: rx_vms::RandomXVmCache,
    /// The hard-fork state cache.
    hardfork_state: hardforks::HardForkState,

@@ -127,7 +128,7 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {

        let db = database.clone();
        let rx_seed_handle = tokio::spawn(async move {
            rx_vms::RandomXVMCache::init_from_chain_height(chain_height, &current_hf, db).await
            rx_vms::RandomXVmCache::init_from_chain_height(chain_height, &current_hf, db).await
        });

        let context_svc = ContextTask {

@@ -168,9 +169,9 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
                .weight_cache
                .effective_median_block_weight(&current_hf),
            top_hash: self.top_block_hash,
            median_block_timestamp: self.difficulty_cache.median_timestamp(
                usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
            ),
            median_block_timestamp: self
                .difficulty_cache
                .median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)),
            chain_height: self.chain_height,
            current_hf,
            next_difficulty: self.difficulty_cache.next_difficulty(&current_hf),

@@ -10,7 +10,7 @@
//! implement a database you need to have a service which accepts [`BlockchainReadRequest`] and responds
//! with [`BlockchainResponse`].
//!
use cuprate_consensus_rules::{ConsensusError, HardFork};
use cuprate_consensus_rules::ConsensusError;

mod batch_verifier;
pub mod block;

@@ -9,7 +9,7 @@ use cuprate_consensus_rules::{
};

use crate::{
    context::rx_vms::{get_last_rx_seed_heights, RandomXVMCache},
    context::rx_vms::{get_last_rx_seed_heights, RandomXVmCache},
    tests::mock_db::*,
};

@@ -42,7 +42,7 @@ fn rx_heights_consistent() {
async fn rx_vm_created_on_hf_12() {
    let db = DummyDatabaseBuilder::default().finish(Some(10));

    let mut cache = RandomXVMCache::init_from_chain_height(10, &HardFork::V11, db)
    let mut cache = RandomXVmCache::init_from_chain_height(10, &HardFork::V11, db)
        .await
        .unwrap();

@@ -67,7 +67,7 @@ proptest! {
        let rt = Builder::new_multi_thread().enable_all().build().unwrap();

        rt.block_on(async move {
            let cache = RandomXVMCache::init_from_chain_height(10, &hf, db).await.unwrap();
            let cache = RandomXVmCache::init_from_chain_height(10, &hf, db).await.unwrap();
            assert!(cache.seeds.len() == cache.vms.len() || hf < HardFork::V12);
        });
    }

@@ -10,14 +10,15 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"

[features]
# All features on by default.
default = ["std", "atomic", "asynch", "fs", "num", "map", "time", "thread", "constants"]
default = ["std", "atomic", "asynch", "cast", "fs", "num", "map", "time", "thread", "constants"]
std = []
atomic = ["dep:crossbeam"]
asynch = ["dep:futures", "dep:rayon"]
cast = []
constants = []
fs = ["dep:dirs"]
num = []
map = ["dep:monero-serai"]
map = ["cast", "dep:monero-serai"]
time = ["dep:chrono", "std"]
thread = ["std", "dep:target_os_lib"]

@@ -39,3 +40,6 @@ target_os_lib = { package = "libc", version = "0.2.151", optional = true }

[dev-dependencies]
tokio = { workspace = true, features = ["full"] }

[lints]
workspace = true

@@ -19,7 +19,7 @@ pub struct InfallibleOneshotReceiver<T>(oneshot::Receiver<T>);

impl<T> From<oneshot::Receiver<T>> for InfallibleOneshotReceiver<T> {
    fn from(value: oneshot::Receiver<T>) -> Self {
        InfallibleOneshotReceiver(value)
        Self(value)
    }
}

@@ -43,7 +43,7 @@ where
{
    let (tx, rx) = oneshot::channel();
    rayon::spawn(move || {
        let _ = tx.send(f());
        drop(tx.send(f()));
    });
    rx.await.expect("The sender must not be dropped")
}

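The `drop(tx.send(f()))` form makes explicit that a send error (the receiver was dropped) is intentionally discarded, matching the spirit of clippy's `let_underscore_drop` lint. A hypothetical call site for `rayon_spawn_async` (`expensive_work` is a stand-in, not a function from this codebase):

```rust
// Run blocking work on the rayon pool, then await the result from
// async code without blocking the tokio executor.
fn expensive_work() -> u64 {
    (0..1_000_000u64).sum()
}

async fn example() -> u64 {
    cuprate_helper::asynch::rayon_spawn_async(expensive_work).await
}
```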
@@ -62,7 +62,7 @@ mod test {
    #[tokio::test]
    // Assert that basic channel operations work.
    async fn infallible_oneshot_receiver() {
        let (tx, rx) = futures::channel::oneshot::channel::<String>();
        let (tx, rx) = oneshot::channel::<String>();
        let msg = "hello world!".to_string();

        tx.send(msg.clone()).unwrap();

@@ -84,7 +84,7 @@ mod test {
        let barrier = Arc::new(Barrier::new(2));
        let task = |barrier: &Barrier| barrier.wait();

        let b_2 = barrier.clone();
        let b_2 = Arc::clone(&barrier);

        let (tx, rx) = std::sync::mpsc::channel();

@@ -49,6 +49,8 @@ pub type AtomicF64 = AtomicCell<f64>;
//---------------------------------------------------------------------------------------------------- TESTS
#[cfg(test)]
mod tests {
    #![allow(clippy::float_cmp)]

    use super::*;

    #[test]

86 helper/src/cast.rs Normal file

@@ -0,0 +1,86 @@
//! Casting.
//!
//! This module provides utilities for casting between types.
//!
//! `#[no_std]` compatible.

#![allow(clippy::cast_possible_truncation)]

#[rustfmt::skip]
//============================ SAFETY: DO NOT REMOVE ===========================//
//                                                                              //
//                                                                              //
//                     Only allow building 64-bit targets.                      //
//            This allows us to assume 64-bit invariants in this file.          //
#[cfg(not(target_pointer_width = "64"))]
compile_error!("Cuprate is only compatible with 64-bit CPUs");
//                                                                              //
//                                                                              //
//============================ SAFETY: DO NOT REMOVE ===========================//

//---------------------------------------------------------------------------------------------------- Free functions
/// Cast [`u64`] to [`usize`].
#[inline(always)]
pub const fn u64_to_usize(u: u64) -> usize {
    u as usize
}

/// Cast [`u32`] to [`usize`].
#[inline(always)]
pub const fn u32_to_usize(u: u32) -> usize {
    u as usize
}

/// Cast [`usize`] to [`u64`].
#[inline(always)]
pub const fn usize_to_u64(u: usize) -> u64 {
    u as u64
}

/// Cast [`i64`] to [`isize`].
#[inline(always)]
pub const fn i64_to_isize(i: i64) -> isize {
    i as isize
}

/// Cast [`i32`] to [`isize`].
#[inline(always)]
pub const fn i32_to_isize(i: i32) -> isize {
    i as isize
}

/// Cast [`isize`] to [`i64`].
#[inline(always)]
pub const fn isize_to_i64(i: isize) -> i64 {
    i as i64
}

//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn max_unsigned() {
        assert_eq!(u32_to_usize(u32::MAX), usize::try_from(u32::MAX).unwrap());
        assert_eq!(usize_to_u64(u32_to_usize(u32::MAX)), u64::from(u32::MAX));

        assert_eq!(u64_to_usize(u64::MAX), usize::MAX);
        assert_eq!(usize_to_u64(u64_to_usize(u64::MAX)), u64::MAX);

        assert_eq!(usize_to_u64(usize::MAX), u64::MAX);
        assert_eq!(u64_to_usize(usize_to_u64(usize::MAX)), usize::MAX);
    }

    #[test]
    fn max_signed() {
        assert_eq!(i32_to_isize(i32::MAX), isize::try_from(i32::MAX).unwrap());
        assert_eq!(isize_to_i64(i32_to_isize(i32::MAX)), i64::from(i32::MAX));

        assert_eq!(i64_to_isize(i64::MAX), isize::MAX);
        assert_eq!(isize_to_i64(i64_to_isize(i64::MAX)), i64::MAX);

        assert_eq!(isize_to_i64(isize::MAX), i64::MAX);
        assert_eq!(i64_to_isize(isize_to_i64(isize::MAX)), isize::MAX);
    }
}
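Because the crate refuses to build on non-64-bit targets (the `compile_error!` above), these casts are lossless by construction. A short usage sketch:

```rust
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};

// On Cuprate's 64-bit-only targets these round-trip exactly.
let len: usize = u64_to_usize(1_024_u64);
assert_eq!(usize_to_u64(len), 1_024_u64);

// `const fn` also makes them usable in constant contexts.
const N: usize = u64_to_usize(42);
```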
191 helper/src/fs.rs

@@ -4,7 +4,7 @@
//! Note that this module's functions uses [`dirs`],
//! which adheres to the XDG standard on Linux.
//!
//! This means that the values returned by these functions
//! This means that the values returned by these statics
//! may change at runtime depending on environment variables,
//! for example:
//!

@@ -17,7 +17,7 @@
//! # if cfg!(target_os = "linux") {
//! std::env::set_var("XDG_CONFIG_HOME", "/custom/path");
//! assert_eq!(
//!     cuprate_config_dir().to_string_lossy(),
//!     CUPRATE_CONFIG_DIR.to_string_lossy(),
//!     "/custom/path/cuprate"
//! );
//! # }

@@ -28,10 +28,7 @@
//! - <https://docs.rs/dirs>

//---------------------------------------------------------------------------------------------------- Use
use std::{
    path::{Path, PathBuf},
    sync::OnceLock,
};
use std::{path::PathBuf, sync::LazyLock};

//---------------------------------------------------------------------------------------------------- Const
/// Cuprate's main directory.

@@ -62,71 +59,59 @@ pub const CUPRATE_DIR: &str = {
};

//---------------------------------------------------------------------------------------------------- Directories
/// Create a (private) `OnceLock` and accessor function for common PATHs used by Cuprate.
/// Create a `LazyLock` for common PATHs used by Cuprate.
///
/// This currently creates these directories:
/// - [`cuprate_cache_dir()`]
/// - [`cuprate_config_dir()`]
/// - [`cuprate_data_dir()`]
/// - [`cuprate_blockchain_dir()`]
///
/// FIXME: Use `LazyLock` when stabilized.
/// <https://github.com/rust-lang/rust/issues/109736>.
/// <https://doc.rust-lang.org/std/sync/struct.LazyLock.html>.
macro_rules! impl_path_oncelock_and_fn {
/// - [`CUPRATE_CACHE_DIR`]
/// - [`CUPRATE_CONFIG_DIR`]
/// - [`CUPRATE_DATA_DIR`]
/// - [`CUPRATE_BLOCKCHAIN_DIR`]
macro_rules! impl_path_lazylock {
    ($(
        $(#[$attr:meta])* // Documentation and any `derive`'s.
        $fn:ident, // Name of the corresponding access function.
        $name:ident, // Name of the corresponding `LazyLock`.
        $dirs_fn:ident, // Name of the `dirs` function to use, the PATH prefix.
        $sub_dirs:literal // Any sub-directories to add onto the PATH.
    ),* $(,)?) => {$(
        // Create the `OnceLock` if needed, append
        // Create the `LazyLock` if needed, append
        // the Cuprate directory string and return.
        $(#[$attr])*
        pub fn $fn() -> &'static Path {
            /// Local `OnceLock` containing the Path.
            static ONCE_LOCK: OnceLock<PathBuf> = OnceLock::new();
        pub static $name: LazyLock<PathBuf> = LazyLock::new(|| {
            // There's nothing we can do but panic if
            // we cannot acquire critical system directories.
            //
            // Although, this realistically won't panic on
            // normal systems for all OS's supported by `dirs`.
            let mut path = dirs::$dirs_fn().unwrap();

            ONCE_LOCK.get_or_init(|| {
                // There's nothing we can do but panic if
                // we cannot acquire critical system directories.
                //
                // Although, this realistically won't panic on
                // normal systems for all OS's supported by `dirs`.
                let mut path = dirs::$dirs_fn().unwrap();
            // FIXME:
            // Consider a user who does `HOME=/ ./cuprated`
            //
            // Should we say "that's stupid" and panic here?
            // Or should it be respected?
            // We really don't want a `rm -rf /` type of situation...
            assert!(
                path.parent().is_some(),
                "SAFETY: returned OS PATH was either root or empty, aborting"
            );

                // FIXME:
                // Consider a user who does `HOME=/ ./cuprated`
                //
                // Should we say "that's stupid" and panic here?
                // Or should it be respected?
                // We really don't want a `rm -rf /` type of situation...
                assert!(
                    path.parent().is_some(),
                    "SAFETY: returned OS PATH was either root or empty, aborting"
                );
            // Returned OS PATH should be absolute, not relative.
            assert!(path.is_absolute(), "SAFETY: returned OS PATH was not absolute");

                // Returned OS PATH should be absolute, not relative.
                assert!(path.is_absolute(), "SAFETY: returned OS PATH was not absolute");
            // Unconditionally prefix with the top-level Cuprate directory.
            path.push(CUPRATE_DIR);

                // Unconditionally prefix with the top-level Cuprate directory.
                path.push(CUPRATE_DIR);
            // Add any sub directories if specified in the macro.
            if !$sub_dirs.is_empty() {
                path.push($sub_dirs);
            }

                // Add any sub directories if specified in the macro.
                if !$sub_dirs.is_empty() {
                    path.push($sub_dirs);
                }

                path
            })
        }
            path
        });
    )*};
}

// Note that the `OnceLock`'s are prefixed with `__` to indicate:
// 1. They're not really to be used directly
// 2. To avoid name conflicts
impl_path_oncelock_and_fn! {
impl_path_lazylock! {
    /// Cuprate's cache directory.
    ///
    /// This is the PATH used for any Cuprate cache files.

@@ -136,7 +121,7 @@ impl_path_oncelock_and_fn! {
    /// | Windows | `C:\Users\Alice\AppData\Local\Cuprate\` |
    /// | macOS   | `/Users/Alice/Library/Caches/Cuprate/`  |
    /// | Linux   | `/home/alice/.cache/cuprate/`           |
    cuprate_cache_dir,
    CUPRATE_CACHE_DIR,
    cache_dir,
    "",

@@ -149,7 +134,7 @@ impl_path_oncelock_and_fn! {
    /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\`           |
    /// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/` |
    /// | Linux   | `/home/alice/.config/cuprate/`                      |
    cuprate_config_dir,
    CUPRATE_CONFIG_DIR,
    config_dir,
    "",

@@ -162,7 +147,7 @@ impl_path_oncelock_and_fn! {
    /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\`           |
    /// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/` |
    /// | Linux   | `/home/alice/.local/share/cuprate/`                 |
    cuprate_data_dir,
    CUPRATE_DATA_DIR,
    data_dir,
    "",

@@ -175,9 +160,22 @@ impl_path_oncelock_and_fn! {
    /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\`           |
    /// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` |
    /// | Linux   | `/home/alice/.local/share/cuprate/blockchain/`                 |
    cuprate_blockchain_dir,
    CUPRATE_BLOCKCHAIN_DIR,
    data_dir,
    "blockchain",

    /// Cuprate's transaction pool directory.
    ///
    /// This is the PATH used for any Cuprate txpool files.
    ///
    /// | OS      | PATH                                                       |
    /// |---------|------------------------------------------------------------|
    /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\`           |
    /// | macOS   | `/Users/Alice/Library/Application Support/Cuprate/txpool/` |
    /// | Linux   | `/home/alice/.local/share/cuprate/txpool/`                 |
    CUPRATE_TXPOOL_DIR,
    data_dir,
    "txpool",
}

//---------------------------------------------------------------------------------------------------- Tests

@@ -192,60 +190,41 @@ mod test {
    // - It must `ends_with()` the expected end PATH for the OS
    #[test]
    fn path_sanity_check() {
        assert!(cuprate_cache_dir().is_absolute());
        assert!(cuprate_config_dir().is_absolute());
        assert!(cuprate_data_dir().is_absolute());
        assert!(cuprate_blockchain_dir().is_absolute());
        // Array of (PATH, expected_path_as_string).
        //
        // The different OS's will set the expected path below.
        let mut array = [
            (&*CUPRATE_CACHE_DIR, ""),
            (&*CUPRATE_CONFIG_DIR, ""),
            (&*CUPRATE_DATA_DIR, ""),
            (&*CUPRATE_BLOCKCHAIN_DIR, ""),
            (&*CUPRATE_TXPOOL_DIR, ""),
        ];

        if cfg!(target_os = "windows") {
            let dir = cuprate_cache_dir();
            println!("cuprate_cache_dir: {dir:?}");
            assert!(dir.ends_with(r"AppData\Local\Cuprate"));

            let dir = cuprate_config_dir();
            println!("cuprate_config_dir: {dir:?}");
            assert!(dir.ends_with(r"AppData\Roaming\Cuprate"));

            let dir = cuprate_data_dir();
            println!("cuprate_data_dir: {dir:?}");
            assert!(dir.ends_with(r"AppData\Roaming\Cuprate"));

            let dir = cuprate_blockchain_dir();
            println!("cuprate_blockchain_dir: {dir:?}");
            assert!(dir.ends_with(r"AppData\Roaming\Cuprate\blockchain"));
            array[0].1 = r"AppData\Local\Cuprate";
            array[1].1 = r"AppData\Roaming\Cuprate";
            array[2].1 = r"AppData\Roaming\Cuprate";
            array[3].1 = r"AppData\Roaming\Cuprate\blockchain";
            array[4].1 = r"AppData\Roaming\Cuprate\txpool";
        } else if cfg!(target_os = "macos") {
            let dir = cuprate_cache_dir();
            println!("cuprate_cache_dir: {dir:?}");
            assert!(dir.ends_with("Library/Caches/Cuprate"));

            let dir = cuprate_config_dir();
            println!("cuprate_config_dir: {dir:?}");
            assert!(dir.ends_with("Library/Application Support/Cuprate"));

            let dir = cuprate_data_dir();
            println!("cuprate_data_dir: {dir:?}");
            assert!(dir.ends_with("Library/Application Support/Cuprate"));

            let dir = cuprate_blockchain_dir();
            println!("cuprate_blockchain_dir: {dir:?}");
            assert!(dir.ends_with("Library/Application Support/Cuprate/blockchain"));
            array[0].1 = "Library/Caches/Cuprate";
            array[1].1 = "Library/Application Support/Cuprate";
            array[2].1 = "Library/Application Support/Cuprate";
            array[3].1 = "Library/Application Support/Cuprate/blockchain";
            array[4].1 = "Library/Application Support/Cuprate/txpool";
        } else {
            // Assumes Linux.
            let dir = cuprate_cache_dir();
            println!("cuprate_cache_dir: {dir:?}");
            assert!(dir.ends_with(".cache/cuprate"));
            array[0].1 = ".cache/cuprate";
            array[1].1 = ".config/cuprate";
            array[2].1 = ".local/share/cuprate";
            array[3].1 = ".local/share/cuprate/blockchain";
            array[4].1 = ".local/share/cuprate/txpool";
        };

        let dir = cuprate_config_dir();
        println!("cuprate_config_dir: {dir:?}");
        assert!(dir.ends_with(".config/cuprate"));

        let dir = cuprate_data_dir();
        println!("cuprate_data_dir: {dir:?}");
        assert!(dir.ends_with(".local/share/cuprate"));

        let dir = cuprate_blockchain_dir();
        println!("cuprate_blockchain_dir: {dir:?}");
        assert!(dir.ends_with(".local/share/cuprate/blockchain"));
        for (path, expected) in array {
            assert!(path.is_absolute());
            assert!(path.ends_with(expected));
        }
    }
}

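After this change a caller dereferences a static instead of calling a function. A minimal sketch of the new API:

```rust
use cuprate_helper::fs::{CUPRATE_DATA_DIR, CUPRATE_TXPOOL_DIR};

// First access runs the `LazyLock` initializer (and the safety asserts);
// later accesses are just a pointer read.
assert!(CUPRATE_DATA_DIR.is_absolute());
// The txpool directory is nested under the data directory.
assert!(CUPRATE_TXPOOL_DIR.starts_with(&*CUPRATE_DATA_DIR));
```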
@@ -1,36 +1,4 @@
#![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
#![allow(clippy::len_zero, clippy::type_complexity, clippy::module_inception)]
#![deny(nonstandard_style, deprecated, missing_docs, unused_mut)]
#![forbid(
    unused_unsafe,
    future_incompatible,
    break_with_label_and_loop,
    coherence_leak_check,
    duplicate_macro_attributes,
    exported_private_dependencies,
    for_loops_over_fallibles,
    large_assignments,
    overlapping_range_endpoints,
    // private_in_public,
    semicolon_in_expressions_from_macros,
    redundant_semicolons,
    unconditional_recursion,
    unreachable_patterns,
    unused_allocation,
    unused_braces,
    unused_comparisons,
    unused_doc_comments,
    unused_parens,
    unused_labels,
    while_true,
    keyword_idents,
    non_ascii_idents,
    noop_method_call,
    unreachable_pub,
    single_use_lifetimes,
    // variant_size_differences,
)]
#![cfg_attr(not(feature = "std"), no_std)]

//---------------------------------------------------------------------------------------------------- Public API

@@ -40,6 +8,9 @@ pub mod asynch; // async collides
#[cfg(feature = "atomic")]
pub mod atomic;

#[cfg(feature = "cast")]
pub mod cast;

#[cfg(feature = "constants")]
pub mod constants;

@@ -7,6 +7,8 @@
//---------------------------------------------------------------------------------------------------- Use
use monero_serai::transaction::Timelock;

use crate::cast::{u64_to_usize, usize_to_u64};

//---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128`
/// Split a [`u128`] value into 2 64-bit values.
///

@@ -27,6 +29,7 @@ use monero_serai::transaction::Timelock;
/// ```
#[inline]
pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) {
    #[allow(clippy::cast_possible_truncation)]
    (value as u64, (value >> 64) as u64)
}

@@ -58,7 +61,7 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u128
/// Map a [`u64`] to a [`Timelock`].
///
/// Height/time is not differentiated via type, but rather:
/// "height is any value less than 500_000_000 and timestamp is any value above"
/// "height is any value less than `500_000_000` and timestamp is any value above"
/// so the `u64/usize` is stored without any tag.
///
/// See [`timelock_to_u64`] for the inverse function.

@@ -73,11 +76,11 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u128
/// assert_eq!(u64_to_timelock(499_999_999), Timelock::Block(499_999_999));
/// assert_eq!(u64_to_timelock(500_000_000), Timelock::Time(500_000_000));
/// ```
pub fn u64_to_timelock(u: u64) -> Timelock {
pub const fn u64_to_timelock(u: u64) -> Timelock {
    if u == 0 {
        Timelock::None
    } else if u < 500_000_000 {
        Timelock::Block(usize::try_from(u).unwrap())
        Timelock::Block(u64_to_usize(u))
    } else {
        Timelock::Time(u)
    }

@@ -94,10 +97,10 @@ pub fn u64_to_timelock(u: u64) -> Timelock {
/// assert_eq!(timelock_to_u64(Timelock::Block(499_999_999)), 499_999_999);
/// assert_eq!(timelock_to_u64(Timelock::Time(500_000_000)), 500_000_000);
/// ```
pub fn timelock_to_u64(timelock: Timelock) -> u64 {
pub const fn timelock_to_u64(timelock: Timelock) -> u64 {
    match timelock {
        Timelock::None => 0,
        Timelock::Block(u) => u64::try_from(u).unwrap(),
        Timelock::Block(u) => usize_to_u64(u),
        Timelock::Time(u) => u,
    }
}

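The doc examples above already pin the boundary; the round-trip sketch below restates it, and is now valid in `const` contexts since both functions are `const fn`:

```rust
use cuprate_helper::map::{timelock_to_u64, u64_to_timelock};
use monero_serai::transaction::Timelock;

// Below the 500_000_000 boundary a value is a block height...
assert_eq!(u64_to_timelock(123), Timelock::Block(123));
// ...at or above it, a UNIX timestamp.
assert_eq!(u64_to_timelock(500_000_000), Timelock::Time(500_000_000));
// And the mapping round-trips.
assert_eq!(timelock_to_u64(u64_to_timelock(123)), 123);
```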
@@ -30,11 +30,11 @@ pub enum Network {

impl Network {
    /// Returns the network ID for the current network.
    pub fn network_id(&self) -> [u8; 16] {
    pub const fn network_id(&self) -> [u8; 16] {
        match self {
            Network::Mainnet => MAINNET_NETWORK_ID,
            Network::Testnet => TESTNET_NETWORK_ID,
            Network::Stagenet => STAGENET_NETWORK_ID,
            Self::Mainnet => MAINNET_NETWORK_ID,
            Self::Testnet => TESTNET_NETWORK_ID,
            Self::Stagenet => STAGENET_NETWORK_ID,
        }
    }
}

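Making `network_id` a `const fn` lets the ID be computed at compile time. A small sketch, assuming `Network` is in scope as above:

```rust
// Evaluated at compile time now that `network_id` is `const`.
const MAINNET_ID: [u8; 16] = Network::Mainnet.network_id();

fn main() {
    assert_eq!(MAINNET_ID, Network::Mainnet.network_id());
}
```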
@@ -89,8 +89,9 @@ where
/// assert_eq!(median(vec), 5);
/// ```
///
/// # Safety
/// # Invariant
/// If not sorted the output will be invalid.
#[allow(clippy::debug_assert_with_mut_call)]
pub fn median<T>(array: impl AsRef<[T]>) -> T
where
    T: Add<Output = T>

@@ -28,10 +28,10 @@ macro_rules! impl_thread_percent {
    $(
        $(#[$doc])*
        pub fn $fn_name() -> NonZeroUsize {
            // SAFETY:
            // unwrap here is okay because:
            // - THREADS().get() is always non-zero
            // - max() guards against 0
            #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss)]
            NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap()
        }
    )*

@@ -58,10 +58,10 @@ impl_thread_percent! {
/// Originally from <https://docs.rs/lpt>.
///
/// # Windows
/// Uses SetThreadPriority() with THREAD_PRIORITY_IDLE (-15).
/// Uses `SetThreadPriority()` with `THREAD_PRIORITY_IDLE` (-15).
///
/// # Unix
/// Uses libc::nice() with the max nice level.
/// Uses `libc::nice()` with the max nice level.
///
/// On macOS and *BSD: +20
/// On Linux: +19

@@ -74,7 +74,7 @@ pub fn low_priority_thread() {
    // SAFETY: calling C.
    // We are _lowering_ our priority, not increasing, so this function should never fail.
    unsafe {
        let _ = SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE);
        drop(SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE));
    }
}

@@ -87,7 +87,7 @@ pub fn low_priority_thread() {
    // SAFETY: calling C.
    // We are _lowering_ our priority, not increasing, so this function should never fail.
    unsafe {
        let _ = libc::nice(NICE_MAX);
        libc::nice(NICE_MAX);
    }
}

@@ -129,6 +129,7 @@ pub const fn secs_to_clock(seconds: u32) -> (u8, u8, u8) {
    debug_assert!(m < 60);
    debug_assert!(s < 60);

    #[allow(clippy::cast_possible_truncation)] // checked above
    (h as u8, m, s)
}

@@ -153,6 +154,7 @@ pub fn time() -> u32 {
///
/// This is guaranteed to return a value between `0..=86399`
pub fn time_utc() -> u32 {
    #[allow(clippy::cast_sign_loss)] // checked in function calls
    unix_clock(chrono::offset::Local::now().timestamp() as u64)
}

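A quick sketch of the clock helper these hunks touch (the signature comes from the hunk header above; the `cuprate_helper::time` module path is assumed):

```rust
use cuprate_helper::time::secs_to_clock;

// 3661 seconds into the day = 01:01:01.
assert_eq!(secs_to_clock(3661), (1, 1, 1));
// Midnight.
assert_eq!(secs_to_clock(0), (0, 0, 0));
```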
@@ -87,4 +87,4 @@ On Rust-analyzer's VSCode plugin, you can add the following configuration if you

If you still experience lag in VSCode or Neovim, you could try the following IDEs:
- RustRover: It has been reported to perform excellently on huge workspaces. It uses its own fine-tuned plugins by JetBrains.
- Zed: Rust-written IDE focused on performance. Still in beta and macOS only.
- Zed: Rust-written IDE focused on performance. Stable on macOS and Linux (requires a Vulkan driver, so it is unusable in most virtual machines).

@@ -15,6 +15,7 @@ default = ["std"]
std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"]

[dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false }

paste = "1.0.14"

@@ -65,6 +65,8 @@ use core::{ops::Deref, str::from_utf8 as str_from_utf8};

use bytes::{Buf, BufMut, Bytes, BytesMut};

use cuprate_helper::cast::{u64_to_usize, usize_to_u64};

pub mod container_as_blob;
pub mod error;
mod io;

@@ -242,7 +244,7 @@ pub fn write_bytes<T: AsRef<[u8]>, B: BufMut>(t: T, w: &mut B) -> Result<()> {
    let bytes = t.as_ref();
    let len = bytes.len();

    write_varint(len.try_into()?, w)?;
    write_varint(usize_to_u64(len), w)?;

    if w.remaining_mut() < len {
        return Err(Error::IO("Not enough capacity to write bytes"));

@@ -286,7 +288,7 @@ where
    I: Iterator<Item = T> + ExactSizeIterator,
    B: BufMut,
{
    write_varint(iterator.len().try_into()?, w)?;
    write_varint(usize_to_u64(iterator.len()), w)?;
    for item in iterator.into_iter() {
        item.write(w)?;
    }

@@ -334,7 +336,7 @@ fn skip_epee_value<B: Buf>(r: &mut B, skipped_objects: &mut u8) -> Result<()> {

    if let Some(size) = marker.inner_marker.size() {
        let bytes_to_skip = size
            .checked_mul(len.try_into()?)
            .checked_mul(u64_to_usize(len))
            .ok_or(Error::Value("List is too big".to_string()))?;
        return advance(bytes_to_skip, r);
    };

@@ -352,8 +354,8 @@ fn skip_epee_value<B: Buf>(r: &mut B, skipped_objects: &mut u8) -> Result<()> {
        | InnerMarker::U8
        | InnerMarker::Bool => unreachable!("These types are constant size."),
        InnerMarker::String => {
            let len = read_varint(r)?;
            advance(len.try_into()?, r)?;
            let len = u64_to_usize(read_varint(r)?);
            advance(len, r)?;
        }
        InnerMarker::Object => {
            *skipped_objects += 1;

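The pattern these hunks repeat is identical: a length that travels on the wire as a `u64` varint is converted with the infallible cast before being used as a buffer size, replacing scattered `try_into()?` calls. A condensed, hypothetical sketch (not the crate's exact code):

```rust
use bytes::Buf;
use cuprate_helper::cast::u64_to_usize;

// `len_u64` stands in for a value returned by the crate's varint reader.
fn read_len_prefixed<B: Buf>(r: &mut B, len_u64: u64) -> Option<Vec<u8>> {
    // Lossless on Cuprate's 64-bit-only targets.
    let len = u64_to_usize(len_u64);
    if r.remaining() < len {
        return None; // the real code returns `Error::IO(...)`
    }
    let mut buf = vec![0; len];
    r.copy_to_slice(&mut buf);
    Some(buf)
}
```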
@@ -7,6 +7,8 @@ use core::fmt::Debug;
use bytes::{Buf, BufMut, Bytes, BytesMut};

use cuprate_fixed_bytes::{ByteArray, ByteArrayVec};
use cuprate_helper::cast::u64_to_usize;

use crate::{
    io::{checked_read_primitive, checked_write_primitive},

@@ -66,11 +67,11 @@ impl<T: EpeeObject> EpeeValue for Vec<T> {
            "Marker is not sequence when a sequence was expected",
        ));
    }
    let len = read_varint(r)?;
    let len = u64_to_usize(read_varint(r)?);

    let individual_marker = Marker::new(marker.inner_marker);

    let mut res = Vec::with_capacity(len.try_into()?);
    let mut res = Vec::with_capacity(len);
    for _ in 0..len {
        res.push(T::read(r, &individual_marker)?);
    }

@@ -167,11 +168,13 @@ impl EpeeValue for Vec<u8> {
        return Err(Error::Format("Byte array exceeded max length"));
    }

    if r.remaining() < len.try_into()? {
    let len = u64_to_usize(len);

    if r.remaining() < len {
        return Err(Error::IO("Not enough bytes to fill object"));
    }

    let mut res = vec![0; len.try_into()?];
    let mut res = vec![0; len];
    r.copy_to_slice(&mut res);

    Ok(res)

@@ -203,11 +206,13 @@ impl EpeeValue for Bytes {
        return Err(Error::Format("Byte array exceeded max length"));
    }

    if r.remaining() < len.try_into()? {
    let len = u64_to_usize(len);

    if r.remaining() < len {
        return Err(Error::IO("Not enough bytes to fill object"));
    }

    Ok(r.copy_to_bytes(len.try_into()?))
    Ok(r.copy_to_bytes(len))
}

fn epee_default_value() -> Option<Self> {

@@ -236,11 +241,13 @@ impl EpeeValue for BytesMut {
        return Err(Error::Format("Byte array exceeded max length"));
    }

    if r.remaining() < len.try_into()? {
    let len = u64_to_usize(len);

    if r.remaining() < len {
        return Err(Error::IO("Not enough bytes to fill object"));
    }

    let mut bytes = BytesMut::zeroed(len.try_into()?);
    let mut bytes = BytesMut::zeroed(len);
    r.copy_to_slice(&mut bytes);

    Ok(bytes)

@@ -272,11 +279,13 @@ impl<const N: usize> EpeeValue for ByteArrayVec<N> {
        return Err(Error::Format("Byte array exceeded max length"));
    }

    if r.remaining() < usize::try_from(len)? {
    let len = u64_to_usize(len);

    if r.remaining() < len {
        return Err(Error::IO("Not enough bytes to fill object"));
    }

    ByteArrayVec::try_from(r.copy_to_bytes(usize::try_from(len)?))
    ByteArrayVec::try_from(r.copy_to_bytes(len))
        .map_err(|_| Error::Format("Field has invalid length"))
}

@@ -302,7 +311,7 @@ impl<const N: usize> EpeeValue for ByteArray<N> {
        return Err(Error::Format("Marker does not match expected Marker"));
    }

    let len: usize = read_varint(r)?.try_into()?;
    let len = u64_to_usize(read_varint(r)?);
    if len != N {
        return Err(Error::Format("Byte array has incorrect length"));
    }

@@ -370,11 +379,11 @@ impl<const N: usize> EpeeValue for Vec<[u8; N]> {
        ));
    }

    let len = read_varint(r)?;
    let len = u64_to_usize(read_varint(r)?);

    let individual_marker = Marker::new(marker.inner_marker);

    let mut res = Vec::with_capacity(len.try_into()?);
    let mut res = Vec::with_capacity(len);
    for _ in 0..len {
        res.push(<[u8; N]>::read(r, &individual_marker)?);
    }

@@ -406,11 +415,11 @@ macro_rules! epee_seq {
        ));
    }

    let len = read_varint(r)?;
    let len = u64_to_usize(read_varint(r)?);

    let individual_marker = Marker::new(marker.inner_marker.clone());

    let mut res = Vec::with_capacity(len.try_into()?);
    let mut res = Vec::with_capacity(len);
    for _ in 0..len {
        res.push(<$val>::read(r, &individual_marker)?);
    }

@@ -9,12 +9,12 @@ epee_object!(
    a: u8,
);

struct TT {
struct T2 {
    a: u8,
}

epee_object!(
    TT,
    T2,
    a: u8 = 0,
);

@@ -35,5 +35,5 @@ fn duplicate_key_with_default() {
        b'a', 0x0B, 0x00,
    ];

    assert!(from_bytes::<TT, _>(&mut &data[..]).is_err());
    assert!(from_bytes::<T2, _>(&mut &data[..]).is_err());
}

@@ -12,6 +12,8 @@ default = []
tracing = ["dep:tracing", "tokio-util/tracing"]

[dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }

thiserror = { workspace = true }
bytes = { workspace = true, features = ["std"] }
bitflags = { workspace = true }

@ -20,6 +20,8 @@ use std::{fmt::Debug, marker::PhantomData};
|
|||
use bytes::{Buf, BufMut, BytesMut};
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
|
||||
use cuprate_helper::cast::u64_to_usize;
|
||||
|
||||
use crate::{
|
||||
header::{Flags, HEADER_SIZE},
|
||||
message::{make_dummy_message, LevinMessage},
|
||||
|
@@ -114,10 +116,7 @@ impl<C: LevinCommand + Debug> Decoder for LevinBucketCodec<C> {
                     std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head));
                 }
                 LevinBucketState::WaitingForBody(head) => {
-                    let body_len = head
-                        .size
-                        .try_into()
-                        .map_err(|_| BucketError::BucketExceededMaxSize)?;
+                    let body_len = u64_to_usize(head.size);
                     if src.len() < body_len {
                         src.reserve(body_len - src.len());
                         return Ok(None);
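
The hunk above keeps the usual `tokio_util` `Decoder` idiom: when fewer than `body_len` bytes have arrived, reserve the missing capacity and return `Ok(None)` so the framed reader polls again once more data is buffered. A self-contained toy decoder showing the same pattern (this is an illustration, not the Levin codec itself):

```rust
use bytes::{Buf, Bytes, BytesMut};
use tokio_util::codec::Decoder;

/// Toy length-prefixed decoder: a little-endian `u64` length header
/// followed by that many body bytes.
struct LenPrefixed;

impl Decoder for LenPrefixed {
    type Item = Bytes;
    type Error = std::io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Bytes>, Self::Error> {
        if src.len() < 8 {
            return Ok(None); // header not fully buffered yet
        }
        let body_len = u64::from_le_bytes(src[0..8].try_into().unwrap()) as usize;
        if src.len() < 8 + body_len {
            // Pre-allocate what is still missing, then ask to be called again.
            src.reserve(8 + body_len - src.len());
            return Ok(None);
        }
        src.advance(8);
        Ok(Some(src.split_to(body_len).freeze()))
    }
}
```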

@@ -255,13 +254,11 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
                         continue;
                     };
 
-                    let max_size = if self.bucket_codec.handshake_message_seen {
+                    let max_size = u64_to_usize(if self.bucket_codec.handshake_message_seen {
                         self.bucket_codec.protocol.max_packet_size
                     } else {
                         self.bucket_codec.protocol.max_packet_size_before_handshake
-                    }
-                    .try_into()
-                    .expect("Levin max message size is too large, does not fit into a usize.");
+                    });
 
                     if bytes.len().saturating_add(bucket.body.len()) > max_size {
                         return Err(BucketError::InvalidFragmentedMessage(
@@ -300,12 +297,7 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
                     }
 
                     // Check the fragmented message contains enough bytes to build the message.
-                    if bytes.len().saturating_sub(HEADER_SIZE)
-                        < header
-                            .size
-                            .try_into()
-                            .map_err(|_| BucketError::BucketExceededMaxSize)?
-                    {
+                    if bytes.len().saturating_sub(HEADER_SIZE) < u64_to_usize(header.size) {
                         return Err(BucketError::InvalidFragmentedMessage(
                             "Fragmented message does not have enough bytes to fill bucket body",
                         ));
@@ -38,6 +38,8 @@ use std::fmt::Debug;
 use bytes::{Buf, Bytes};
 use thiserror::Error;
 
+use cuprate_helper::cast::usize_to_u64;
+
 pub mod codec;
 pub mod header;
 pub mod message;
@@ -212,7 +214,7 @@ impl<C: LevinCommand> BucketBuilder<C> {
         Bucket {
             header: BucketHead {
                 signature: self.signature.unwrap(),
-                size: body.len().try_into().unwrap(),
+                size: usize_to_u64(body.len()),
                 have_to_return_data: ty.have_to_return_data(),
                 command: self.command.unwrap(),
                 return_code: self.return_code.unwrap(),

@@ -5,6 +5,8 @@
 //! for more control over what is actually sent over the wire at certain times.
 use bytes::{Bytes, BytesMut};
 
+use cuprate_helper::cast::usize_to_u64;
+
 use crate::{
     header::{Flags, HEADER_SIZE},
     Bucket, BucketBuilder, BucketError, BucketHead, LevinBody, LevinCommand, Protocol,
@@ -106,9 +108,7 @@ pub fn make_fragmented_messages<T: LevinBody>(
             new_body.resize(fragment_size - HEADER_SIZE, 0);
 
             bucket.body = new_body.freeze();
-            bucket.header.size = (fragment_size - HEADER_SIZE)
-                .try_into()
-                .expect("Bucket size does not fit into u64");
+            bucket.header.size = usize_to_u64(fragment_size - HEADER_SIZE);
         }
 
         return Ok(vec![bucket]);
@@ -118,9 +118,7 @@ pub fn make_fragmented_messages<T: LevinBody>(
     // The first fragment will set the START flag, the last will set the END flag.
     let fragment_head = BucketHead {
         signature: protocol.signature,
-        size: (fragment_size - HEADER_SIZE)
-            .try_into()
-            .expect("Bucket size does not fit into u64"),
+        size: usize_to_u64(fragment_size - HEADER_SIZE),
         have_to_return_data: false,
         // Just use a default command.
         command: T::Command::from(0),
@@ -191,7 +189,7 @@ pub(crate) fn make_dummy_message<T: LevinCommand>(protocol: &Protocol, size: usize)
     // A header to put on the dummy message.
     let header = BucketHead {
         signature: protocol.signature,
-        size: size.try_into().expect("Bucket size does not fit into u64"),
+        size: usize_to_u64(size),
         have_to_return_data: false,
        // Just use a default command.
        command: T::from(0),
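
For context on the `fragment_size - HEADER_SIZE` expressions above: every fragment bucket (including the padded final one) carries a fixed body size, so each bucket serializes to exactly `fragment_size` bytes on the wire. A quick worked check, with illustrative numbers (Levin's bucket header is 33 bytes; `fragment_size` is chosen by the caller):

```rust
const HEADER_SIZE: usize = 33; // Levin bucket header size

fn main() {
    let fragment_size = 1024;
    let body_per_fragment = fragment_size - HEADER_SIZE; // 991 payload bytes each

    // A 3000-byte message needs ceil(3000 / 991) = 4 fragments,
    // with the last one padded so every bucket is exactly `fragment_size`.
    let message_len = 3000_usize;
    let fragments = message_len.div_ceil(body_per_fragment);
    assert_eq!(fragments, 4);
    assert_eq!(fragments * fragment_size, 4096); // total bytes on the wire
}
```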

@@ -8,6 +8,8 @@ use tokio::{
 };
 use tokio_util::codec::{FramedRead, FramedWrite};
 
+use cuprate_helper::cast::u64_to_usize;
+
 use cuprate_levin::{
     message::make_fragmented_messages, BucketBuilder, BucketError, LevinBody, LevinCommand,
     LevinMessageCodec, MessageType, Protocol,
@@ -54,7 +56,7 @@ impl LevinBody for TestBody {
         _: MessageType,
         _: Self::Command,
     ) -> Result<Self, BucketError> {
-        let size = body.get_u64_le().try_into().unwrap();
+        let size = u64_to_usize(body.get_u64_le());
         // bucket
         Ok(TestBody::Bytes(size, body.copy_to_bytes(size)))
     }

@@ -15,6 +15,7 @@ cuprate-levin = { path = "../levin" }
 cuprate-epee-encoding = { path = "../epee-encoding" }
 cuprate-fixed-bytes = { path = "../fixed-bytes" }
 cuprate-types = { path = "../../types", default-features = false, features = ["epee"] }
+cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
 
 bitflags = { workspace = true, features = ["std"] }
 bytes = { workspace = true, features = ["std"] }
@@ -99,7 +99,7 @@ impl LevinCommandTrait for LevinCommand {
             LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB
             LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB
 
-            LevinCommand::Unknown(_) => usize::MAX.try_into().unwrap_or(u64::MAX),
+            LevinCommand::Unknown(_) => u64::MAX,
         }
     }
 

@@ -2,17 +2,17 @@
 //!
 //! This crate implements [dandelion++](https://arxiv.org/pdf/1805.11060.pdf), using [`tower`].
 //!
-//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPool`](pool::DandelionPool).
+//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPoolManager`](pool::DandelionPoolManager).
 //! The router is pretty minimal and only handles the absolute necessary data to route transactions, whereas the
 //! pool keeps track of all data necessary for dandelion++ but requires you to provide a backing tx-pool.
 //!
-//! This split was done not because the [`DandelionPool`](pool::DandelionPool) is unnecessary but because it is hard
-//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPool`](pool::DandelionPool)
+//! This split was done not because the [`DandelionPoolManager`](pool::DandelionPoolManager) is unnecessary but because it is hard
+//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPoolManager`](pool::DandelionPoolManager)
 //! requires you to implement part of the paper yourself.
 //!
 //! # Features
 //!
-//! This crate only has one feature `txpool` which enables [`DandelionPool`](pool::DandelionPool).
+//! This crate only has one feature `txpool` which enables [`DandelionPoolManager`](pool::DandelionPoolManager).
 //!
 //! # Needed Services
 //!
@@ -45,7 +45,7 @@
 //!
 //! ## Backing Pool
 //!
-//! ([`DandelionPool`](pool::DandelionPool) only)
+//! ([`DandelionPoolManager`](pool::DandelionPoolManager) only)
 //!
 //! This service is a backing tx-pool, in memory or on disk.
 //! The backing pool should have a request of [`TxStoreRequest`](traits::TxStoreRequest) and a response of

@@ -1,509 +0,0 @@
-//! # Dandelion++ Pool
-//!
-//! This module contains [`DandelionPool`] which is a thin wrapper around a backing transaction store,
-//! which fully implements the dandelion++ protocol.
-//!
-//! ### How To Get Txs From [`DandelionPool`].
-//!
-//! [`DandelionPool`] does not provide a full tx-pool API. You cannot retrieve transactions from it or
-//! check what transactions are in it, to do this you must keep a handle to the backing transaction store
-//! yourself.
-//!
-//! The reason for this is, the [`DandelionPool`] will only itself be passing these requests onto the backing
-//! pool, so it makes sense to remove the "middle man".
-//!
-//! ### Keep Stem Transactions Hidden
-//!
-//! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden.
-//! So handle any requests to the tx-pool like the stem side of the pool does not exist.
-use std::{
-    collections::{HashMap, HashSet},
-    future::Future,
-    hash::Hash,
-    marker::PhantomData,
-    pin::Pin,
-    task::{Context, Poll},
-    time::Duration,
-};
-
-use futures::{FutureExt, StreamExt};
-use rand::prelude::*;
-use rand_distr::Exp;
-use tokio::{
-    sync::{mpsc, oneshot},
-    task::JoinSet,
-};
-use tokio_util::{sync::PollSender, time::DelayQueue};
-use tower::{Service, ServiceExt};
-use tracing::Instrument;
-
-use crate::{
-    traits::{TxStoreRequest, TxStoreResponse},
-    DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState,
-};
-
-/// Start the [`DandelionPool`].
-///
-/// This function spawns the [`DandelionPool`] and returns [`DandelionPoolService`] which can be used to send
-/// requests to the pool.
-///
-/// ### Args
-///
-/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPool`].
-/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow
-/// user to customise routing functionality.
-/// - `backing_pool` is the backing transaction storage service
-/// - `config` is [`DandelionConfig`].
-pub fn start_dandelion_pool<P, R, Tx, TxID, PID>(
-    buffer_size: usize,
-    dandelion_router: R,
-    backing_pool: P,
-    config: DandelionConfig,
-) -> DandelionPoolService<Tx, TxID, PID>
-where
-    Tx: Clone + Send + 'static,
-    TxID: Hash + Eq + Clone + Send + 'static,
-    PID: Hash + Eq + Clone + Send + 'static,
-    P: Service<
-            TxStoreRequest<Tx, TxID>,
-            Response = TxStoreResponse<Tx, TxID>,
-            Error = tower::BoxError,
-        > + Send
-        + 'static,
-    P::Future: Send + 'static,
-    R: Service<DandelionRouteReq<Tx, PID>, Response = State, Error = DandelionRouterError>
-        + Send
-        + 'static,
-    R::Future: Send + 'static,
-{
-    let (tx, rx) = mpsc::channel(buffer_size);
-
-    let pool = DandelionPool {
-        dandelion_router,
-        backing_pool,
-        routing_set: JoinSet::new(),
-        stem_origins: HashMap::new(),
-        embargo_timers: DelayQueue::new(),
-        embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(),
-        config,
-        _tx: PhantomData,
-    };
-
-    let span = tracing::debug_span!("dandelion_pool");
-
-    tokio::spawn(pool.run(rx).instrument(span));
-
-    DandelionPoolService {
-        tx: PollSender::new(tx),
-    }
-}
-
-#[derive(Copy, Clone, Debug, thiserror::Error)]
-#[error("The dandelion pool was shutdown")]
-pub struct DandelionPoolShutDown;
-
-/// An incoming transaction for the [`DandelionPool`] to handle.
-///
-/// Users may notice there is no way to check if the dandelion-pool wants a tx according to an inventory message like seen
-/// in Bitcoin, only having a request for a full tx. Users should look in the *public* backing pool to handle inv messages,
-/// and request txs even if they are in the stem pool.
-pub struct IncomingTx<Tx, TxID, PID> {
-    /// The transaction.
-    ///
-    /// It is recommended to put this in an [`Arc`](std::sync::Arc) as it needs to be cloned to send to the backing
-    /// tx pool and [`DandelionRouter`](crate::DandelionRouter)
-    pub tx: Tx,
-    /// The transaction ID.
-    pub tx_id: TxID,
-    /// The routing state of this transaction.
-    pub tx_state: TxState<PID>,
-}
-
-/// The dandelion tx pool service.
-#[derive(Clone)]
-pub struct DandelionPoolService<Tx, TxID, PID> {
-    /// The channel to [`DandelionPool`].
-    tx: PollSender<(IncomingTx<Tx, TxID, PID>, oneshot::Sender<()>)>,
-}
-
-impl<Tx, TxID, PID> Service<IncomingTx<Tx, TxID, PID>> for DandelionPoolService<Tx, TxID, PID>
-where
-    Tx: Clone + Send,
-    TxID: Hash + Eq + Clone + Send + 'static,
-    PID: Hash + Eq + Clone + Send + 'static,
-{
-    type Response = ();
-    type Error = DandelionPoolShutDown;
-    type Future =
-        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
-
-    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-        self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown)
-    }
-
-    fn call(&mut self, req: IncomingTx<Tx, TxID, PID>) -> Self::Future {
-        // although the channel isn't sending anything we want to wait for the request to be handled before continuing.
-        let (tx, rx) = oneshot::channel();
-
-        let res = self
-            .tx
-            .send_item((req, tx))
-            .map_err(|_| DandelionPoolShutDown);
-
-        async move {
-            res?;
-            rx.await.expect("Oneshot dropped before response!");
-
-            Ok(())
-        }
-        .boxed()
-    }
-}
-
-/// The dandelion++ tx pool.
-///
-/// See the [module docs](self) for more.
-pub struct DandelionPool<P, R, Tx, TxID, PID> {
-    /// The dandelion++ router
-    dandelion_router: R,
-    /// The backing tx storage.
-    backing_pool: P,
-    /// The set of tasks that are running the future returned from `dandelion_router`.
-    routing_set: JoinSet<(TxID, Result<State, TxState<PID>>)>,
-
-    /// The origin of stem transactions.
-    stem_origins: HashMap<TxID, HashSet<PID>>,
-
-    /// Current stem pool embargo timers.
-    embargo_timers: DelayQueue<TxID>,
-    /// The distrobution to sample to get embargo timers.
-    embargo_dist: Exp<f64>,
-
-    /// The d++ config.
-    config: DandelionConfig,
-
-    _tx: PhantomData<Tx>,
-}
-
-impl<P, R, Tx, TxID, PID> DandelionPool<P, R, Tx, TxID, PID>
-where
-    Tx: Clone + Send,
-    TxID: Hash + Eq + Clone + Send + 'static,
-    PID: Hash + Eq + Clone + Send + 'static,
-    P: Service<
-        TxStoreRequest<Tx, TxID>,
-        Response = TxStoreResponse<Tx, TxID>,
-        Error = tower::BoxError,
-    >,
-    P::Future: Send + 'static,
-    R: Service<DandelionRouteReq<Tx, PID>, Response = State, Error = DandelionRouterError>,
-    R::Future: Send + 'static,
-{
-    /// Stores the tx in the backing pools stem pool, setting the embargo timer, stem origin and steming the tx.
-    async fn store_tx_and_stem(
-        &mut self,
-        tx: Tx,
-        tx_id: TxID,
-        from: Option<PID>,
-    ) -> Result<(), tower::BoxError> {
-        self.backing_pool
-            .ready()
-            .await?
-            .call(TxStoreRequest::Store(
-                tx.clone(),
-                tx_id.clone(),
-                State::Stem,
-            ))
-            .await?;
-
-        let embargo_timer = self.embargo_dist.sample(&mut thread_rng());
-        tracing::debug!(
-            "Setting embargo timer for stem tx: {} seconds.",
-            embargo_timer
-        );
-        self.embargo_timers
-            .insert(tx_id.clone(), Duration::from_secs_f64(embargo_timer));
-
-        self.stem_tx(tx, tx_id, from).await
-    }
-
-    /// Stems the tx, setting the stem origin, if it wasn't already set.
-    ///
-    /// This function does not add the tx to the backing pool.
-    async fn stem_tx(
-        &mut self,
-        tx: Tx,
-        tx_id: TxID,
-        from: Option<PID>,
-    ) -> Result<(), tower::BoxError> {
-        if let Some(peer) = &from {
-            self.stem_origins
-                .entry(tx_id.clone())
-                .or_default()
-                .insert(peer.clone());
-        }
-
-        let state = from
-            .map(|from| TxState::Stem { from })
-            .unwrap_or(TxState::Local);
-
-        let fut = self
-            .dandelion_router
-            .ready()
-            .await?
-            .call(DandelionRouteReq {
-                tx,
-                state: state.clone(),
-            });
-
-        self.routing_set
-            .spawn(fut.map(|res| (tx_id, res.map_err(|_| state))));
-        Ok(())
-    }
-
-    /// Stores the tx in the backing pool and fluffs the tx, removing the stem data for this tx.
-    async fn store_and_fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> {
-        // fluffs the tx first to prevent timing attacks where we could fluff at different average times
-        // depending on if the tx was in the stem pool already or not.
-        // Massively overkill but this is a minimal change.
-        self.fluff_tx(tx.clone(), tx_id.clone()).await?;
-
-        // Remove the tx from the maps used during the stem phase.
-        self.stem_origins.remove(&tx_id);
-
-        self.backing_pool
-            .ready()
-            .await?
-            .call(TxStoreRequest::Store(tx, tx_id, State::Fluff))
-            .await?;
-
-        // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
-        // map. These timers should be relatively short, so it shouldn't be a problem.
-        //self.embargo_timers.try_remove(&tx_id);
-
-        Ok(())
-    }
-
-    /// Fluffs a tx, does not add the tx to the tx pool.
-    async fn fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> {
-        let fut = self
-            .dandelion_router
-            .ready()
-            .await?
-            .call(DandelionRouteReq {
-                tx,
-                state: TxState::Fluff,
-            });
-
-        self.routing_set
-            .spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff))));
-        Ok(())
-    }
-
-    /// Function to handle an incoming [`DandelionPoolRequest::IncomingTx`].
-    async fn handle_incoming_tx(
-        &mut self,
-        tx: Tx,
-        tx_state: TxState<PID>,
-        tx_id: TxID,
-    ) -> Result<(), tower::BoxError> {
-        let TxStoreResponse::Contains(have_tx) = self
-            .backing_pool
-            .ready()
-            .await?
-            .call(TxStoreRequest::Contains(tx_id.clone()))
-            .await?
-        else {
-            panic!("Backing tx pool responded with wrong response for request.");
-        };
-        // If we have already fluffed this tx then we don't need to do anything.
-        if have_tx == Some(State::Fluff) {
-            tracing::debug!("Already fluffed incoming tx, ignoring.");
-            return Ok(());
-        }
-
-        match tx_state {
-            TxState::Stem { from } => {
-                if self
-                    .stem_origins
-                    .get(&tx_id)
-                    .is_some_and(|peers| peers.contains(&from))
-                {
-                    tracing::debug!("Received stem tx twice from same peer, fluffing it");
-                    // The same peer sent us a tx twice, fluff it.
-                    self.promote_and_fluff_tx(tx_id).await
-                } else {
-                    // This could be a new tx or it could have already been stemed, but we still stem it again
-                    // unless the same peer sends us a tx twice.
-                    tracing::debug!("Steming incoming tx");
-                    self.store_tx_and_stem(tx, tx_id, Some(from)).await
-                }
-            }
-            TxState::Fluff => {
-                tracing::debug!("Fluffing incoming tx");
-                self.store_and_fluff_tx(tx, tx_id).await
-            }
-            TxState::Local => {
-                // If we have already stemed this tx then nothing to do.
-                if have_tx.is_some() {
-                    tracing::debug!("Received a local tx that we already have, skipping");
-                    return Ok(());
-                }
-                tracing::debug!("Steming local transaction");
-                self.store_tx_and_stem(tx, tx_id, None).await
-            }
-        }
-    }
-
-    /// Promotes a tx to the clear pool.
-    async fn promote_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> {
-        // Remove the tx from the maps used during the stem phase.
-        self.stem_origins.remove(&tx_id);
-
-        // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
-        // map. These timers should be relatively short, so it shouldn't be a problem.
-        //self.embargo_timers.try_remove(&tx_id);
-
-        self.backing_pool
-            .ready()
-            .await?
-            .call(TxStoreRequest::Promote(tx_id))
-            .await?;
-
-        Ok(())
-    }
-
-    /// Promotes a tx to the public fluff pool and fluffs the tx.
-    async fn promote_and_fluff_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> {
-        tracing::debug!("Promoting transaction to public pool and fluffing it.");
-
-        let TxStoreResponse::Transaction(tx) = self
-            .backing_pool
-            .ready()
-            .await?
-            .call(TxStoreRequest::Get(tx_id.clone()))
-            .await?
-        else {
-            panic!("Backing tx pool responded with wrong response for request.");
-        };
-
-        let Some((tx, state)) = tx else {
-            tracing::debug!("Could not find tx, skipping.");
-            return Ok(());
-        };
-
-        if state == State::Fluff {
-            tracing::debug!("Transaction already fluffed, skipping.");
-            return Ok(());
-        }
-
-        self.promote_tx(tx_id.clone()).await?;
-        self.fluff_tx(tx, tx_id).await
-    }
-
-    /// Returns a tx stored in the fluff _OR_ stem pool.
-    async fn get_tx_from_pool(&mut self, tx_id: TxID) -> Result<Option<Tx>, tower::BoxError> {
-        let TxStoreResponse::Transaction(tx) = self
-            .backing_pool
-            .ready()
-            .await?
-            .call(TxStoreRequest::Get(tx_id))
-            .await?
-        else {
-            panic!("Backing tx pool responded with wrong response for request.");
-        };
-
-        Ok(tx.map(|tx| tx.0))
-    }
-
-    /// Starts the [`DandelionPool`].
-    async fn run(
-        mut self,
-        mut rx: mpsc::Receiver<(IncomingTx<Tx, TxID, PID>, oneshot::Sender<()>)>,
-    ) {
-        tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config);
-
-        // On start up we just fluff all txs left in the stem pool.
-        let Ok(TxStoreResponse::IDs(ids)) = (&mut self.backing_pool)
-            .oneshot(TxStoreRequest::IDsInStemPool)
-            .await
-        else {
-            tracing::error!("Failed to get transactions in stem pool.");
-            return;
-        };
-
-        tracing::debug!(
-            "Fluffing {} txs that are currently in the stem pool",
-            ids.len()
-        );
-
-        for id in ids {
-            if let Err(e) = self.promote_and_fluff_tx(id).await {
-                tracing::error!("Failed to fluff tx in the stem pool at start up, {e}.");
-                return;
-            }
-        }
-
-        loop {
-            tracing::trace!("Waiting for next event.");
-            tokio::select! {
-                // biased to handle current txs before routing new ones.
-                biased;
-                Some(fired) = self.embargo_timers.next() => {
-                    tracing::debug!("Embargo timer fired, did not see stem tx in time.");
-
-                    let tx_id = fired.into_inner();
-                    if let Err(e) = self.promote_and_fluff_tx(tx_id).await {
-                        tracing::error!("Error handling fired embargo timer: {e}");
-                        return;
-                    }
-                }
-                Some(Ok((tx_id, res))) = self.routing_set.join_next() => {
-                    tracing::trace!("Received d++ routing result.");
-
-                    let res = match res {
-                        Ok(State::Fluff) => {
-                            tracing::debug!("Transaction was fluffed upgrading it to the public pool.");
-                            self.promote_tx(tx_id).await
-                        }
-                        Err(tx_state) => {
-                            tracing::debug!("Error routing transaction, trying again.");
-
-                            match self.get_tx_from_pool(tx_id.clone()).await {
-                                Ok(Some(tx)) => match tx_state {
-                                    TxState::Fluff => self.fluff_tx(tx, tx_id).await,
-                                    TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await,
-                                    TxState::Local => self.stem_tx(tx, tx_id, None).await,
-                                }
-                                Err(e) => Err(e),
-                                _ => continue,
-                            }
-                        }
-                        Ok(State::Stem) => continue,
-                    };
-
-                    if let Err(e) = res {
-                        tracing::error!("Error handling transaction routing return: {e}");
-                        return;
-                    }
-                }
-                req = rx.recv() => {
-                    tracing::debug!("Received new tx to route.");
-
-                    let Some((IncomingTx { tx, tx_state, tx_id }, res_tx)) = req else {
-                        return;
-                    };
-
-                    if let Err(e) = self.handle_incoming_tx(tx, tx_state, tx_id).await {
-                        let _ = res_tx.send(());
-
-                        tracing::error!("Error handling transaction in dandelion pool: {e}");
-                        return;
-                    }
-                    let _ = res_tx.send(());
-
-                }
-            }
-        }
-    }
-}

p2p/dandelion-tower/src/pool/incoming_tx.rs (new file, 113 lines)
@@ -0,0 +1,113 @@
+//! Contains [`IncomingTx`] and [`IncomingTxBuilder`]
+use crate::{State, TxState};
+
+/// An incoming transaction that has gone through the preprocessing stage.
+pub struct IncomingTx<Tx, TxId, PeerId> {
+    /// The transaction.
+    pub(crate) tx: Tx,
+    /// The transaction ID.
+    pub(crate) tx_id: TxId,
+    /// The routing state of the transaction.
+    pub(crate) routing_state: TxState<PeerId>,
+}
+
+/// An [`IncomingTx`] builder.
+///
+/// The const generics here are used to restrict what methods can be called.
+///
+/// - `RS`: routing state; a `bool` for if the routing state is set
+/// - `DBS`: database state; a `bool` for if the state in the DB is set
+pub struct IncomingTxBuilder<const RS: bool, const DBS: bool, Tx, TxId, PeerId> {
+    /// The transaction.
+    tx: Tx,
+    /// The transaction ID.
+    tx_id: TxId,
+    /// The routing state of the transaction.
+    routing_state: Option<TxState<PeerId>>,
+    /// The state of this transaction in the DB.
+    state_in_db: Option<State>,
+}
+
+impl<Tx, TxId, PeerId> IncomingTxBuilder<false, false, Tx, TxId, PeerId> {
+    /// Creates a new [`IncomingTxBuilder`].
+    pub fn new(tx: Tx, tx_id: TxId) -> Self {
+        Self {
+            tx,
+            tx_id,
+            routing_state: None,
+            state_in_db: None,
+        }
+    }
+}
+
+impl<const DBS: bool, Tx, TxId, PeerId> IncomingTxBuilder<false, DBS, Tx, TxId, PeerId> {
+    /// Adds the routing state to the builder.
+    ///
+    /// The routing state is the origin of this transaction from our perspective.
+    pub fn with_routing_state(
+        self,
+        state: TxState<PeerId>,
+    ) -> IncomingTxBuilder<true, DBS, Tx, TxId, PeerId> {
+        IncomingTxBuilder {
+            tx: self.tx,
+            tx_id: self.tx_id,
+            routing_state: Some(state),
+            state_in_db: self.state_in_db,
+        }
+    }
+}
+
+impl<const RS: bool, Tx, TxId, PeerId> IncomingTxBuilder<RS, false, Tx, TxId, PeerId> {
+    /// Adds the database state to the builder.
+    ///
+    /// If the transaction is not in the DB already then the state should be [`None`].
+    pub fn with_state_in_db(
+        self,
+        state: Option<State>,
+    ) -> IncomingTxBuilder<RS, true, Tx, TxId, PeerId> {
+        IncomingTxBuilder {
+            tx: self.tx,
+            tx_id: self.tx_id,
+            routing_state: self.routing_state,
+            state_in_db: state,
+        }
+    }
+}
+
+impl<Tx, TxId, PeerId> IncomingTxBuilder<true, true, Tx, TxId, PeerId> {
+    /// Builds the [`IncomingTx`].
+    ///
+    /// If this returns [`None`] then the transaction does not need to be given to the dandelion pool
+    /// manager.
+    pub fn build(self) -> Option<IncomingTx<Tx, TxId, PeerId>> {
+        let routing_state = self.routing_state.unwrap();
+
+        if self.state_in_db == Some(State::Fluff) {
+            return None;
+        }
+
+        Some(IncomingTx {
+            tx: self.tx,
+            tx_id: self.tx_id,
+            routing_state,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_builder() {
+        IncomingTxBuilder::new(1, 2)
+            .with_routing_state(TxState::Stem { from: 3 })
+            .with_state_in_db(None)
+            .build();
+
+        IncomingTxBuilder::new(1, 2)
+            .with_state_in_db(None)
+            .with_routing_state(TxState::Stem { from: 3 })
+            .build();
+    }
+}
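
The builder above is a typestate pattern: the two const-generic `bool`s record which setters have run, and `build` is only implemented for `IncomingTxBuilder<true, true, ..>`, so forgetting a setter is a compile error rather than a runtime panic. A hypothetical snippet illustrating this, using `u8` stand-ins for the `Tx`/`TxId`/`PeerId` generics:

```rust
// Compiles: both states were provided (in either order).
let _ok = IncomingTxBuilder::new(1_u8, 2_u8)
    .with_routing_state(TxState::<u8>::Local)
    .with_state_in_db(None)
    .build();

// Does NOT compile if uncommented: `build` does not exist on
// `IncomingTxBuilder<false, true, ..>` because the routing state is missing.
// let _err = IncomingTxBuilder::new(1_u8, 2_u8)
//     .with_state_in_db(None)
//     .build();
```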

p2p/dandelion-tower/src/pool/manager.rs (new file, 294 lines)
@@ -0,0 +1,294 @@
+use std::{
+    collections::{HashMap, HashSet},
+    hash::Hash,
+    marker::PhantomData,
+    time::Duration,
+};
+
+use futures::{FutureExt, StreamExt};
+use rand::prelude::*;
+use rand_distr::Exp;
+use tokio::{
+    sync::{mpsc, oneshot},
+    task::JoinSet,
+};
+use tokio_util::time::DelayQueue;
+use tower::{Service, ServiceExt};
+
+use crate::{
+    pool::IncomingTx,
+    traits::{TxStoreRequest, TxStoreResponse},
+    DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState,
+};
+
+#[derive(Copy, Clone, Debug, thiserror::Error)]
+#[error("The dandelion pool was shutdown")]
+pub struct DandelionPoolShutDown;
+
+/// The dandelion++ pool manager.
+///
+/// See the [module docs](super) for more.
+pub struct DandelionPoolManager<P, R, Tx, TxId, PeerId> {
+    /// The dandelion++ router
+    pub(crate) dandelion_router: R,
+    /// The backing tx storage.
+    pub(crate) backing_pool: P,
+    /// The set of tasks that are running the future returned from `dandelion_router`.
+    pub(crate) routing_set: JoinSet<(TxId, Result<State, TxState<PeerId>>)>,
+
+    /// The origin of stem transactions.
+    pub(crate) stem_origins: HashMap<TxId, HashSet<PeerId>>,
+
+    /// Current stem pool embargo timers.
+    pub(crate) embargo_timers: DelayQueue<TxId>,
+    /// The distrobution to sample to get embargo timers.
+    pub(crate) embargo_dist: Exp<f64>,
+
+    /// The d++ config.
+    pub(crate) config: DandelionConfig,
+
+    pub(crate) _tx: PhantomData<Tx>,
+}
+
+impl<P, R, Tx, TxId, PeerId> DandelionPoolManager<P, R, Tx, TxId, PeerId>
+where
+    Tx: Clone + Send,
+    TxId: Hash + Eq + Clone + Send + 'static,
+    PeerId: Hash + Eq + Clone + Send + 'static,
+    P: Service<TxStoreRequest<TxId>, Response = TxStoreResponse<Tx>, Error = tower::BoxError>,
+    P::Future: Send + 'static,
+    R: Service<DandelionRouteReq<Tx, PeerId>, Response = State, Error = DandelionRouterError>,
+    R::Future: Send + 'static,
+{
+    /// Adds a new embargo timer to the running timers, with a duration pulled from [`Self::embargo_dist`]
+    fn add_embargo_timer_for_tx(&mut self, tx_id: TxId) {
+        let embargo_timer = self.embargo_dist.sample(&mut thread_rng());
+        tracing::debug!(
+            "Setting embargo timer for stem tx: {} seconds.",
+            embargo_timer
+        );
+
+        self.embargo_timers
+            .insert(tx_id, Duration::from_secs_f64(embargo_timer));
+    }
+
+    /// Stems the tx, setting the stem origin, if it wasn't already set.
+    ///
+    /// This function does not add the tx to the backing pool.
+    async fn stem_tx(
+        &mut self,
+        tx: Tx,
+        tx_id: TxId,
+        from: Option<PeerId>,
+    ) -> Result<(), tower::BoxError> {
+        if let Some(peer) = &from {
+            self.stem_origins
+                .entry(tx_id.clone())
+                .or_default()
+                .insert(peer.clone());
+        }
+
+        let state = from
+            .map(|from| TxState::Stem { from })
+            .unwrap_or(TxState::Local);
+
+        let fut = self
+            .dandelion_router
+            .ready()
+            .await?
+            .call(DandelionRouteReq {
+                tx,
+                state: state.clone(),
+            });
+
+        self.routing_set
+            .spawn(fut.map(|res| (tx_id, res.map_err(|_| state))));
+        Ok(())
+    }
+
+    /// Fluffs a tx, does not add the tx to the tx pool.
+    async fn fluff_tx(&mut self, tx: Tx, tx_id: TxId) -> Result<(), tower::BoxError> {
+        let fut = self
+            .dandelion_router
+            .ready()
+            .await?
+            .call(DandelionRouteReq {
+                tx,
+                state: TxState::Fluff,
+            });
+
+        self.routing_set
+            .spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff))));
+        Ok(())
+    }
+
+    /// Function to handle an [`IncomingTx`].
+    async fn handle_incoming_tx(
+        &mut self,
+        tx: Tx,
+        tx_state: TxState<PeerId>,
+        tx_id: TxId,
+    ) -> Result<(), tower::BoxError> {
+        match tx_state {
+            TxState::Stem { from } => {
+                if self
+                    .stem_origins
+                    .get(&tx_id)
+                    .is_some_and(|peers| peers.contains(&from))
+                {
+                    tracing::debug!("Received stem tx twice from same peer, fluffing it");
+                    // The same peer sent us a tx twice, fluff it.
+                    self.promote_and_fluff_tx(tx_id).await?;
+                } else {
+                    // This could be a new tx or it could have already been stemed, but we still stem it again
+                    // unless the same peer sends us a tx twice.
+                    tracing::debug!("Steming incoming tx");
+                    self.stem_tx(tx, tx_id.clone(), Some(from)).await?;
+                    self.add_embargo_timer_for_tx(tx_id);
+                }
+            }
+            TxState::Fluff => {
+                tracing::debug!("Fluffing incoming tx");
+                self.fluff_tx(tx, tx_id).await?;
+            }
+            TxState::Local => {
+                tracing::debug!("Steming local transaction");
+                self.stem_tx(tx, tx_id.clone(), None).await?;
+                self.add_embargo_timer_for_tx(tx_id);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Promotes a tx to the clear pool.
+    async fn promote_tx(&mut self, tx_id: TxId) -> Result<(), tower::BoxError> {
+        // Remove the tx from the maps used during the stem phase.
+        self.stem_origins.remove(&tx_id);
+
+        // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the
+        // map. These timers should be relatively short, so it shouldn't be a problem.
+        //self.embargo_timers.try_remove(&tx_id);
+
+        self.backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Promote(tx_id))
+            .await?;
+
+        Ok(())
+    }
+
+    /// Promotes a tx to the public fluff pool and fluffs the tx.
+    async fn promote_and_fluff_tx(&mut self, tx_id: TxId) -> Result<(), tower::BoxError> {
+        tracing::debug!("Promoting transaction to public pool and fluffing it.");
+
+        let TxStoreResponse::Transaction(tx) = self
+            .backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Get(tx_id.clone()))
+            .await?
+        else {
+            panic!("Backing tx pool responded with wrong response for request.");
+        };
+
+        let Some((tx, state)) = tx else {
+            tracing::debug!("Could not find tx, skipping.");
+            return Ok(());
+        };
+
+        if state == State::Fluff {
+            tracing::debug!("Transaction already fluffed, skipping.");
+            return Ok(());
+        }
+
+        self.promote_tx(tx_id.clone()).await?;
+        self.fluff_tx(tx, tx_id).await
+    }
+
+    /// Returns a tx stored in the fluff _OR_ stem pool.
+    async fn get_tx_from_pool(&mut self, tx_id: TxId) -> Result<Option<Tx>, tower::BoxError> {
+        let TxStoreResponse::Transaction(tx) = self
+            .backing_pool
+            .ready()
+            .await?
+            .call(TxStoreRequest::Get(tx_id))
+            .await?
+        else {
+            panic!("Backing tx pool responded with wrong response for request.");
+        };
+
+        Ok(tx.map(|tx| tx.0))
+    }
+
+    /// Starts the [`DandelionPoolManager`].
+    pub(crate) async fn run(
+        mut self,
+        mut rx: mpsc::Receiver<(IncomingTx<Tx, TxId, PeerId>, oneshot::Sender<()>)>,
+    ) {
+        tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config);
+
+        loop {
+            tracing::trace!("Waiting for next event.");
+            tokio::select! {
+                // biased to handle current txs before routing new ones.
+                biased;
+                Some(fired) = self.embargo_timers.next() => {
+                    tracing::debug!("Embargo timer fired, did not see stem tx in time.");
+
+                    let tx_id = fired.into_inner();
+                    if let Err(e) = self.promote_and_fluff_tx(tx_id).await {
+                        tracing::error!("Error handling fired embargo timer: {e}");
+                        return;
+                    }
+                }
+                Some(Ok((tx_id, res))) = self.routing_set.join_next() => {
+                    tracing::trace!("Received d++ routing result.");
+
+                    let res = match res {
+                        Ok(State::Fluff) => {
+                            tracing::debug!("Transaction was fluffed upgrading it to the public pool.");
+                            self.promote_tx(tx_id).await
+                        }
+                        Err(tx_state) => {
+                            tracing::debug!("Error routing transaction, trying again.");
+
+                            match self.get_tx_from_pool(tx_id.clone()).await {
+                                Ok(Some(tx)) => match tx_state {
+                                    TxState::Fluff => self.fluff_tx(tx, tx_id).await,
+                                    TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await,
+                                    TxState::Local => self.stem_tx(tx, tx_id, None).await,
+                                }
+                                Err(e) => Err(e),
+                                _ => continue,
+                            }
+                        }
+                        Ok(State::Stem) => continue,
+                    };
+
+                    if let Err(e) = res {
+                        tracing::error!("Error handling transaction routing return: {e}");
+                        return;
+                    }
+                }
+                req = rx.recv() => {
+                    tracing::debug!("Received new tx to route.");
+
+                    let Some((IncomingTx { tx, tx_id, routing_state }, res_tx)) = req else {
+                        return;
+                    };
+
+                    if let Err(e) = self.handle_incoming_tx(tx, routing_state, tx_id).await {
+                        let _ = res_tx.send(());
+
+                        tracing::error!("Error handling transaction in dandelion pool: {e}");
+                        return;
+                    }
+                    let _ = res_tx.send(());
+
+                }
+            }
+        }
+    }
+}
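
A note on `add_embargo_timer_for_tx` above: the delay is drawn from an exponential distribution whose mean is the configured average embargo timeout, i.e. the rate passed to `Exp::new` is `1.0 / mean`. A standalone sketch of that sampling, using the same `rand`/`rand_distr` crates:

```rust
use std::time::Duration;

use rand::thread_rng;
use rand_distr::{Distribution, Exp};

/// Sample an embargo delay with the given average, mirroring
/// `Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64())`.
fn sample_embargo(avg: Duration) -> Duration {
    let dist = Exp::new(1.0 / avg.as_secs_f64()).expect("rate must be positive and finite");
    Duration::from_secs_f64(dist.sample(&mut thread_rng()))
}

fn main() {
    // Illustrative 90-second average; the real value comes from `DandelionConfig`.
    let delay = sample_embargo(Duration::from_secs(90));
    println!("fluff this stem tx after {delay:?} unless it is seen fluffed first");
}
```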

p2p/dandelion-tower/src/pool/mod.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
+//! # Dandelion++ Pool
+//!
+//! This module contains [`DandelionPoolManager`] which is a wrapper around a backing transaction store,
+//! which fully implements the dandelion++ protocol.
+//!
+//! The [`DandelionPoolManager`] is a middle man between a [preprocessing stage](#preprocessing-stage) and a dandelion router.
+//! It handles promoting transactions in the stem state to the fluff state and setting embargo timers on stem state transactions.
+//!
+//! ### Preprocessing stage
+//!
+//! The preprocessing stage (not handled in this crate) before giving the transaction to the [`DandelionPoolManager`]
+//! should handle:
+//!
+//! - verifying the tx.
+//! - checking if we have the tx in the pool already and giving that information to the [`IncomingTxBuilder`].
+//! - storing the tx in the pool, if it isn't there already.
+//!
+//! ### Keep Stem Transactions Hidden
+//!
+//! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden.
+//! So handle any requests to the tx-pool like the stem side of the pool does not exist.
+use std::{
+    collections::HashMap,
+    hash::Hash,
+    marker::PhantomData,
+    task::{Context, Poll},
+};
+
+use futures::{future::BoxFuture, FutureExt};
+use rand_distr::Exp;
+use tokio::{
+    sync::{mpsc, oneshot},
+    task::JoinSet,
+};
+use tokio_util::{sync::PollSender, time::DelayQueue};
+use tower::Service;
+use tracing::Instrument;
+
+use crate::{
+    pool::manager::DandelionPoolShutDown,
+    traits::{TxStoreRequest, TxStoreResponse},
+    DandelionConfig, DandelionRouteReq, DandelionRouterError, State,
+};
+
+mod incoming_tx;
+mod manager;
+
+pub use incoming_tx::{IncomingTx, IncomingTxBuilder};
+pub use manager::DandelionPoolManager;
+
+/// Start the [`DandelionPoolManager`].
+///
+/// This function spawns the [`DandelionPoolManager`] and returns [`DandelionPoolService`] which can be used to send
+/// requests to the pool.
+///
+/// ### Args
+///
+/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPoolManager`].
+/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow
+/// user to customise routing functionality.
+/// - `backing_pool` is the backing transaction storage service
+/// - `config` is [`DandelionConfig`].
+pub fn start_dandelion_pool_manager<P, R, Tx, TxId, PeerId>(
+    buffer_size: usize,
+    dandelion_router: R,
+    backing_pool: P,
+    config: DandelionConfig,
+) -> DandelionPoolService<Tx, TxId, PeerId>
+where
+    Tx: Clone + Send + 'static,
+    TxId: Hash + Eq + Clone + Send + 'static,
+    PeerId: Hash + Eq + Clone + Send + 'static,
+    P: Service<TxStoreRequest<TxId>, Response = TxStoreResponse<Tx>, Error = tower::BoxError>
+        + Send
+        + 'static,
+    P::Future: Send + 'static,
+    R: Service<DandelionRouteReq<Tx, PeerId>, Response = State, Error = DandelionRouterError>
+        + Send
+        + 'static,
+    R::Future: Send + 'static,
+{
+    let (tx, rx) = mpsc::channel(buffer_size);
+
+    let pool = DandelionPoolManager {
+        dandelion_router,
+        backing_pool,
+        routing_set: JoinSet::new(),
+        stem_origins: HashMap::new(),
+        embargo_timers: DelayQueue::new(),
+        embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(),
+        config,
+        _tx: PhantomData,
+    };
+
+    let span = tracing::debug_span!("dandelion_pool");
+
+    tokio::spawn(pool.run(rx).instrument(span));
+
+    DandelionPoolService {
+        tx: PollSender::new(tx),
+    }
+}
+
+/// The dandelion pool manager service.
+///
+/// Used to send [`IncomingTx`]s to the [`DandelionPoolManager`]
+#[derive(Clone)]
+pub struct DandelionPoolService<Tx, TxId, PeerId> {
+    /// The channel to [`DandelionPoolManager`].
+    tx: PollSender<(IncomingTx<Tx, TxId, PeerId>, oneshot::Sender<()>)>,
+}
+
+impl<Tx, TxId, PeerId> Service<IncomingTx<Tx, TxId, PeerId>>
+    for DandelionPoolService<Tx, TxId, PeerId>
+where
+    Tx: Clone + Send,
+    TxId: Hash + Eq + Clone + Send + 'static,
+    PeerId: Hash + Eq + Clone + Send + 'static,
+{
+    type Response = ();
+    type Error = DandelionPoolShutDown;
+    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
+
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown)
+    }
+
+    fn call(&mut self, req: IncomingTx<Tx, TxId, PeerId>) -> Self::Future {
+        // although the channel isn't sending anything we want to wait for the request to be handled before continuing.
+        let (tx, rx) = oneshot::channel();
+
+        let res = self
+            .tx
+            .send_item((req, tx))
+            .map_err(|_| DandelionPoolShutDown);
+
+        async move {
+            res?;
+            rx.await.expect("Oneshot dropped before response!");
+
+            Ok(())
+        }
+        .boxed()
+    }
+}

@@ -6,7 +6,7 @@
 //! ### What The Router Does Not Do
 //!
 //! It does not handle anything to do with keeping transactions long term, i.e. embargo timers and handling
-//! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPool)
+//! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPoolManager)
 use std::{
     collections::HashMap,
     hash::Hash,
@@ -43,9 +43,9 @@ pub enum DandelionRouterError {
 }
 
 /// A response from an attempt to retrieve an outbound peer.
-pub enum OutboundPeer<ID, T> {
+pub enum OutboundPeer<Id, T> {
     /// A peer.
-    Peer(ID, T),
+    Peer(Id, T),
     /// The peer store is exhausted and has no more to return.
     Exhausted,
 }
@@ -61,28 +61,28 @@ pub enum State {
 
 /// The routing state of a transaction.
 #[derive(Debug, Clone, Eq, PartialEq)]
-pub enum TxState<ID> {
+pub enum TxState<Id> {
     /// Fluff state.
     Fluff,
     /// Stem state.
     Stem {
-        /// The peer who sent us this transaction's ID.
-        from: ID,
+        /// The peer who sent us this transaction's Id.
+        from: Id,
     },
     /// Local - the transaction originated from our node.
     Local,
 }
 
 /// A request to route a transaction.
-pub struct DandelionRouteReq<Tx, ID> {
+pub struct DandelionRouteReq<Tx, Id> {
     /// The transaction.
     pub tx: Tx,
     /// The transaction state.
-    pub state: TxState<ID>,
+    pub state: TxState<Id>,
 }
 
 /// The dandelion router service.
-pub struct DandelionRouter<P, B, ID, S, Tx> {
+pub struct DandelionRouter<P, B, Id, S, Tx> {
     // pub(crate) is for tests
     /// A [`Discover`] where we can get outbound peers from.
     outbound_peer_discover: Pin<Box<P>>,
@@ -95,14 +95,14 @@ pub struct DandelionRouter<P, B, ID, S, Tx> {
     epoch_start: Instant,
 
     /// The stem our local transactions will be sent to.
-    local_route: Option<ID>,
-    /// A [`HashMap`] linking peer's IDs to IDs in `stem_peers`.
-    stem_routes: HashMap<ID, ID>,
+    local_route: Option<Id>,
+    /// A [`HashMap`] linking peer's Ids to Ids in `stem_peers`.
+    stem_routes: HashMap<Id, Id>,
     /// Peers we are using for stemming.
     ///
     /// This will contain peers, even in [`State::Fluff`] to allow us to stem [`TxState::Local`]
     /// transactions.
-    pub(crate) stem_peers: HashMap<ID, S>,
+    pub(crate) stem_peers: HashMap<Id, S>,
 
     /// The distribution to sample to get the [`State`], true is [`State::Fluff`].
     state_dist: Bernoulli,
@@ -116,10 +116,10 @@ pub struct DandelionRouter<P, B, ID, S, Tx> {
     _tx: PhantomData<Tx>,
 }
 
-impl<Tx, ID, P, B, S> DandelionRouter<P, B, ID, S, Tx>
+impl<Tx, Id, P, B, S> DandelionRouter<P, B, Id, S, Tx>
 where
-    ID: Hash + Eq + Clone,
-    P: TryStream<Ok = OutboundPeer<ID, S>, Error = tower::BoxError>,
+    Id: Hash + Eq + Clone,
+    P: TryStream<Ok = OutboundPeer<Id, S>, Error = tower::BoxError>,
     B: Service<DiffuseRequest<Tx>, Error = tower::BoxError>,
     B::Future: Send + 'static,
     S: Service<StemRequest<Tx>, Error = tower::BoxError>,
@@ -198,7 +198,7 @@ where
     fn stem_tx(
         &mut self,
         tx: Tx,
-        from: ID,
+        from: Id,
     ) -> BoxFuture<'static, Result<State, DandelionRouterError>> {
         if self.stem_peers.is_empty() {
             tracing::debug!("Stem peers are empty, fluffing stem transaction.");
@@ -258,19 +258,10 @@ where
     }
 }
 
-/*
-## Generics ##
-
-Tx: The tx type
-ID: Peer Id type - unique identifier for nodes.
-P: Peer Set discover - where we can get outbound peers from
-B: Broadcast service - where we send txs to get diffused.
-S: The Peer service - handles routing messages to a single node.
-*/
-impl<Tx, ID, P, B, S> Service<DandelionRouteReq<Tx, ID>> for DandelionRouter<P, B, ID, S, Tx>
+impl<Tx, Id, P, B, S> Service<DandelionRouteReq<Tx, Id>> for DandelionRouter<P, B, Id, S, Tx>
 where
-    ID: Hash + Eq + Clone,
-    P: TryStream<Ok = OutboundPeer<ID, S>, Error = tower::BoxError>,
+    Id: Hash + Eq + Clone,
+    P: TryStream<Ok = OutboundPeer<Id, S>, Error = tower::BoxError>,
     B: Service<DiffuseRequest<Tx>, Error = tower::BoxError>,
     B::Future: Send + 'static,
     S: Service<StemRequest<Tx>, Error = tower::BoxError>,
@@ -336,7 +327,7 @@ where
         Poll::Ready(Ok(()))
     }
 
-    fn call(&mut self, req: DandelionRouteReq<Tx, ID>) -> Self::Future {
+    fn call(&mut self, req: DandelionRouteReq<Tx, Id>) -> Self::Future {
         tracing::trace!(parent: &self.span, "Handling route request.");
 
         match req.state {

@@ -76,11 +76,9 @@ pub fn mock_in_memory_backing_pool<
     TxID: Clone + Hash + Eq + Send + 'static,
 >() -> (
     impl Service<
-            TxStoreRequest<Tx, TxID>,
-            Response = TxStoreResponse<Tx, TxID>,
-            Future = impl Future<Output = Result<TxStoreResponse<Tx, TxID>, tower::BoxError>>
-                + Send
-                + 'static,
+            TxStoreRequest<TxID>,
+            Response = TxStoreResponse<Tx>,
+            Future = impl Future<Output = Result<TxStoreResponse<Tx>, tower::BoxError>> + Send + 'static,
             Error = tower::BoxError,
         > + Send
         + 'static,
@@ -90,33 +88,14 @@ pub fn mock_in_memory_backing_pool<
     let txs_2 = txs.clone();
 
     (
-        service_fn(move |req: TxStoreRequest<Tx, TxID>| {
+        service_fn(move |req: TxStoreRequest<TxID>| {
             let txs = txs.clone();
             async move {
                 match req {
-                    TxStoreRequest::Store(tx, tx_id, state) => {
-                        txs.lock().unwrap().insert(tx_id, (tx, state));
-                        Ok(TxStoreResponse::Ok)
-                    }
                     TxStoreRequest::Get(tx_id) => {
                         let tx_state = txs.lock().unwrap().get(&tx_id).cloned();
                         Ok(TxStoreResponse::Transaction(tx_state))
                     }
-                    TxStoreRequest::Contains(tx_id) => Ok(TxStoreResponse::Contains(
-                        txs.lock().unwrap().get(&tx_id).map(|res| res.1),
-                    )),
-                    TxStoreRequest::IDsInStemPool => {
-                        // horribly inefficient, but it's test code :)
-                        let ids = txs
-                            .lock()
-                            .unwrap()
-                            .iter()
-                            .filter(|(_, (_, state))| matches!(state, State::Stem))
-                            .map(|tx| tx.0.clone())
-                            .collect::<Vec<_>>();
-
-                        Ok(TxStoreResponse::IDs(ids))
-                    }
                     TxStoreRequest::Promote(tx_id) => {
                         let _ = txs
                             .lock()

@@ -1,12 +1,11 @@
 use std::time::Duration;
 
-use super::*;
 use crate::{
-    pool::{start_dandelion_pool, IncomingTx},
+    pool::{start_dandelion_pool_manager, IncomingTx},
     DandelionConfig, DandelionRouter, Graph, TxState,
 };
 
+use super::*;
+
 #[tokio::test]
 async fn basic_functionality() {
     let config = DandelionConfig {
@@ -21,9 +20,9 @@ async fn basic_functionality() {
 
     let router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config);
 
-    let (pool_svc, pool) = mock_in_memory_backing_pool();
+    let (pool_svc, _pool) = mock_in_memory_backing_pool();
 
-    let mut pool_svc = start_dandelion_pool(15, router, pool_svc, config);
+    let mut pool_svc = start_dandelion_pool_manager(15, router, pool_svc, config);
 
     pool_svc
         .ready()
@@ -32,11 +31,13 @@ async fn basic_functionality() {
         .call(IncomingTx {
             tx: 0_usize,
             tx_id: 1_usize,
-            tx_state: TxState::Fluff,
+            routing_state: TxState::Fluff,
         })
         .await
         .unwrap();
 
-    assert!(pool.lock().unwrap().contains_key(&1));
+    // TODO: the DandelionPoolManager doesn't handle adding txs to the pool, add more tests here to test
+    // all functionality.
+    //assert!(pool.lock().unwrap().contains_key(&1));
     assert!(broadcast_rx.try_recv().is_ok())
 }

@@ -8,42 +8,24 @@ pub struct StemRequest<Tx>(pub Tx);
 
 #[cfg(feature = "txpool")]
 /// A request sent to the backing transaction pool storage.
-pub enum TxStoreRequest<Tx, TxID> {
-    /// A request to store a transaction with the ID to store it under and the pool to store it in.
-    ///
-    /// If the tx is already in the pool then do nothing, unless the tx is in the stem pool then move it
-    /// to the fluff pool, _if this request state is fluff_.
-    Store(Tx, TxID, crate::State),
-    /// A request to retrieve a `Tx` with the given ID from the pool, should not remove that tx from the pool.
+pub enum TxStoreRequest<TxId> {
+    /// A request to retrieve a `Tx` with the given Id from the pool, should not remove that tx from the pool.
     ///
     /// Must return [`TxStoreResponse::Transaction`]
-    Get(TxID),
+    Get(TxId),
     /// Promote a transaction from the stem pool to the public pool.
     ///
     /// If the tx is already in the fluff pool do nothing.
     ///
     /// This should not error if the tx isn't in the pool at all.
-    Promote(TxID),
-    /// A request to check if a translation is in the pool.
-    ///
-    /// Must return [`TxStoreResponse::Contains`]
-    Contains(TxID),
-    /// Returns the IDs of all the transaction in the stem pool.
-    ///
-    /// Must return [`TxStoreResponse::IDs`]
-    IDsInStemPool,
+    Promote(TxId),
 }
 
 #[cfg(feature = "txpool")]
 /// A response sent back from the backing transaction pool.
-pub enum TxStoreResponse<Tx, TxID> {
+pub enum TxStoreResponse<Tx> {
     /// A generic ok response.
     Ok,
-    /// A response containing a [`Option`] for if the transaction is in the pool (Some) or not (None) and in which pool
-    /// the tx is in.
-    Contains(Option<crate::State>),
     /// A response containing a requested transaction.
     Transaction(Option<(Tx, crate::State)>),
-    /// A list of transaction IDs.
-    IDs(Vec<TxID>),
 }

@@ -9,8 +9,8 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/cuprate-rpc-inte
 keywords = ["cuprate", "rpc", "interface"]
 
 [features]
-default = ["dummy", "serde"]
-dummy = []
+default = ["dummy", "serde"]
+dummy = []
 
 [dependencies]
 cuprate-epee-encoding = { path = "../../net/epee-encoding", default-features = false }
@@ -18,15 +18,20 @@ cuprate-json-rpc = { path = "../json-rpc", default-features = false }
 cuprate-rpc-types = { path = "../types", features = ["serde", "epee"], default-features = false }
 cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false }
 
+anyhow = { workspace = true }
 axum = { version = "0.7.5", features = ["json"], default-features = false }
 serde = { workspace = true, optional = true }
-serde_json = { workspace = true, features = ["std"] }
 tower = { workspace = true }
 paste = { workspace = true }
 futures = { workspace = true }
 
 [dev-dependencies]
 cuprate-test-utils = { path = "../../test-utils" }
 
 axum = { version = "0.7.5", features = ["json", "tokio", "http2"] }
+serde_json = { workspace = true, features = ["std"] }
 tokio = { workspace = true, features = ["full"] }
 ureq = { version = "2.10.0", features = ["json"] }
 
 [lints]
 workspace = true

@@ -17,7 +17,7 @@ CLIENT ─► ROUTE ─► REQUEST ─► HANDLER ─► RESPONSE ─► CLIENT
 
 Everything coming _in_ from a client is handled by this crate.
 
-This is where your [`RpcHandler`] turns this [`RpcRequest`] into a [`RpcResponse`].
+This is where your [`RpcHandler`] turns this `Request` into a `Response`.
 
 You hand this `Response` back to `cuprate-rpc-interface` and it will take care of sending it back to the client.
 
@@ -42,16 +42,19 @@ The proper usage of this crate is to:
 3. Do whatever with it
 
 # The [`RpcHandler`]
-This is your [`tower::Service`] that converts [`RpcRequest`]s into [`RpcResponse`]s,
+This is your [`tower::Service`] that converts `Request`s into `Response`s,
 i.e. the "inner handler".
 
-Said concretely, `RpcHandler` is a `tower::Service` where the associated types are from this crate:
-- [`RpcRequest`]
-- [`RpcResponse`]
-- [`RpcError`]
+Said concretely, `RpcHandler` is 3 `tower::Service`s where the
+request/response types are the 3 endpoint enums from [`cuprate_rpc_types`]:
+- [`JsonRpcRequest`](cuprate_rpc_types::json::JsonRpcRequest) & [`JsonRpcResponse`](cuprate_rpc_types::json::JsonRpcResponse)
+- [`BinRequest`](cuprate_rpc_types::bin::BinRequest) & [`BinResponse`](cuprate_rpc_types::bin::BinResponse)
+- [`OtherRequest`](cuprate_rpc_types::other::OtherRequest) & [`OtherResponse`](cuprate_rpc_types::other::OtherResponse)
 
 `RpcHandler`'s [`Future`](std::future::Future) is generic, _although_,
-it must output `Result<RpcResponse, RpcError>`.
+it must output `Result<$RESPONSE, anyhow::Error>`.
+
+The error type must always be [`anyhow::Error`].
 
 The `RpcHandler` must also hold some state that is required
 for RPC server operation.
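To make that three-service shape concrete, here is a minimal sketch of an implementor; `MyHandler` is a hypothetical type added for this edit, and only the JSON-RPC service is spelled out since the `BinRequest` and `OtherRequest` impls follow the same pattern:

```rust
use std::task::{Context, Poll};

use anyhow::Error;
use futures::future::{ready, Ready};
use tower::Service;

use cuprate_rpc_types::json::{JsonRpcRequest, JsonRpcResponse};

/// A hypothetical handler holding whatever state the RPC server needs.
#[derive(Clone)]
struct MyHandler {
    restricted: bool,
}

/// 1 of the 3 required `tower::Service` impls; `BinRequest` and
/// `OtherRequest` get the same treatment with their own response enums.
impl Service<JsonRpcRequest> for MyHandler {
    type Response = JsonRpcResponse;
    type Error = Error; // must be `anyhow::Error`
    type Future = Ready<Result<JsonRpcResponse, Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _req: JsonRpcRequest) -> Self::Future {
        // A real handler would route `_req` to the node here and build
        // the matching `JsonRpcResponse` variant.
        ready(Err(Error::msg("unimplemented")))
    }
}
```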
|
@ -83,7 +86,7 @@ use cuprate_rpc_types::{
|
|||
json::{JsonRpcRequest, JsonRpcResponse, GetBlockCountResponse},
|
||||
other::{OtherRequest, OtherResponse},
|
||||
};
|
||||
use cuprate_rpc_interface::{RouterBuilder, RpcHandlerDummy, RpcRequest};
|
||||
use cuprate_rpc_interface::{RouterBuilder, RpcHandlerDummy};
|
||||
|
||||
// Send a `/get_height` request. This endpoint has no inputs.
|
||||
async fn get_height(port: u16) -> OtherResponse {
|
||||
|
|
|
@@ -1,123 +1,25 @@
 #![doc = include_str!("../README.md")]
 #![cfg_attr(docsrs, feature(doc_cfg))]
-//---------------------------------------------------------------------------------------------------- Lints
-// Forbid lints.
-// Our code, and code generated (e.g macros) cannot overrule these.
-#![forbid(
-    // `unsafe` is allowed but it _must_ be
-    // commented with `SAFETY: reason`.
-    clippy::undocumented_unsafe_blocks,
-
-    // Never.
-    unused_unsafe,
-    redundant_semicolons,
-    unused_allocation,
-    coherence_leak_check,
-    while_true,
-
-    // Maybe can be put into `#[deny]`.
-    unconditional_recursion,
-    for_loops_over_fallibles,
-    unused_braces,
-    unused_labels,
-    keyword_idents,
-    non_ascii_idents,
-    variant_size_differences,
-    single_use_lifetimes,
-
-    // Probably can be put into `#[deny]`.
-    future_incompatible,
-    let_underscore,
-    break_with_label_and_loop,
-    duplicate_macro_attributes,
-    exported_private_dependencies,
-    large_assignments,
-    overlapping_range_endpoints,
-    semicolon_in_expressions_from_macros,
-    noop_method_call,
-)]
-// Deny lints.
-// Some of these are `#[allow]`'ed on a per-case basis.
-#![deny(
-    clippy::all,
-    clippy::correctness,
-    clippy::suspicious,
-    clippy::style,
-    clippy::complexity,
-    clippy::perf,
-    clippy::pedantic,
-    clippy::nursery,
-    clippy::cargo,
-    unused_doc_comments,
-    unused_mut,
-    missing_docs,
-    deprecated,
-    unused_comparisons,
-    nonstandard_style,
-    unreachable_pub
-)]
-#![allow(
-    // FIXME: this lint affects crates outside of
-    // `database/` for some reason, allow for now.
-    clippy::cargo_common_metadata,
-
-    // FIXME: adding `#[must_use]` onto everything
-    // might just be more annoying than useful...
-    // although it is sometimes nice.
-    clippy::must_use_candidate,
-
-    // FIXME: good lint but too many false positives
-    // with our `Env` + `RwLock` setup.
-    clippy::significant_drop_tightening,
-
-    // FIXME: good lint but is less clear in most cases.
-    clippy::items_after_statements,
-
-    // TODO
-    rustdoc::bare_urls,
-
-    clippy::module_name_repetitions,
-    clippy::module_inception,
-    clippy::redundant_pub_crate,
-    clippy::option_if_let_else,
-)]
-// Allow some lints when running in debug mode.
-#![cfg_attr(
-    debug_assertions,
-    allow(
-        clippy::todo,
-        clippy::multiple_crate_versions,
-        unused_imports,
-        unused_variables
-    )
-)]
-// Allow some lints in tests.
-#![cfg_attr(
-    test,
-    allow(
-        clippy::cognitive_complexity,
-        clippy::needless_pass_by_value,
-        clippy::cast_possible_truncation,
-        clippy::too_many_lines
-    )
-)]
-// TODO: remove me after finishing impl
-#![allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
-
 //---------------------------------------------------------------------------------------------------- Mod
 mod route;
 mod router_builder;
-mod rpc_error;
 mod rpc_handler;
 #[cfg(feature = "dummy")]
 mod rpc_handler_dummy;
-mod rpc_request;
-mod rpc_response;
+mod rpc_service;
 
 pub use router_builder::RouterBuilder;
-pub use rpc_error::RpcError;
 pub use rpc_handler::RpcHandler;
 #[cfg(feature = "dummy")]
 pub use rpc_handler_dummy::RpcHandlerDummy;
-pub use rpc_request::RpcRequest;
-pub use rpc_response::RpcResponse;
+pub use rpc_service::RpcService;
 
 // false-positive: used in `README.md`'s doc-test.
 #[cfg(test)]
 mod test {
     extern crate axum;
     extern crate cuprate_test_utils;
     extern crate serde_json;
     extern crate tokio;
     extern crate ureq;
 }
@@ -5,9 +5,16 @@ use axum::{body::Bytes, extract::State, http::StatusCode};
 use tower::ServiceExt;
 
 use cuprate_epee_encoding::from_bytes;
-use cuprate_rpc_types::bin::{BinRequest, BinResponse, GetTransactionPoolHashesRequest};
+use cuprate_rpc_types::{
+    bin::{
+        BinRequest, BinResponse, GetBlocksByHeightRequest, GetBlocksRequest, GetHashesRequest,
+        GetOutputIndexesRequest, GetOutsRequest, GetTransactionPoolHashesRequest,
+    },
+    json::GetOutputDistributionRequest,
+    RpcCall,
+};
 
-use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse};
+use crate::rpc_handler::RpcHandler;
 
 //---------------------------------------------------------------------------------------------------- Routes
 /// This macro generates route functions that expect input.
@@ -66,14 +73,17 @@ macro_rules! generate_endpoints_inner {
     ($variant:ident, $handler:ident, $request:expr) => {
         paste::paste! {
+            {
-            // Send request.
-            let request = RpcRequest::Binary($request);
-            let channel = $handler.oneshot(request).await?;
             // Check if restricted.
             if [<$variant Request>]::IS_RESTRICTED && $handler.restricted() {
                 // TODO: mimic `monerod` behavior.
                 return Err(StatusCode::FORBIDDEN);
             }
 
-            // Assert the response from the inner handler is correct.
-            let RpcResponse::Binary(response) = channel else {
-                panic!("RPC handler did not return a binary response");
+            // Send request.
+            let Ok(response) = $handler.oneshot($request).await else {
+                return Err(StatusCode::INTERNAL_SERVER_ERROR);
             };
+
+            let BinResponse::$variant(response) = response else {
+                panic!("RPC handler returned incorrect response");
+            };
@@ -81,7 +91,7 @@ macro_rules! generate_endpoints_inner {
             // Serialize to bytes and respond.
             match cuprate_epee_encoding::to_bytes(response) {
                 Ok(bytes) => Ok(bytes.freeze()),
-                Err(e) => Err(StatusCode::INTERNAL_SERVER_ERROR),
+                Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR),
             }
         }
     }
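To follow the macro's control flow, this is roughly what one instantiation expands to; a hand-expanded fragment assuming the macro is invoked for the `GetBlocks` endpoint (the enclosing route function is omitted, and `paste!` turns `[<GetBlocks Request>]` into the `GetBlocksRequest` type):

```rust
// Hand-expanded sketch of `generate_endpoints_inner!(GetBlocks, handler, request)`.
{
    // Check if restricted.
    if GetBlocksRequest::IS_RESTRICTED && handler.restricted() {
        return Err(StatusCode::FORBIDDEN);
    }

    // Send request.
    let Ok(response) = handler.oneshot(request).await else {
        return Err(StatusCode::INTERNAL_SERVER_ERROR);
    };

    // The handler must answer a `GetBlocks` request with the
    // `GetBlocks` response variant; anything else is a handler bug.
    let BinResponse::GetBlocks(response) = response else {
        panic!("RPC handler returned incorrect response");
    };

    // Serialize to bytes and respond.
    match cuprate_epee_encoding::to_bytes(response) {
        Ok(bytes) => Ok(bytes.freeze()),
        Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR),
    }
}
```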
@@ -8,21 +8,21 @@ use tower::ServiceExt;
 
 use cuprate_json_rpc::{
     error::{ErrorCode, ErrorObject},
-    Id,
+    Id, Response,
 };
 use cuprate_rpc_types::{
     json::{JsonRpcRequest, JsonRpcResponse},
     RpcCallValue,
 };
 
-use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse};
+use crate::rpc_handler::RpcHandler;
 
 //---------------------------------------------------------------------------------------------------- Routes
 /// The `/json_rpc` route function used in [`crate::RouterBuilder`].
 pub(crate) async fn json_rpc<H: RpcHandler>(
     State(handler): State<H>,
     Json(request): Json<cuprate_json_rpc::Request<JsonRpcRequest>>,
-) -> Result<Json<cuprate_json_rpc::Response<JsonRpcResponse>>, StatusCode> {
+) -> Result<Json<Response<JsonRpcResponse>>, StatusCode> {
     // TODO: <https://www.jsonrpc.org/specification#notification>
     //
     // JSON-RPC notifications (requests without `id`)
@@ -30,6 +30,11 @@ pub(crate) async fn json_rpc<H: RpcHandler>(
     // must remain. How to do this considering this function will
     // always return and cause `axum` to respond?
 
+    // JSON-RPC 2.0 rule:
+    // If there was an error in detecting the `Request`'s ID,
+    // the `Response` must contain an `Id::Null`.
+    let id = request.id.unwrap_or(Id::Null);
+
     // Return early if this RPC server is restricted and
     // the requested method is only for non-restricted RPC.
     if request.body.is_restricted() && handler.restricted() {
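As a small sketch of that rule (added for this edit; it assumes `Response`'s payload type parameter can be left as `()` when only the error arm is built), the reply for a request that arrived without an `id` boils down to:

```rust
use cuprate_json_rpc::{error::ErrorObject, Id, Response};

/// Build the error reply for a request whose `id` was absent or undetectable;
/// `error_object` would be constructed from an `ErrorCode` as in the route.
fn reply_without_id(error_object: ErrorObject) -> Response<()> {
    // `request.id.unwrap_or(Id::Null)` collapses to `Id::Null` here.
    Response::err(Id::Null, error_object)
}
```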
@@ -39,26 +44,17 @@ pub(crate) async fn json_rpc<H: RpcHandler>(
             data: None,
         };
 
-        // JSON-RPC 2.0 rule:
-        // If there was an error in detecting the `Request`'s ID,
-        // the `Response` must contain an `Id::Null`
-        let id = request.id.unwrap_or(Id::Null);
-
-        let response = cuprate_json_rpc::Response::err(id, error_object);
+        let response = Response::err(id, error_object);
 
         return Ok(Json(response));
     }
 
-    // Send request.
-    let request = RpcRequest::JsonRpc(request);
-    let channel = handler.oneshot(request).await?;
-
-    // Assert the response from the inner handler is correct.
-    let RpcResponse::JsonRpc(response) = channel else {
-        panic!("RPC handler returned incorrect response");
+    let Ok(response) = handler.oneshot(request.body).await else {
+        return Err(StatusCode::INTERNAL_SERVER_ERROR);
     };
 
-    Ok(Json(response))
+    Ok(Json(Response::ok(id, response)))
 }
 
 //---------------------------------------------------------------------------------------------------- Tests
@@ -25,7 +25,7 @@ use cuprate_rpc_types::{
     RpcCall,
 };
 
-use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse};
+use crate::rpc_handler::RpcHandler;
 
 //---------------------------------------------------------------------------------------------------- Routes
 /// This macro generates route functions that expect input.
@@ -81,13 +81,11 @@ macro_rules! generate_endpoints_inner {
             }
 
             // Send request.
-            let request = RpcRequest::Other(OtherRequest::$variant($request));
-            let channel = $handler.oneshot(request).await?;
-
-            // Assert the response from the inner handler is correct.
-            let RpcResponse::Other(response) = channel else {
-                panic!("RPC handler did not return a binary response");
+            let request = OtherRequest::$variant($request);
+            let Ok(response) = $handler.oneshot(request).await else {
+                return Err(StatusCode::INTERNAL_SERVER_ERROR);
             };
 
+            let OtherResponse::$variant(response) = response else {
+                panic!("RPC handler returned incorrect response")
+            };
@@ -1,12 +1,7 @@
 //! Free functions.
 
-use std::marker::PhantomData;
-
 //---------------------------------------------------------------------------------------------------- Use
-use axum::{
-    routing::{method_routing::get, post},
-    Router,
-};
+use axum::Router;
 
 use crate::{
     route::{bin, fallback, json_rpc, other},
@@ -1,34 +0,0 @@
-//! RPC errors.
-
-//---------------------------------------------------------------------------------------------------- Import
-use axum::http::StatusCode;
-#[cfg(feature = "serde")]
-use serde::{Deserialize, Serialize};
-
-//---------------------------------------------------------------------------------------------------- RpcError
-/// Possible errors during RPC operation.
-///
-/// These are any errors that can happen _during_ a handler function.
-/// I.e. if this error surfaces, it happened _after_ the request was
-/// deserialized.
-///
-/// This is the `Error` type required to be used in an [`RpcHandler`](crate::RpcHandler).
-///
-/// TODO: This is empty as possible errors will be
-/// enumerated when the handler functions are created.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
-pub enum RpcError {}
-
-impl From<RpcError> for StatusCode {
-    fn from(value: RpcError) -> Self {
-        // TODO
-        Self::INTERNAL_SERVER_ERROR
-    }
-}
-
-//---------------------------------------------------------------------------------------------------- Tests
-#[cfg(test)]
-mod test {
-    // use super::*;
-}
@@ -1,49 +1,42 @@
 //! RPC handler trait.
 
 //---------------------------------------------------------------------------------------------------- Use
-use std::{future::Future, task::Poll};
-
-use axum::{http::StatusCode, response::IntoResponse};
-use futures::{channel::oneshot::channel, FutureExt};
-use tower::Service;
-
-use cuprate_helper::asynch::InfallibleOneshotReceiver;
-use cuprate_json_rpc::Id;
-use cuprate_rpc_types::json::JsonRpcRequest;
+use cuprate_rpc_types::{
+    bin::{BinRequest, BinResponse},
+    json::{JsonRpcRequest, JsonRpcResponse},
+    other::{OtherRequest, OtherResponse},
+};
 
-use crate::{rpc_error::RpcError, rpc_request::RpcRequest, rpc_response::RpcResponse};
+use crate::RpcService;
 
 //---------------------------------------------------------------------------------------------------- RpcHandler
 /// An RPC handler.
 ///
-/// This trait represents a type that can turn [`RpcRequest`]s into [`RpcResponse`]s.
+/// This trait represents a type that can turn `Request`s into `Response`s.
 ///
-/// Implementors of this trait must be [`tower::Service`]s that use:
-/// - [`RpcRequest`] as the generic `Request` type
-/// - [`RpcResponse`] as the associated `Response` type
-/// - [`RpcError`] as the associated `Error` type
-/// - A generic [`Future`] that outputs `Result<RpcResponse, RpcError>`
+/// Implementors of this trait must be:
+/// - A [`tower::Service`] that uses [`JsonRpcRequest`] & [`JsonRpcResponse`]
+/// - A [`tower::Service`] that uses [`BinRequest`] & [`BinResponse`]
+/// - A [`tower::Service`] that uses [`OtherRequest`] & [`OtherResponse`]
+///
+/// In other words, an [`RpcHandler`] is a type that implements [`tower::Service`] 3 times,
+/// one for each request/response enum type found in [`cuprate_rpc_types`].
+///
+/// The error type must always be [`anyhow::Error`].
 ///
 /// See this crate's `RpcHandlerDummy` for an implementation example of this trait.
 ///
 /// # Panics
-/// Your [`RpcHandler`] must reply to [`RpcRequest`]s with the correct
-/// [`RpcResponse`] or else this crate will panic during routing functions.
+/// Your [`RpcHandler`] must reply to `Request`s with the correct
+/// `Response` or else this crate will panic during routing functions.
 ///
-/// For example, upon a [`RpcRequest::Binary`] must be replied with
-/// [`RpcRequest::Binary`]. If an [`RpcRequest::Other`] were returned instead,
-/// this crate would panic.
+/// For example, a [`JsonRpcRequest::GetBlockCount`] must be replied with
+/// [`JsonRpcResponse::GetBlockCount`]. If anything else is returned,
+/// this crate may panic.
 pub trait RpcHandler:
-    Clone
-    + Send
-    + Sync
-    + 'static
-    + Service<
-        RpcRequest,
-        Response = RpcResponse,
-        Error = RpcError,
-        Future: Future<Output = Result<RpcResponse, RpcError>> + Send + Sync + 'static,
-    >
+    RpcService<JsonRpcRequest, JsonRpcResponse>
+    + RpcService<BinRequest, BinResponse>
+    + RpcService<OtherRequest, OtherResponse>
 {
     /// Is this [`RpcHandler`] restricted?
     ///
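Since every `RpcHandler` is an ordinary `tower::Service` for each request/response pair, the variant-pairing rule above can be exercised with a plain `oneshot` call. A minimal sketch, added for this edit and assuming the `GetBlockCount` request payload implements `Default`:

```rust
use cuprate_rpc_interface::RpcHandler;
use cuprate_rpc_types::json::{JsonRpcRequest, JsonRpcResponse};
use tower::ServiceExt;

/// Send one `GetBlockCount` request and assert the matching variant comes back.
async fn check_variant_pairing<H: RpcHandler>(handler: H) -> anyhow::Result<()> {
    let request = JsonRpcRequest::GetBlockCount(Default::default());
    let response = handler.oneshot(request).await?;
    // Any other variant here would trip the panics in the routing functions.
    assert!(matches!(response, JsonRpcResponse::GetBlockCount(_)));
    Ok(())
}
```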
@@ -3,20 +3,21 @@
 //---------------------------------------------------------------------------------------------------- Use
 use std::task::Poll;
 
-use futures::{channel::oneshot::channel, FutureExt};
+use anyhow::Error;
+use futures::channel::oneshot::channel;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use tower::Service;
 
 use cuprate_helper::asynch::InfallibleOneshotReceiver;
-use cuprate_json_rpc::Id;
-use cuprate_rpc_types::json::JsonRpcRequest;
-
-use crate::{
-    rpc_error::RpcError, rpc_handler::RpcHandler, rpc_request::RpcRequest,
-    rpc_response::RpcResponse,
+use cuprate_rpc_types::{
+    bin::{BinRequest, BinResponse},
+    json::{JsonRpcRequest, JsonRpcResponse},
+    other::{OtherRequest, OtherResponse},
 };
 
+use crate::rpc_handler::RpcHandler;
+
 //---------------------------------------------------------------------------------------------------- RpcHandlerDummy
 /// An [`RpcHandler`] that always returns [`Default::default`].
 ///
@@ -43,96 +44,133 @@ impl RpcHandler for RpcHandlerDummy {
     }
 }
 
-impl Service<RpcRequest> for RpcHandlerDummy {
-    type Response = RpcResponse;
-    type Error = RpcError;
-    type Future = InfallibleOneshotReceiver<Result<RpcResponse, RpcError>>;
+impl Service<JsonRpcRequest> for RpcHandlerDummy {
+    type Response = JsonRpcResponse;
+    type Error = Error;
+    type Future = InfallibleOneshotReceiver<Result<JsonRpcResponse, Error>>;
 
-    fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+    fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
         Poll::Ready(Ok(()))
     }
 
-    fn call(&mut self, req: RpcRequest) -> Self::Future {
-        use cuprate_rpc_types::bin::BinRequest as BReq;
-        use cuprate_rpc_types::bin::BinResponse as BResp;
-        use cuprate_rpc_types::json::JsonRpcRequest as JReq;
-        use cuprate_rpc_types::json::JsonRpcResponse as JResp;
-        use cuprate_rpc_types::other::OtherRequest as OReq;
-        use cuprate_rpc_types::other::OtherResponse as OResp;
+    fn call(&mut self, req: JsonRpcRequest) -> Self::Future {
+        use cuprate_rpc_types::json::JsonRpcRequest as Req;
+        use cuprate_rpc_types::json::JsonRpcResponse as Resp;
 
-        #[rustfmt::skip]
         #[allow(clippy::default_trait_access)]
         let resp = match req {
-            RpcRequest::JsonRpc(j) => RpcResponse::JsonRpc(cuprate_json_rpc::Response::ok(Id::Null, match j.body {
-                JReq::GetBlockCount(_) => JResp::GetBlockCount(Default::default()),
-                JReq::OnGetBlockHash(_) => JResp::OnGetBlockHash(Default::default()),
-                JReq::SubmitBlock(_) => JResp::SubmitBlock(Default::default()),
-                JReq::GenerateBlocks(_) => JResp::GenerateBlocks(Default::default()),
-                JReq::GetLastBlockHeader(_) => JResp::GetLastBlockHeader(Default::default()),
-                JReq::GetBlockHeaderByHash(_) => JResp::GetBlockHeaderByHash(Default::default()),
-                JReq::GetBlockHeaderByHeight(_) => JResp::GetBlockHeaderByHeight(Default::default()),
-                JReq::GetBlockHeadersRange(_) => JResp::GetBlockHeadersRange(Default::default()),
-                JReq::GetBlock(_) => JResp::GetBlock(Default::default()),
-                JReq::GetConnections(_) => JResp::GetConnections(Default::default()),
-                JReq::GetInfo(_) => JResp::GetInfo(Default::default()),
-                JReq::HardForkInfo(_) => JResp::HardForkInfo(Default::default()),
-                JReq::SetBans(_) => JResp::SetBans(Default::default()),
-                JReq::GetBans(_) => JResp::GetBans(Default::default()),
-                JReq::Banned(_) => JResp::Banned(Default::default()),
-                JReq::FlushTransactionPool(_) => JResp::FlushTransactionPool(Default::default()),
-                JReq::GetOutputHistogram(_) => JResp::GetOutputHistogram(Default::default()),
-                JReq::GetCoinbaseTxSum(_) => JResp::GetCoinbaseTxSum(Default::default()),
-                JReq::GetVersion(_) => JResp::GetVersion(Default::default()),
-                JReq::GetFeeEstimate(_) => JResp::GetFeeEstimate(Default::default()),
-                JReq::GetAlternateChains(_) => JResp::GetAlternateChains(Default::default()),
-                JReq::RelayTx(_) => JResp::RelayTx(Default::default()),
-                JReq::SyncInfo(_) => JResp::SyncInfo(Default::default()),
-                JReq::GetTransactionPoolBacklog(_) => JResp::GetTransactionPoolBacklog(Default::default()),
-                JReq::GetMinerData(_) => JResp::GetMinerData(Default::default()),
-                JReq::PruneBlockchain(_) => JResp::PruneBlockchain(Default::default()),
-                JReq::CalcPow(_) => JResp::CalcPow(Default::default()),
-                JReq::FlushCache(_) => JResp::FlushCache(Default::default()),
-                JReq::AddAuxPow(_) => JResp::AddAuxPow(Default::default()),
-                JReq::GetTxIdsLoose(_) => JResp::GetTxIdsLoose(Default::default()),
-            })),
-            RpcRequest::Binary(b) => RpcResponse::Binary(match b {
-                BReq::GetBlocks(_) => BResp::GetBlocks(Default::default()),
-                BReq::GetBlocksByHeight(_) => BResp::GetBlocksByHeight(Default::default()),
-                BReq::GetHashes(_) => BResp::GetHashes(Default::default()),
-                BReq::GetOutputIndexes(_) => BResp::GetOutputIndexes(Default::default()),
-                BReq::GetOuts(_) => BResp::GetOuts(Default::default()),
-                BReq::GetTransactionPoolHashes(_) => BResp::GetTransactionPoolHashes(Default::default()),
-                BReq::GetOutputDistribution(_) => BResp::GetOutputDistribution(Default::default()),
-            }),
-            RpcRequest::Other(o) => RpcResponse::Other(match o {
-                OReq::GetHeight(_) => OResp::GetHeight(Default::default()),
-                OReq::GetTransactions(_) => OResp::GetTransactions(Default::default()),
-                OReq::GetAltBlocksHashes(_) => OResp::GetAltBlocksHashes(Default::default()),
-                OReq::IsKeyImageSpent(_) => OResp::IsKeyImageSpent(Default::default()),
-                OReq::SendRawTransaction(_) => OResp::SendRawTransaction(Default::default()),
-                OReq::StartMining(_) => OResp::StartMining(Default::default()),
-                OReq::StopMining(_) => OResp::StopMining(Default::default()),
-                OReq::MiningStatus(_) => OResp::MiningStatus(Default::default()),
-                OReq::SaveBc(_) => OResp::SaveBc(Default::default()),
-                OReq::GetPeerList(_) => OResp::GetPeerList(Default::default()),
-                OReq::SetLogHashRate(_) => OResp::SetLogHashRate(Default::default()),
-                OReq::SetLogLevel(_) => OResp::SetLogLevel(Default::default()),
-                OReq::SetLogCategories(_) => OResp::SetLogCategories(Default::default()),
-                OReq::SetBootstrapDaemon(_) => OResp::SetBootstrapDaemon(Default::default()),
-                OReq::GetTransactionPool(_) => OResp::GetTransactionPool(Default::default()),
-                OReq::GetTransactionPoolStats(_) => OResp::GetTransactionPoolStats(Default::default()),
-                OReq::StopDaemon(_) => OResp::StopDaemon(Default::default()),
-                OReq::GetLimit(_) => OResp::GetLimit(Default::default()),
-                OReq::SetLimit(_) => OResp::SetLimit(Default::default()),
-                OReq::OutPeers(_) => OResp::OutPeers(Default::default()),
-                OReq::InPeers(_) => OResp::InPeers(Default::default()),
-                OReq::GetNetStats(_) => OResp::GetNetStats(Default::default()),
-                OReq::GetOuts(_) => OResp::GetOuts(Default::default()),
-                OReq::Update(_) => OResp::Update(Default::default()),
-                OReq::PopBlocks(_) => OResp::PopBlocks(Default::default()),
-                OReq::GetTransactionPoolHashes(_) => OResp::GetTransactionPoolHashes(Default::default()),
-                OReq::GetPublicNodes(_) => OResp::GetPublicNodes(Default::default()),
-            })
+            Req::GetBlockCount(_) => Resp::GetBlockCount(Default::default()),
+            Req::OnGetBlockHash(_) => Resp::OnGetBlockHash(Default::default()),
+            Req::SubmitBlock(_) => Resp::SubmitBlock(Default::default()),
+            Req::GenerateBlocks(_) => Resp::GenerateBlocks(Default::default()),
+            Req::GetLastBlockHeader(_) => Resp::GetLastBlockHeader(Default::default()),
+            Req::GetBlockHeaderByHash(_) => Resp::GetBlockHeaderByHash(Default::default()),
+            Req::GetBlockHeaderByHeight(_) => Resp::GetBlockHeaderByHeight(Default::default()),
+            Req::GetBlockHeadersRange(_) => Resp::GetBlockHeadersRange(Default::default()),
+            Req::GetBlock(_) => Resp::GetBlock(Default::default()),
+            Req::GetConnections(_) => Resp::GetConnections(Default::default()),
+            Req::GetInfo(_) => Resp::GetInfo(Default::default()),
+            Req::HardForkInfo(_) => Resp::HardForkInfo(Default::default()),
+            Req::SetBans(_) => Resp::SetBans(Default::default()),
+            Req::GetBans(_) => Resp::GetBans(Default::default()),
+            Req::Banned(_) => Resp::Banned(Default::default()),
+            Req::FlushTransactionPool(_) => Resp::FlushTransactionPool(Default::default()),
+            Req::GetOutputHistogram(_) => Resp::GetOutputHistogram(Default::default()),
+            Req::GetCoinbaseTxSum(_) => Resp::GetCoinbaseTxSum(Default::default()),
+            Req::GetVersion(_) => Resp::GetVersion(Default::default()),
+            Req::GetFeeEstimate(_) => Resp::GetFeeEstimate(Default::default()),
+            Req::GetAlternateChains(_) => Resp::GetAlternateChains(Default::default()),
+            Req::RelayTx(_) => Resp::RelayTx(Default::default()),
+            Req::SyncInfo(_) => Resp::SyncInfo(Default::default()),
+            Req::GetTransactionPoolBacklog(_) => {
+                Resp::GetTransactionPoolBacklog(Default::default())
+            }
+            Req::GetMinerData(_) => Resp::GetMinerData(Default::default()),
+            Req::PruneBlockchain(_) => Resp::PruneBlockchain(Default::default()),
+            Req::CalcPow(_) => Resp::CalcPow(Default::default()),
+            Req::FlushCache(_) => Resp::FlushCache(Default::default()),
+            Req::AddAuxPow(_) => Resp::AddAuxPow(Default::default()),
+            Req::GetTxIdsLoose(_) => Resp::GetTxIdsLoose(Default::default()),
         };
 
         let (tx, rx) = channel();
         drop(tx.send(Ok(resp)));
         InfallibleOneshotReceiver::from(rx)
     }
 }
 
+impl Service<BinRequest> for RpcHandlerDummy {
+    type Response = BinResponse;
+    type Error = Error;
+    type Future = InfallibleOneshotReceiver<Result<BinResponse, Error>>;
+
+    fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, req: BinRequest) -> Self::Future {
+        use cuprate_rpc_types::bin::BinRequest as Req;
+        use cuprate_rpc_types::bin::BinResponse as Resp;
+
+        #[allow(clippy::default_trait_access)]
+        let resp = match req {
+            Req::GetBlocks(_) => Resp::GetBlocks(Default::default()),
+            Req::GetBlocksByHeight(_) => Resp::GetBlocksByHeight(Default::default()),
+            Req::GetHashes(_) => Resp::GetHashes(Default::default()),
+            Req::GetOutputIndexes(_) => Resp::GetOutputIndexes(Default::default()),
+            Req::GetOuts(_) => Resp::GetOuts(Default::default()),
+            Req::GetTransactionPoolHashes(_) => Resp::GetTransactionPoolHashes(Default::default()),
+            Req::GetOutputDistribution(_) => Resp::GetOutputDistribution(Default::default()),
+        };
+
+        let (tx, rx) = channel();
+        drop(tx.send(Ok(resp)));
+        InfallibleOneshotReceiver::from(rx)
+    }
+}
+
+impl Service<OtherRequest> for RpcHandlerDummy {
+    type Response = OtherResponse;
+    type Error = Error;
+    type Future = InfallibleOneshotReceiver<Result<OtherResponse, Error>>;
+
+    fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, req: OtherRequest) -> Self::Future {
+        use cuprate_rpc_types::other::OtherRequest as Req;
+        use cuprate_rpc_types::other::OtherResponse as Resp;
+
+        #[allow(clippy::default_trait_access)]
+        let resp = match req {
+            Req::GetHeight(_) => Resp::GetHeight(Default::default()),
+            Req::GetTransactions(_) => Resp::GetTransactions(Default::default()),
+            Req::GetAltBlocksHashes(_) => Resp::GetAltBlocksHashes(Default::default()),
+            Req::IsKeyImageSpent(_) => Resp::IsKeyImageSpent(Default::default()),
+            Req::SendRawTransaction(_) => Resp::SendRawTransaction(Default::default()),
+            Req::StartMining(_) => Resp::StartMining(Default::default()),
+            Req::StopMining(_) => Resp::StopMining(Default::default()),
+            Req::MiningStatus(_) => Resp::MiningStatus(Default::default()),
+            Req::SaveBc(_) => Resp::SaveBc(Default::default()),
+            Req::GetPeerList(_) => Resp::GetPeerList(Default::default()),
+            Req::SetLogHashRate(_) => Resp::SetLogHashRate(Default::default()),
+            Req::SetLogLevel(_) => Resp::SetLogLevel(Default::default()),
+            Req::SetLogCategories(_) => Resp::SetLogCategories(Default::default()),
+            Req::SetBootstrapDaemon(_) => Resp::SetBootstrapDaemon(Default::default()),
+            Req::GetTransactionPool(_) => Resp::GetTransactionPool(Default::default()),
+            Req::GetTransactionPoolStats(_) => Resp::GetTransactionPoolStats(Default::default()),
+            Req::StopDaemon(_) => Resp::StopDaemon(Default::default()),
+            Req::GetLimit(_) => Resp::GetLimit(Default::default()),
+            Req::SetLimit(_) => Resp::SetLimit(Default::default()),
+            Req::OutPeers(_) => Resp::OutPeers(Default::default()),
+            Req::InPeers(_) => Resp::InPeers(Default::default()),
+            Req::GetNetStats(_) => Resp::GetNetStats(Default::default()),
+            Req::GetOuts(_) => Resp::GetOuts(Default::default()),
+            Req::Update(_) => Resp::Update(Default::default()),
+            Req::PopBlocks(_) => Resp::PopBlocks(Default::default()),
+            Req::GetTransactionPoolHashes(_) => Resp::GetTransactionPoolHashes(Default::default()),
+            Req::GetPublicNodes(_) => Resp::GetPublicNodes(Default::default()),
+        };
+
+        let (tx, rx) = channel();
@@ -1,33 +0,0 @@
-//! RPC requests.
-
-//---------------------------------------------------------------------------------------------------- Import
-#[cfg(feature = "serde")]
-use serde::{Deserialize, Serialize};
-
-use cuprate_rpc_types::{bin::BinRequest, json::JsonRpcRequest, other::OtherRequest};
-
-//---------------------------------------------------------------------------------------------------- RpcRequest
-/// All possible RPC requests.
-///
-/// This enum encapsulates all possible RPC requests:
-/// - JSON RPC 2.0 requests
-/// - Binary requests
-/// - Other JSON requests
-///
-/// It is the `Request` type required to be used in an [`RpcHandler`](crate::RpcHandler).
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
-pub enum RpcRequest {
-    /// JSON-RPC 2.0 requests.
-    JsonRpc(cuprate_json_rpc::Request<JsonRpcRequest>),
-    /// Binary requests.
-    Binary(BinRequest),
-    /// Other JSON requests.
-    Other(OtherRequest),
-}
-
-//---------------------------------------------------------------------------------------------------- Tests
-#[cfg(test)]
-mod test {
-    // use super::*;
-}
@@ -1,33 +0,0 @@
-//! RPC responses.
-
-//---------------------------------------------------------------------------------------------------- Import
-#[cfg(feature = "serde")]
-use serde::{Deserialize, Serialize};
-
-use cuprate_rpc_types::{bin::BinResponse, json::JsonRpcResponse, other::OtherResponse};
-
-//---------------------------------------------------------------------------------------------------- RpcResponse
-/// All possible RPC responses.
-///
-/// This enum encapsulates all possible RPC responses:
-/// - JSON RPC 2.0 responses
-/// - Binary responses
-/// - Other JSON responses
-///
-/// It is the `Response` type required to be used in an [`RpcHandler`](crate::RpcHandler).
-#[derive(Clone, PartialEq, Eq)]
-#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
-pub enum RpcResponse {
-    /// JSON RPC 2.0 responses.
-    JsonRpc(cuprate_json_rpc::Response<JsonRpcResponse>),
-    /// Binary responses.
-    Binary(BinResponse),
-    /// Other JSON responses.
-    Other(OtherResponse),
-}
-
-//---------------------------------------------------------------------------------------------------- Tests
-#[cfg(test)]
-mod test {
-    // use super::*;
-}
rpc/interface/src/rpc_service.rs (new file, +50 lines)
@@ -0,0 +1,50 @@
+//! RPC [`tower::Service`] trait.
+
+//---------------------------------------------------------------------------------------------------- Use
+use std::future::Future;
+
+use tower::Service;
+
+//---------------------------------------------------------------------------------------------------- RpcService
+/// An RPC [`tower::Service`].
+///
+/// This trait solely exists to encapsulate the traits needed
+/// to handle RPC requests and respond with responses - **it is
+/// not meant to be used directly.**
+///
+/// The `Request` and `Response` are generic and
+/// are used in the [`tower::Service`] bounds.
+///
+/// The error type is always [`anyhow::Error`].
+///
+/// There is a blanket implementation that implements this
+/// trait on types that implement `tower::Service` correctly.
+///
+/// See [`RpcHandler`](crate::RpcHandler) for more information.
+pub trait RpcService<Request, Response>:
+    Clone
+    + Send
+    + Sync
+    + 'static
+    + Service<
+        Request,
+        Response = Response,
+        Error = anyhow::Error,
+        Future: Future<Output = Result<Response, anyhow::Error>> + Send + 'static,
+    >
+{
+}
+
+impl<Request, Response, T> RpcService<Request, Response> for T where
+    Self: Clone
+        + Send
+        + Sync
+        + 'static
+        + Service<
+            Request,
+            Response = Response,
+            Error = anyhow::Error,
+            Future: Future<Output = Result<Response, anyhow::Error>> + Send + 'static,
+        >
+{
+}
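Thanks to the blanket impl, any correctly-shaped `tower::Service` picks this trait up for free, so generic code can be written once and reused for all 3 enum pairs. A minimal sketch of such a caller (added for this edit, not part of the commit):

```rust
use cuprate_rpc_interface::RpcService;
use cuprate_rpc_types::json::{JsonRpcRequest, JsonRpcResponse};
use tower::ServiceExt;

/// Generic over *any* service with the right request/response/error types;
/// swapping in the bin or other enum pair works identically.
async fn call_json_rpc<S>(service: S, request: JsonRpcRequest) -> anyhow::Result<JsonRpcResponse>
where
    S: RpcService<JsonRpcRequest, JsonRpcResponse>,
{
    // `oneshot` drives `poll_ready` and then `call` exactly once.
    service.oneshot(request).await
}
```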
@@ -18,3 +18,6 @@ thiserror = { workspace = true }
 
 [dev-dependencies]
 pretty_assertions = { workspace = true }
+
+[lints]
+workspace = true
@@ -1,96 +1,5 @@
 #![doc = include_str!("../README.md")]
-//---------------------------------------------------------------------------------------------------- Lints
-// Forbid lints.
-// Our code, and code generated (e.g macros) cannot overrule these.
-#![forbid(
-    // `unsafe` is allowed but it _must_ be
-    // commented with `SAFETY: reason`.
-    clippy::undocumented_unsafe_blocks,
-
-    // Never.
-    unused_unsafe,
-    redundant_semicolons,
-    unused_allocation,
-    coherence_leak_check,
-    while_true,
-
-    // Maybe can be put into `#[deny]`.
-    unconditional_recursion,
-    for_loops_over_fallibles,
-    unused_braces,
-    unused_labels,
-    keyword_idents,
-    non_ascii_idents,
-    variant_size_differences,
-    single_use_lifetimes,
-
-    // Probably can be put into `#[deny]`.
-    future_incompatible,
-    let_underscore,
-    break_with_label_and_loop,
-    duplicate_macro_attributes,
-    exported_private_dependencies,
-    large_assignments,
-    overlapping_range_endpoints,
-    semicolon_in_expressions_from_macros,
-    noop_method_call,
-    unreachable_pub,
-)]
-// Deny lints.
-// Some of these are `#[allow]`'ed on a per-case basis.
-#![deny(
-    clippy::all,
-    clippy::correctness,
-    clippy::suspicious,
-    clippy::style,
-    clippy::complexity,
-    clippy::perf,
-    clippy::pedantic,
-    clippy::nursery,
-    clippy::cargo,
-    clippy::missing_docs_in_private_items,
-    unused_mut,
-    missing_docs,
-    deprecated,
-    unused_comparisons,
-    nonstandard_style
-)]
-#![allow(
-    // FIXME: this lint affects crates outside of
-    // `database/` for some reason, allow for now.
-    clippy::cargo_common_metadata,
-
-    // FIXME: adding `#[must_use]` onto everything
-    // might just be more annoying than useful...
-    // although it is sometimes nice.
-    clippy::must_use_candidate,
-
-    // FIXME: good lint but too many false positives
-    // with our `Env` + `RwLock` setup.
-    clippy::significant_drop_tightening,
-
-    // FIXME: good lint but is less clear in most cases.
-    clippy::items_after_statements,
-
-    clippy::module_name_repetitions,
-    clippy::module_inception,
-    clippy::redundant_pub_crate,
-    clippy::option_if_let_else,
-)]
-// Allow some lints when running in debug mode.
-#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))]
-// Allow some lints in tests.
-#![cfg_attr(
-    test,
-    allow(
-        clippy::cognitive_complexity,
-        clippy::needless_pass_by_value,
-        clippy::cast_possible_truncation,
-        clippy::too_many_lines
-    )
-)]
-
 //---------------------------------------------------------------------------------------------------- Mod/Use
 pub mod error;
 
 mod id;
@@ -105,6 +14,5 @@ pub use request::Request;
 mod response;
 pub use response::Response;
 
 //---------------------------------------------------------------------------------------------------- TESTS
 #[cfg(test)]
 mod tests;
@@ -304,14 +304,14 @@ where
                     if payload.is_none() {
                         payload = Some(Ok(map.next_value::<T>()?));
                     } else {
-                        return Err(serde::de::Error::duplicate_field("result/error"));
+                        return Err(Error::duplicate_field("result/error"));
                     }
                 }
                 Key::Error => {
                     if payload.is_none() {
                         payload = Some(Err(map.next_value::<ErrorObject>()?));
                     } else {
-                        return Err(serde::de::Error::duplicate_field("result/error"));
+                        return Err(Error::duplicate_field("result/error"));
                     }
                 }
                 Key::Unknown => {
@@ -52,6 +52,7 @@ where
 }
 
 /// Tests an input JSON string matches an expected type `T`.
+#[allow(clippy::needless_pass_by_value)] // serde signature
 fn assert_de<T>(json: &'static str, expected: T)
 where
     T: DeserializeOwned + std::fmt::Debug + Clone + PartialEq,
@@ -18,13 +18,14 @@ cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true }
 cuprate-fixed-bytes = { path = "../../net/fixed-bytes" }
 cuprate-types = { path = "../../types" }
 
 monero-serai = { workspace = true }
 paste = { workspace = true }
 serde = { workspace = true, optional = true }
 
 [dev-dependencies]
 cuprate-test-utils = { path = "../../test-utils" }
+cuprate-json-rpc = { path = "../json-rpc" }
 
-serde_json = { workspace = true }
 pretty_assertions = { workspace = true }
+serde = { workspace = true }
 serde_json = { workspace = true }
 
 [lints]
 workspace = true
@@ -13,22 +13,17 @@ use cuprate_epee_encoding::{
     container_as_blob::ContainerAsBlob,
     epee_object, error,
     macros::bytes::{Buf, BufMut},
-    read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue,
+    read_epee_value, write_field, EpeeObject, EpeeObjectBuilder,
 };
 
 use cuprate_types::BlockCompleteEntry;
 
 use crate::{
-    base::{AccessResponseBase, ResponseBase},
-    defaults::{default_false, default_height, default_string, default_vec, default_zero},
-    free::{is_one, is_zero},
+    base::AccessResponseBase,
+    defaults::{default_false, default_zero},
     macros::{define_request, define_request_and_response, define_request_and_response_doc},
-    misc::{
-        AuxPow, BlockHeader, BlockOutputIndices, ChainInfo, ConnectionInfo, GetBan, GetOutputsOut,
-        HardforkEntry, HistogramEntry, OutKeyBin, OutputDistributionData, Peer, PoolInfoExtent,
-        PoolTxInfo, SetBan, Span, Status, TxBacklogEntry,
-    },
-    rpc_call::{RpcCall, RpcCallValue},
+    misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfoExtent, PoolTxInfo, Status},
+    rpc_call::RpcCallValue,
 };
 
 //---------------------------------------------------------------------------------------------------- Definitions
@@ -8,7 +8,6 @@
 //! `height`, it will use [`default_height`] to fill that in.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::borrow::Cow;
 
 //---------------------------------------------------------------------------------------------------- TODO
 /// Default [`bool`] type used in request/response types, `false`.
@@ -23,12 +22,6 @@ pub(crate) const fn default_true() -> bool {
     true
 }
 
-/// Default `Cow<'static, str>` type used in request/response types.
-#[inline]
-pub(crate) const fn default_cow_str() -> Cow<'static, str> {
-    Cow::Borrowed("")
-}
-
 /// Default [`String`] type used in request/response types.
 #[inline]
 pub(crate) const fn default_string() -> String {
@@ -6,6 +6,7 @@
 /// Returns `true` if the input `u` is equal to `0`.
 #[inline]
 #[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&`
+#[allow(dead_code)] // TODO: see if needed after handlers.
 pub(crate) const fn is_zero(u: &u64) -> bool {
     *u == 0
 }
@@ -13,6 +14,7 @@ pub(crate) const fn is_zero(u: &u64) -> bool {
 /// Returns `true` if the input `u` is equal to `1`.
 #[inline]
 #[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&`
+#[allow(dead_code)] // TODO: see if needed after handlers.
 pub(crate) const fn is_one(u: &u64) -> bool {
     *u == 1
 }
@@ -12,12 +12,11 @@ use crate::{
         default_false, default_height, default_one, default_string, default_true, default_vec,
         default_zero,
     },
-    free::{is_one, is_zero},
     macros::define_request_and_response,
     misc::{
         AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan,
-        GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, OutputDistributionData, SetBan,
-        Span, Status, SyncInfoPeer, TxBacklogEntry,
+        GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, SetBan, Span, Status,
+        SyncInfoPeer, TxBacklogEntry,
     },
     rpc_call::RpcCallValue,
 };
@@ -1,113 +1,6 @@
 #![doc = include_str!("../README.md")]
 #![cfg_attr(docsrs, feature(doc_cfg))]
-//---------------------------------------------------------------------------------------------------- Lints
-// Forbid lints.
-// Our code, and code generated (e.g macros) cannot overrule these.
-#![forbid(
-    // `unsafe` is allowed but it _must_ be
-    // commented with `SAFETY: reason`.
-    clippy::undocumented_unsafe_blocks,
-
-    // Never.
-    unused_unsafe,
-    redundant_semicolons,
-    unused_allocation,
-    coherence_leak_check,
-    while_true,
-
-    // Maybe can be put into `#[deny]`.
-    unconditional_recursion,
-    for_loops_over_fallibles,
-    unused_braces,
-    unused_labels,
-    keyword_idents,
-    non_ascii_idents,
-    variant_size_differences,
-    single_use_lifetimes,
-
-    // Probably can be put into `#[deny]`.
-    future_incompatible,
-    let_underscore,
-    break_with_label_and_loop,
-    duplicate_macro_attributes,
-    exported_private_dependencies,
-    large_assignments,
-    overlapping_range_endpoints,
-    semicolon_in_expressions_from_macros,
-    noop_method_call,
-)]
-// Deny lints.
-// Some of these are `#[allow]`'ed on a per-case basis.
-#![deny(
-    clippy::all,
-    clippy::correctness,
-    clippy::suspicious,
-    clippy::style,
-    clippy::complexity,
-    clippy::perf,
-    clippy::pedantic,
-    clippy::nursery,
-    clippy::cargo,
-    unused_doc_comments,
-    unused_mut,
-    missing_docs,
-    deprecated,
-    unused_comparisons,
-    nonstandard_style,
-    unreachable_pub
-)]
-#![allow(
-    // FIXME: this lint affects crates outside of
-    // `database/` for some reason, allow for now.
-    clippy::cargo_common_metadata,
-
-    // FIXME: adding `#[must_use]` onto everything
-    // might just be more annoying than useful...
-    // although it is sometimes nice.
-    clippy::must_use_candidate,
-
-    // FIXME: good lint but too many false positives
-    // with our `Env` + `RwLock` setup.
-    clippy::significant_drop_tightening,
-
-    // FIXME: good lint but is less clear in most cases.
-    clippy::items_after_statements,
-
-    // TODO
-    rustdoc::bare_urls,
-
-    clippy::module_name_repetitions,
-    clippy::module_inception,
-    clippy::redundant_pub_crate,
-    clippy::option_if_let_else,
-)]
-// Allow some lints when running in debug mode.
-#![cfg_attr(
-    debug_assertions,
-    allow(
-        clippy::todo,
-        clippy::multiple_crate_versions,
-        unused_imports,
-        unused_variables
-    )
-)]
-// Allow some lints in tests.
-#![cfg_attr(
-    test,
-    allow(
-        clippy::cognitive_complexity,
-        clippy::needless_pass_by_value,
-        clippy::cast_possible_truncation,
-        clippy::too_many_lines
-    )
-)]
-// TODO: remove me after finishing impl
-#![allow(
-    dead_code,
-    rustdoc::broken_intra_doc_links // TODO: remove after `{bin,json,other}.rs` gets merged
-)]
-
 //---------------------------------------------------------------------------------------------------- Mod
 mod constants;
 mod defaults;
 mod free;
@@ -129,3 +22,10 @@ pub use constants::{
     CORE_RPC_VERSION_MINOR,
 };
 pub use rpc_call::{RpcCall, RpcCallValue};
+
+// false-positive: used in tests
+#[cfg(test)]
+mod test {
+    extern crate cuprate_test_utils;
+    extern crate serde_json;
+}
@@ -1,17 +1,14 @@
 //! Output distributions for [`crate::json::GetOutputDistributionResponse`].
 
 //---------------------------------------------------------------------------------------------------- Use
-use std::mem::size_of;
-
 #[cfg(feature = "serde")]
-use serde::{ser::SerializeStruct, Deserialize, Serialize};
+use serde::{Deserialize, Serialize};
 
 #[cfg(feature = "epee")]
 use cuprate_epee_encoding::{
     epee_object, error,
     macros::bytes::{Buf, BufMut},
-    read_epee_value, read_varint, write_field, write_varint, EpeeObject, EpeeObjectBuilder,
-    EpeeValue, Marker,
+    read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue,
 };
 
 //---------------------------------------------------------------------------------------------------- Free
@@ -24,7 +21,7 @@ use cuprate_epee_encoding::{
     45..=55
 )]
 #[cfg(feature = "epee")]
-fn compress_integer_array(array: &[u64]) -> error::Result<Vec<u8>> {
+fn compress_integer_array(_: &[u64]) -> error::Result<Vec<u8>> {
     todo!()
 }
 
@@ -36,7 +33,7 @@ fn compress_integer_array(array: &[u64]) -> error::Result<Vec<u8>> {
     "rpc/core_rpc_server_commands_defs.h",
     57..=72
 )]
-fn decompress_integer_array(array: &[u8]) -> Vec<u64> {
+fn decompress_integer_array(_: &[u8]) -> Vec<u64> {
     todo!()
 }
 
@@ -281,9 +278,9 @@ impl EpeeObject for Distribution {
 //---------------------------------------------------------------------------------------------------- Tests
 #[cfg(test)]
 mod tests {
-    use pretty_assertions::assert_eq;
+    // use pretty_assertions::assert_eq;
 
-    use super::*;
+    // use super::*;
 
     // TODO: re-enable tests after (de)compression functions are implemented.
@@ -5,23 +5,13 @@
 //! the [`crate::misc::ConnectionInfo`] struct defined here.
 
 //---------------------------------------------------------------------------------------------------- Import
-use std::fmt::Display;
-
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
 #[cfg(feature = "epee")]
-use cuprate_epee_encoding::{
-    epee_object,
-    macros::bytes::{Buf, BufMut},
-    EpeeValue, Marker,
-};
+use cuprate_epee_encoding::epee_object;
 
 use crate::{
     constants::{
         CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK,
         CORE_RPC_STATUS_PAYMENT_REQUIRED,
     },
     defaults::{default_string, default_zero},
     macros::monero_definition_link,
 };
@@ -15,6 +15,7 @@
 mod binary_string;
 mod distribution;
 mod key_image_spent_status;
+#[allow(clippy::module_inception)]
 mod misc;
 mod pool_info_extent;
 mod status;
@@ -8,9 +8,9 @@ use serde::{Deserialize, Serialize};
 
 #[cfg(feature = "epee")]
 use cuprate_epee_encoding::{
-    epee_object, error,
+    error,
     macros::bytes::{Buf, BufMut},
-    read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, Marker,
+    EpeeObject, EpeeObjectBuilder,
 };
 
 //---------------------------------------------------------------------------------------------------- TxEntry
@@ -123,7 +123,7 @@ impl Default for TxEntry {
 //---------------------------------------------------------------------------------------------------- Epee
 #[cfg(feature = "epee")]
 impl EpeeObjectBuilder<TxEntry> for () {
-    fn add_field<B: Buf>(&mut self, name: &str, r: &mut B) -> error::Result<bool> {
+    fn add_field<B: Buf>(&mut self, _: &str, _: &mut B) -> error::Result<bool> {
         unreachable!()
     }
 
@@ -140,7 +140,7 @@ impl EpeeObject for TxEntry {
         unreachable!()
     }
 
-    fn write_fields<B: BufMut>(self, w: &mut B) -> error::Result<()> {
+    fn write_fields<B: BufMut>(self, _: &mut B) -> error::Result<()> {
         unreachable!()
     }
 }
|
@ -11,10 +11,9 @@ use crate::{
|
|||
defaults::{default_false, default_string, default_true, default_vec, default_zero},
|
||||
macros::define_request_and_response,
|
||||
misc::{
|
||||
GetOutputsOut, KeyImageSpentStatus, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status,
|
||||
TxEntry, TxInfo, TxpoolStats,
|
||||
GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo,
|
||||
TxpoolStats,
|
||||
},
|
||||
rpc_call::RpcCall,
|
||||
RpcCallValue,
|
||||
};
|
||||
|
||||
|
@ -191,7 +190,7 @@ define_request_and_response! {
|
|||
}
|
||||
)]
|
||||
AccessResponseBase {
|
||||
/// FIXME: These are [`KeyImageSpentStatus`] in [`u8`] form.
|
||||
/// FIXME: These are [`KeyImageSpentStatus`](crate::misc::KeyImageSpentStatus) in [`u8`] form.
|
||||
spent_status: Vec<u8>,
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -28,5 +28,5 @@ where
 //---------------------------------------------------------------------------------------------------- Tests
 #[cfg(test)]
 mod test {
-    use super::*;
+    // use super::*;
 }