diff --git a/Cargo.lock b/Cargo.lock index 22e7fdd0..0bb4612a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + [[package]] name = "ahash" version = "0.8.11" @@ -44,21 +50,6 @@ dependencies = [ "libc", ] -[[package]] -name = "anstream" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - [[package]] name = "anstyle" version = "1.0.7" @@ -66,32 +57,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] -name = "anstyle-parse" -version = "0.2.4" +name = "anyhow" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" -dependencies = [ - "anstyle", - "windows-sys 0.52.0", -] +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" 
[[package]] name = "async-stream" @@ -203,7 +172,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.7.3", "object", "rustc-demangle", ] @@ -395,10 +364,8 @@ version = "4.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f" dependencies = [ - "anstream", "anstyle", "clap_lex", - "strsim", ] [[package]] @@ -419,12 +386,6 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" -[[package]] -name = "colorchoice" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" - [[package]] name = "core-foundation" version = "0.9.4" @@ -697,6 +658,7 @@ version = "0.5.0" dependencies = [ "bytes", "cuprate-fixed-bytes", + "cuprate-helper", "hex", "paste", "ref-cast", @@ -764,6 +726,7 @@ version = "0.1.0" dependencies = [ "bitflags 2.5.0", "bytes", + "cuprate-helper", "futures", "proptest", "rand", @@ -842,11 +805,13 @@ dependencies = [ name = "cuprate-rpc-interface" version = "0.0.0" dependencies = [ + "anyhow", "axum", "cuprate-epee-encoding", "cuprate-helper", "cuprate-json-rpc", "cuprate-rpc-types", + "cuprate-test-utils", "futures", "paste", "serde", @@ -862,12 +827,9 @@ version = "0.0.0" dependencies = [ "cuprate-epee-encoding", "cuprate-fixed-bytes", - "cuprate-json-rpc", "cuprate-test-utils", "cuprate-types", - "monero-serai", "paste", - "pretty_assertions", "serde", "serde_json", ] @@ -901,12 +863,29 @@ dependencies = [ [[package]] name = "cuprate-txpool" version = "0.0.0" +dependencies = [ + "bitflags 2.5.0", + "bytemuck", + "cuprate-database", + "cuprate-database-service", + "cuprate-helper", + "cuprate-test-utils", + "cuprate-types", + "hex", + "hex-literal", + "monero-serai", + "rayon", + "serde", + "tempfile", + "thiserror", + 
"tokio", + "tower", +] [[package]] name = "cuprate-types" version = "0.0.0" dependencies = [ - "borsh", "bytes", "cuprate-epee-encoding", "cuprate-fixed-bytes", @@ -926,6 +905,7 @@ dependencies = [ "bytes", "cuprate-epee-encoding", "cuprate-fixed-bytes", + "cuprate-helper", "cuprate-levin", "cuprate-types", "hex", @@ -936,18 +916,63 @@ dependencies = [ name = "cuprated" version = "0.1.0" dependencies = [ + "anyhow", + "async-trait", + "bitflags 2.5.0", + "borsh", + "bytemuck", + "bytes", + "cfg-if", + "chrono", "clap", + "crossbeam", + "crypto-bigint", + "cuprate-address-book", + "cuprate-async-buffer", "cuprate-blockchain", "cuprate-consensus", + "cuprate-consensus-rules", "cuprate-cryptonight", + "cuprate-dandelion-tower", + "cuprate-database", + "cuprate-database-service", + "cuprate-epee-encoding", + "cuprate-fast-sync", + "cuprate-fixed-bytes", + "cuprate-helper", + "cuprate-json-rpc", + "cuprate-levin", "cuprate-p2p", "cuprate-p2p-core", + "cuprate-pruning", + "cuprate-rpc-interface", + "cuprate-rpc-types", + "cuprate-test-utils", + "cuprate-txpool", "cuprate-types", + "cuprate-wire", + "curve25519-dalek", + "dashmap", + "dirs", "futures", "hex", + "hex-literal", + "indexmap", + "monero-serai", + "paste", + "pin-project", + "rand", + "rand_distr", + "randomx-rs", "rayon", + "serde", + "serde_bytes", + "serde_json", "thiserror", + "thread_local", "tokio", + "tokio-stream", + "tokio-util", "tower", "tracing", "tracing-subscriber", @@ -985,7 +1010,7 @@ dependencies = [ [[package]] name = "dalek-ff-group" version = "0.4.1" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "crypto-bigint", "curve25519-dalek", @@ -1129,18 +1154,18 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.33" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] name = "flexible-transcript" version = "0.3.2" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "blake2", "digest", @@ -1288,9 +1313,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -1644,12 +1669,6 @@ dependencies = [ "hashbrown", ] -[[package]] -name = "is_terminal_polyfill" -version = "1.70.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" - [[package]] name = "itoa" version = "1.0.11" @@ -1790,6 +1809,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + [[package]] name = "mio" version = "0.8.11" @@ -1804,7 +1832,7 @@ dependencies = [ [[package]] name = "monero-address" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "monero-io", @@ -1817,7 +1845,7 @@ dependencies = [ 
[[package]] name = "monero-borromean" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "monero-generators", @@ -1830,7 +1858,7 @@ dependencies = [ [[package]] name = "monero-bulletproofs" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "monero-generators", @@ -1845,7 +1873,7 @@ dependencies = [ [[package]] name = "monero-clsag" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "dalek-ff-group", @@ -1865,7 +1893,7 @@ dependencies = [ [[package]] name = "monero-generators" version = "0.4.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "dalek-ff-group", @@ -1879,7 +1907,7 @@ dependencies = [ [[package]] name = "monero-io" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "std-shims", @@ -1888,7 +1916,7 @@ dependencies = [ [[package]] name = "monero-mlsag" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = 
"git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "monero-generators", @@ -1902,7 +1930,7 @@ dependencies = [ [[package]] name = "monero-primitives" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "monero-generators", @@ -1915,7 +1943,7 @@ dependencies = [ [[package]] name = "monero-rpc" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "async-trait", "curve25519-dalek", @@ -1932,7 +1960,7 @@ dependencies = [ [[package]] name = "monero-serai" version = "0.1.4-alpha" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "hex-literal", @@ -1950,7 +1978,7 @@ dependencies = [ [[package]] name = "monero-simple-request-rpc" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "async-trait", "digest_auth", @@ -1960,16 +1988,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -2017,12 +2035,6 @@ version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "page_size" version = "0.6.0" @@ -2468,9 +2480,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -2560,6 +2572,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_bytes" +version = "0.11.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +dependencies = [ + "serde", +] + [[package]] name = "serde_derive" version = "1.0.203" @@ -2625,15 +2646,6 @@ dependencies = [ "keccak", ] -[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -2646,7 +2658,7 @@ dependencies = [ [[package]] name = "simple-request" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "http-body-util", "hyper", @@ -2702,18 +2714,12 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "std-shims" version = "0.1.1" -source = 
"git+https://github.com/Cuprate/serai.git?rev=50686e8#50686e84022edbd0065d2af655ea4aa5faf486b8" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "hashbrown", "spin", ] -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - [[package]] name = "subtle" version = "2.5.0" @@ -3001,18 +3007,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", ] [[package]] @@ -3021,12 +3015,7 @@ version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ - "nu-ansi-term", - "sharded-slab", - "smallvec", - "thread_local", "tracing-core", - "tracing-log", ] [[package]] @@ -3061,9 +3050,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea" +checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ "base64", "flate2", @@ -3100,18 +3089,6 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" -[[package]] -name = "utf8parse" -version = "0.2.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - [[package]] name = "version_check" version = "0.9.4" @@ -3198,9 +3175,9 @@ checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" dependencies = [ "rustls-pki-types", ] diff --git a/Cargo.toml b/Cargo.toml index e1f068eb..cbeded93 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,6 +48,7 @@ opt-level = 1 opt-level = 3 [workspace.dependencies] +anyhow = { version = "1.0.87", default-features = false } async-trait = { version = "0.1.74", default-features = false } bitflags = { version = "2.4.2", default-features = false } borsh = { version = "1.2.1", default-features = false } @@ -76,7 +77,7 @@ serde_bytes = { version = "0.11.12", default-features = false } serde_json = { version = "1.0.108", default-features = false } serde = { version = "1.0.190", default-features = false } thiserror = { version = "1.0.50", default-features = false } -thread_local = { version = "1.1.7", default-features = false } +thread_local = { version = "1.1.7", default-features = false } tokio-util = { version = "0.7.10", default-features = false } tokio-stream = { version = "0.1.14", default-features = false } tokio = { version = "1.33.0", default-features = false } @@ -262,6 +263,7 @@ empty_structs_with_brackets = "deny" empty_enum_variants_with_brackets = "deny" empty_drop = "deny" clone_on_ref_ptr = "deny" +upper_case_acronyms = "deny" # Hot # 
inline_always = "deny" @@ -278,13 +280,15 @@ clone_on_ref_ptr = "deny" # allow_attributes_without_reason = "deny" # missing_assert_message = "deny" # missing_docs_in_private_items = "deny" -# undocumented_unsafe_blocks = "deny" +undocumented_unsafe_blocks = "deny" # multiple_unsafe_ops_per_block = "deny" # single_char_lifetime_names = "deny" # wildcard_enum_match_arm = "deny" [workspace.lints.rust] # Cold +future_incompatible = { level = "deny", priority = -1 } +nonstandard_style = { level = "deny", priority = -1 } absolute_paths_not_starting_with_crate = "deny" explicit_outlives_requirements = "deny" keyword_idents_2018 = "deny" @@ -305,10 +309,11 @@ ambiguous_glob_imports = "deny" unused_unsafe = "deny" # Warm -let_underscore_drop = "deny" +let_underscore = { level = "deny", priority = -1 } unreachable_pub = "deny" unused_qualifications = "deny" variant_size_differences = "deny" +non_camel_case_types = "deny" # Hot # unused_results = "deny" diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index a04c0be2..0c70a31d 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -8,23 +8,69 @@ authors = ["Boog900", "hinto-janai", "SyntheticBird45"] repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" [dependencies] -cuprate-consensus = { path = "../../consensus" } -cuprate-blockchain = { path = "../../storage/blockchain" } -cuprate-p2p = { path = "../../p2p/p2p" } -cuprate-p2p-core = { path = "../../p2p/p2p-core" } -cuprate-types = { path = "../../types" } -cuprate-cryptonight = { path = "../../cryptonight" } +# TODO: after v1.0.0, remove unneeded dependencies. 
+cuprate-consensus = { path = "../../consensus" } +cuprate-fast-sync = { path = "../../consensus/fast-sync" } +cuprate-consensus-rules = { path = "../../consensus/rules" } +cuprate-cryptonight = { path = "../../cryptonight" } +cuprate-helper = { path = "../../helper" } +cuprate-epee-encoding = { path = "../../net/epee-encoding" } +cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } +cuprate-levin = { path = "../../net/levin" } +cuprate-wire = { path = "../../net/wire" } +cuprate-p2p = { path = "../../p2p/p2p" } +cuprate-p2p-core = { path = "../../p2p/p2p-core" } +cuprate-dandelion-tower = { path = "../../p2p/dandelion-tower" } +cuprate-async-buffer = { path = "../../p2p/async-buffer" } +cuprate-address-book = { path = "../../p2p/address-book" } +cuprate-blockchain = { path = "../../storage/blockchain" } +cuprate-database-service = { path = "../../storage/service" } +cuprate-txpool = { path = "../../storage/txpool" } +cuprate-database = { path = "../../storage/database" } +cuprate-pruning = { path = "../../pruning" } +cuprate-test-utils = { path = "../../test-utils" } +cuprate-types = { path = "../../types" } +cuprate-json-rpc = { path = "../../rpc/json-rpc" } +cuprate-rpc-interface = { path = "../../rpc/interface" } +cuprate-rpc-types = { path = "../../rpc/types" } -rayon = { workspace = true } -futures = { workspace = true } -tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } -tower = { workspace = true } -tracing = { workspace = true } -thiserror = { workspace = true } -hex = { workspace = true } - -clap = { workspace = true, features = ["default", "derive"] } -tracing-subscriber = { workspace = true, features = ["default"] } +# TODO: after v1.0.0, remove unneeded dependencies. 
+anyhow = { workspace = true } +async-trait = { workspace = true } +bitflags = { workspace = true } +borsh = { workspace = true } +bytemuck = { workspace = true } +bytes = { workspace = true } +cfg-if = { workspace = true } +clap = { workspace = true } +chrono = { workspace = true } +crypto-bigint = { workspace = true } +crossbeam = { workspace = true } +curve25519-dalek = { workspace = true } +dashmap = { workspace = true } +dirs = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +hex-literal = { workspace = true } +indexmap = { workspace = true } +monero-serai = { workspace = true } +paste = { workspace = true } +pin-project = { workspace = true } +randomx-rs = { workspace = true } +rand = { workspace = true } +rand_distr = { workspace = true } +rayon = { workspace = true } +serde_bytes = { workspace = true } +serde_json = { workspace = true } +serde = { workspace = true } +thiserror = { workspace = true } +thread_local = { workspace = true } +tokio-util = { workspace = true } +tokio-stream = { workspace = true } +tokio = { workspace = true } +tower = { workspace = true } +tracing-subscriber = { workspace = true } +tracing = { workspace = true } #[lints] #workspace = true diff --git a/binaries/cuprated/README.md b/binaries/cuprated/README.md new file mode 100644 index 00000000..47f04080 --- /dev/null +++ b/binaries/cuprated/README.md @@ -0,0 +1,2 @@ +# `cuprated` +TODO diff --git a/binaries/cuprated/src/main.rs b/binaries/cuprated/src/main.rs index 87fc7aa5..62a7056a 100644 --- a/binaries/cuprated/src/main.rs +++ b/binaries/cuprated/src/main.rs @@ -1,3 +1,16 @@ +#![doc = include_str!("../README.md")] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![allow( + unused_imports, + unreachable_pub, + unused_crate_dependencies, + dead_code, + unused_variables, + clippy::needless_pass_by_value, + clippy::unused_async, + reason = "TODO: remove after v1.0.0" +)] + use tokio::runtime::Runtime; use tracing::Level; diff --git 
a/binaries/cuprated/src/rpc.rs b/binaries/cuprated/src/rpc.rs index 80b2789e..9ebcd1b8 100644 --- a/binaries/cuprated/src/rpc.rs +++ b/binaries/cuprated/src/rpc.rs @@ -2,4 +2,9 @@ //! //! Will contain the code to initiate the RPC and a request handler. -mod request_handler; +mod bin; +mod handler; +mod json; +mod other; + +pub use handler::{CupratedRpcHandler, CupratedRpcHandlerState}; diff --git a/binaries/cuprated/src/rpc/bin.rs b/binaries/cuprated/src/rpc/bin.rs new file mode 100644 index 00000000..60d92c12 --- /dev/null +++ b/binaries/cuprated/src/rpc/bin.rs @@ -0,0 +1,85 @@ +use anyhow::Error; + +use cuprate_rpc_types::{ + bin::{ + BinRequest, BinResponse, GetBlocksByHeightRequest, GetBlocksByHeightResponse, + GetBlocksRequest, GetBlocksResponse, GetHashesRequest, GetHashesResponse, + GetOutputIndexesRequest, GetOutputIndexesResponse, GetOutsRequest, GetOutsResponse, + GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse, + }, + json::{GetOutputDistributionRequest, GetOutputDistributionResponse}, +}; + +use crate::rpc::CupratedRpcHandlerState; + +/// Map a [`BinRequest`] to the function that will lead to a [`BinResponse`]. +pub(super) async fn map_request( + state: CupratedRpcHandlerState, + request: BinRequest, +) -> Result { + use BinRequest as Req; + use BinResponse as Resp; + + Ok(match request { + Req::GetBlocks(r) => Resp::GetBlocks(get_blocks(state, r).await?), + Req::GetBlocksByHeight(r) => Resp::GetBlocksByHeight(get_blocks_by_height(state, r).await?), + Req::GetHashes(r) => Resp::GetHashes(get_hashes(state, r).await?), + Req::GetOutputIndexes(r) => Resp::GetOutputIndexes(get_output_indexes(state, r).await?), + Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?), + Req::GetTransactionPoolHashes(r) => { + Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?) + } + Req::GetOutputDistribution(r) => { + Resp::GetOutputDistribution(get_output_distribution(state, r).await?) 
+ } + }) +} + +async fn get_blocks( + state: CupratedRpcHandlerState, + request: GetBlocksRequest, +) -> Result { + todo!() +} + +async fn get_blocks_by_height( + state: CupratedRpcHandlerState, + request: GetBlocksByHeightRequest, +) -> Result { + todo!() +} + +async fn get_hashes( + state: CupratedRpcHandlerState, + request: GetHashesRequest, +) -> Result { + todo!() +} + +async fn get_output_indexes( + state: CupratedRpcHandlerState, + request: GetOutputIndexesRequest, +) -> Result { + todo!() +} + +async fn get_outs( + state: CupratedRpcHandlerState, + request: GetOutsRequest, +) -> Result { + todo!() +} + +async fn get_transaction_pool_hashes( + state: CupratedRpcHandlerState, + request: GetTransactionPoolHashesRequest, +) -> Result { + todo!() +} + +async fn get_output_distribution( + state: CupratedRpcHandlerState, + request: GetOutputDistributionRequest, +) -> Result { + todo!() +} diff --git a/binaries/cuprated/src/rpc/handler.rs b/binaries/cuprated/src/rpc/handler.rs new file mode 100644 index 00000000..8ba25eab --- /dev/null +++ b/binaries/cuprated/src/rpc/handler.rs @@ -0,0 +1,103 @@ +//! Dummy implementation of [`RpcHandler`]. + +use std::task::{Context, Poll}; + +use anyhow::Error; +use futures::{channel::oneshot::channel, future::BoxFuture}; +use serde::{Deserialize, Serialize}; +use tower::Service; + +use cuprate_blockchain::service::BlockchainReadHandle; +use cuprate_helper::asynch::InfallibleOneshotReceiver; +use cuprate_json_rpc::Id; +use cuprate_rpc_interface::RpcHandler; +use cuprate_rpc_types::{ + bin::{BinRequest, BinResponse}, + json::{JsonRpcRequest, JsonRpcResponse}, + other::{OtherRequest, OtherResponse}, +}; +use cuprate_txpool::service::TxpoolReadHandle; + +use crate::rpc::{bin, json, other}; + +/// TODO +#[derive(Clone)] +pub struct CupratedRpcHandler { + /// Should this RPC server be [restricted](RpcHandler::restricted)? 
+ // + // INVARIANT: + // We don't need to include this in `state` and check for + // `self.is_restricted()` because `cuprate-rpc-interface` handles that. + pub restricted: bool, + + /// State needed for request -> response mapping. + pub state: CupratedRpcHandlerState, +} + +/// TODO +#[derive(Clone)] +pub struct CupratedRpcHandlerState { + /// Read handle to the blockchain database. + pub blockchain: BlockchainReadHandle, + + /// Read handle to the transaction pool database. + pub txpool: TxpoolReadHandle, +} + +impl CupratedRpcHandler { + /// TODO + pub fn init() { + todo!() + } +} + +impl RpcHandler for CupratedRpcHandler { + fn restricted(&self) -> bool { + self.restricted + } +} + +impl Service for CupratedRpcHandler { + type Response = JsonRpcResponse; + type Error = Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, request: JsonRpcRequest) -> Self::Future { + let state = CupratedRpcHandlerState::clone(&self.state); + Box::pin(json::map_request(state, request)) + } +} + +impl Service for CupratedRpcHandler { + type Response = BinResponse; + type Error = Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, request: BinRequest) -> Self::Future { + let state = CupratedRpcHandlerState::clone(&self.state); + Box::pin(bin::map_request(state, request)) + } +} + +impl Service for CupratedRpcHandler { + type Response = OtherResponse; + type Error = Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, request: OtherRequest) -> Self::Future { + let state = CupratedRpcHandlerState::clone(&self.state); + Box::pin(other::map_request(state, request)) + } +} diff --git a/binaries/cuprated/src/rpc/json.rs b/binaries/cuprated/src/rpc/json.rs new file mode 
100644 index 00000000..41398d48 --- /dev/null +++ b/binaries/cuprated/src/rpc/json.rs @@ -0,0 +1,294 @@ +use std::sync::Arc; + +use anyhow::Error; +use tower::ServiceExt; + +use cuprate_rpc_types::json::{ + AddAuxPowRequest, AddAuxPowResponse, BannedRequest, BannedResponse, CalcPowRequest, + CalcPowResponse, FlushCacheRequest, FlushCacheResponse, FlushTransactionPoolRequest, + FlushTransactionPoolResponse, GenerateBlocksRequest, GenerateBlocksResponse, + GetAlternateChainsRequest, GetAlternateChainsResponse, GetBansRequest, GetBansResponse, + GetBlockCountRequest, GetBlockCountResponse, GetBlockHeaderByHashRequest, + GetBlockHeaderByHashResponse, GetBlockHeaderByHeightRequest, GetBlockHeaderByHeightResponse, + GetBlockHeadersRangeRequest, GetBlockHeadersRangeResponse, GetBlockRequest, GetBlockResponse, + GetCoinbaseTxSumRequest, GetCoinbaseTxSumResponse, GetConnectionsRequest, + GetConnectionsResponse, GetFeeEstimateRequest, GetFeeEstimateResponse, GetInfoRequest, + GetInfoResponse, GetLastBlockHeaderRequest, GetLastBlockHeaderResponse, GetMinerDataRequest, + GetMinerDataResponse, GetOutputHistogramRequest, GetOutputHistogramResponse, + GetTransactionPoolBacklogRequest, GetTransactionPoolBacklogResponse, GetTxIdsLooseRequest, + GetTxIdsLooseResponse, GetVersionRequest, GetVersionResponse, HardForkInfoRequest, + HardForkInfoResponse, JsonRpcRequest, JsonRpcResponse, OnGetBlockHashRequest, + OnGetBlockHashResponse, PruneBlockchainRequest, PruneBlockchainResponse, RelayTxRequest, + RelayTxResponse, SetBansRequest, SetBansResponse, SubmitBlockRequest, SubmitBlockResponse, + SyncInfoRequest, SyncInfoResponse, +}; + +use crate::rpc::CupratedRpcHandlerState; + +/// Map a [`JsonRpcRequest`] to the function that will lead to a [`JsonRpcResponse`]. 
+pub(super) async fn map_request( + state: CupratedRpcHandlerState, + request: JsonRpcRequest, +) -> Result { + use JsonRpcRequest as Req; + use JsonRpcResponse as Resp; + + Ok(match request { + Req::GetBlockCount(r) => Resp::GetBlockCount(get_block_count(state, r).await?), + Req::OnGetBlockHash(r) => Resp::OnGetBlockHash(on_get_block_hash(state, r).await?), + Req::SubmitBlock(r) => Resp::SubmitBlock(submit_block(state, r).await?), + Req::GenerateBlocks(r) => Resp::GenerateBlocks(generate_blocks(state, r).await?), + Req::GetLastBlockHeader(r) => { + Resp::GetLastBlockHeader(get_last_block_header(state, r).await?) + } + Req::GetBlockHeaderByHash(r) => { + Resp::GetBlockHeaderByHash(get_block_header_by_hash(state, r).await?) + } + Req::GetBlockHeaderByHeight(r) => { + Resp::GetBlockHeaderByHeight(get_block_header_by_height(state, r).await?) + } + Req::GetBlockHeadersRange(r) => { + Resp::GetBlockHeadersRange(get_block_headers_range(state, r).await?) + } + Req::GetBlock(r) => Resp::GetBlock(get_block(state, r).await?), + Req::GetConnections(r) => Resp::GetConnections(get_connections(state, r).await?), + Req::GetInfo(r) => Resp::GetInfo(get_info(state, r).await?), + Req::HardForkInfo(r) => Resp::HardForkInfo(hard_fork_info(state, r).await?), + Req::SetBans(r) => Resp::SetBans(set_bans(state, r).await?), + Req::GetBans(r) => Resp::GetBans(get_bans(state, r).await?), + Req::Banned(r) => Resp::Banned(banned(state, r).await?), + Req::FlushTransactionPool(r) => { + Resp::FlushTransactionPool(flush_transaction_pool(state, r).await?) + } + Req::GetOutputHistogram(r) => { + Resp::GetOutputHistogram(get_output_histogram(state, r).await?) 
+ } + Req::GetCoinbaseTxSum(r) => Resp::GetCoinbaseTxSum(get_coinbase_tx_sum(state, r).await?), + Req::GetVersion(r) => Resp::GetVersion(get_version(state, r).await?), + Req::GetFeeEstimate(r) => Resp::GetFeeEstimate(get_fee_estimate(state, r).await?), + Req::GetAlternateChains(r) => { + Resp::GetAlternateChains(get_alternate_chains(state, r).await?) + } + Req::RelayTx(r) => Resp::RelayTx(relay_tx(state, r).await?), + Req::SyncInfo(r) => Resp::SyncInfo(sync_info(state, r).await?), + Req::GetTransactionPoolBacklog(r) => { + Resp::GetTransactionPoolBacklog(get_transaction_pool_backlog(state, r).await?) + } + Req::GetMinerData(r) => Resp::GetMinerData(get_miner_data(state, r).await?), + Req::PruneBlockchain(r) => Resp::PruneBlockchain(prune_blockchain(state, r).await?), + Req::CalcPow(r) => Resp::CalcPow(calc_pow(state, r).await?), + Req::FlushCache(r) => Resp::FlushCache(flush_cache(state, r).await?), + Req::AddAuxPow(r) => Resp::AddAuxPow(add_aux_pow(state, r).await?), + Req::GetTxIdsLoose(r) => Resp::GetTxIdsLoose(get_tx_ids_loose(state, r).await?), + }) +} + +async fn get_block_count( + state: CupratedRpcHandlerState, + request: GetBlockCountRequest, +) -> Result { + todo!() +} + +async fn on_get_block_hash( + state: CupratedRpcHandlerState, + request: OnGetBlockHashRequest, +) -> Result { + todo!() +} + +async fn submit_block( + state: CupratedRpcHandlerState, + request: SubmitBlockRequest, +) -> Result { + todo!() +} + +async fn generate_blocks( + state: CupratedRpcHandlerState, + request: GenerateBlocksRequest, +) -> Result { + todo!() +} + +async fn get_last_block_header( + state: CupratedRpcHandlerState, + request: GetLastBlockHeaderRequest, +) -> Result { + todo!() +} + +async fn get_block_header_by_hash( + state: CupratedRpcHandlerState, + request: GetBlockHeaderByHashRequest, +) -> Result { + todo!() +} + +async fn get_block_header_by_height( + state: CupratedRpcHandlerState, + request: GetBlockHeaderByHeightRequest, +) -> Result { + todo!() +} + +async fn 
get_block_headers_range( + state: CupratedRpcHandlerState, + request: GetBlockHeadersRangeRequest, +) -> Result { + todo!() +} + +async fn get_block( + state: CupratedRpcHandlerState, + request: GetBlockRequest, +) -> Result { + todo!() +} + +async fn get_connections( + state: CupratedRpcHandlerState, + request: GetConnectionsRequest, +) -> Result { + todo!() +} + +async fn get_info( + state: CupratedRpcHandlerState, + request: GetInfoRequest, +) -> Result { + todo!() +} + +async fn hard_fork_info( + state: CupratedRpcHandlerState, + request: HardForkInfoRequest, +) -> Result { + todo!() +} + +async fn set_bans( + state: CupratedRpcHandlerState, + request: SetBansRequest, +) -> Result { + todo!() +} + +async fn get_bans( + state: CupratedRpcHandlerState, + request: GetBansRequest, +) -> Result { + todo!() +} + +async fn banned( + state: CupratedRpcHandlerState, + request: BannedRequest, +) -> Result { + todo!() +} + +async fn flush_transaction_pool( + state: CupratedRpcHandlerState, + request: FlushTransactionPoolRequest, +) -> Result { + todo!() +} + +async fn get_output_histogram( + state: CupratedRpcHandlerState, + request: GetOutputHistogramRequest, +) -> Result { + todo!() +} + +async fn get_coinbase_tx_sum( + state: CupratedRpcHandlerState, + request: GetCoinbaseTxSumRequest, +) -> Result { + todo!() +} + +async fn get_version( + state: CupratedRpcHandlerState, + request: GetVersionRequest, +) -> Result { + todo!() +} + +async fn get_fee_estimate( + state: CupratedRpcHandlerState, + request: GetFeeEstimateRequest, +) -> Result { + todo!() +} + +async fn get_alternate_chains( + state: CupratedRpcHandlerState, + request: GetAlternateChainsRequest, +) -> Result { + todo!() +} + +async fn relay_tx( + state: CupratedRpcHandlerState, + request: RelayTxRequest, +) -> Result { + todo!() +} + +async fn sync_info( + state: CupratedRpcHandlerState, + request: SyncInfoRequest, +) -> Result { + todo!() +} + +async fn get_transaction_pool_backlog( + state: 
CupratedRpcHandlerState, + request: GetTransactionPoolBacklogRequest, +) -> Result { + todo!() +} + +async fn get_miner_data( + state: CupratedRpcHandlerState, + request: GetMinerDataRequest, +) -> Result { + todo!() +} + +async fn prune_blockchain( + state: CupratedRpcHandlerState, + request: PruneBlockchainRequest, +) -> Result { + todo!() +} + +async fn calc_pow( + state: CupratedRpcHandlerState, + request: CalcPowRequest, +) -> Result { + todo!() +} + +async fn flush_cache( + state: CupratedRpcHandlerState, + request: FlushCacheRequest, +) -> Result { + todo!() +} + +async fn add_aux_pow( + state: CupratedRpcHandlerState, + request: AddAuxPowRequest, +) -> Result { + todo!() +} + +async fn get_tx_ids_loose( + state: CupratedRpcHandlerState, + request: GetTxIdsLooseRequest, +) -> Result { + todo!() +} diff --git a/binaries/cuprated/src/rpc/other.rs b/binaries/cuprated/src/rpc/other.rs new file mode 100644 index 00000000..c0df3993 --- /dev/null +++ b/binaries/cuprated/src/rpc/other.rs @@ -0,0 +1,260 @@ +use anyhow::Error; + +use cuprate_rpc_types::other::{ + GetAltBlocksHashesRequest, GetAltBlocksHashesResponse, GetHeightRequest, GetHeightResponse, + GetLimitRequest, GetLimitResponse, GetNetStatsRequest, GetNetStatsResponse, GetOutsRequest, + GetOutsResponse, GetPeerListRequest, GetPeerListResponse, GetPublicNodesRequest, + GetPublicNodesResponse, GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse, + GetTransactionPoolRequest, GetTransactionPoolResponse, GetTransactionPoolStatsRequest, + GetTransactionPoolStatsResponse, GetTransactionsRequest, GetTransactionsResponse, + InPeersRequest, InPeersResponse, IsKeyImageSpentRequest, IsKeyImageSpentResponse, + MiningStatusRequest, MiningStatusResponse, OtherRequest, OtherResponse, OutPeersRequest, + OutPeersResponse, PopBlocksRequest, PopBlocksResponse, SaveBcRequest, SaveBcResponse, + SendRawTransactionRequest, SendRawTransactionResponse, SetBootstrapDaemonRequest, + SetBootstrapDaemonResponse, 
SetLimitRequest, SetLimitResponse, SetLogCategoriesRequest, + SetLogCategoriesResponse, SetLogHashRateRequest, SetLogHashRateResponse, SetLogLevelRequest, + SetLogLevelResponse, StartMiningRequest, StartMiningResponse, StopDaemonRequest, + StopDaemonResponse, StopMiningRequest, StopMiningResponse, UpdateRequest, UpdateResponse, +}; + +use crate::rpc::CupratedRpcHandlerState; + +/// Map a [`OtherRequest`] to the function that will lead to a [`OtherResponse`]. +pub(super) async fn map_request( + state: CupratedRpcHandlerState, + request: OtherRequest, +) -> Result { + use OtherRequest as Req; + use OtherResponse as Resp; + + Ok(match request { + Req::GetHeight(r) => Resp::GetHeight(get_height(state, r).await?), + Req::GetTransactions(r) => Resp::GetTransactions(get_transactions(state, r).await?), + Req::GetAltBlocksHashes(r) => { + Resp::GetAltBlocksHashes(get_alt_blocks_hashes(state, r).await?) + } + Req::IsKeyImageSpent(r) => Resp::IsKeyImageSpent(is_key_image_spent(state, r).await?), + Req::SendRawTransaction(r) => { + Resp::SendRawTransaction(send_raw_transaction(state, r).await?) + } + Req::StartMining(r) => Resp::StartMining(start_mining(state, r).await?), + Req::StopMining(r) => Resp::StopMining(stop_mining(state, r).await?), + Req::MiningStatus(r) => Resp::MiningStatus(mining_status(state, r).await?), + Req::SaveBc(r) => Resp::SaveBc(save_bc(state, r).await?), + Req::GetPeerList(r) => Resp::GetPeerList(get_peer_list(state, r).await?), + Req::SetLogHashRate(r) => Resp::SetLogHashRate(set_log_hash_rate(state, r).await?), + Req::SetLogLevel(r) => Resp::SetLogLevel(set_log_level(state, r).await?), + Req::SetLogCategories(r) => Resp::SetLogCategories(set_log_categories(state, r).await?), + Req::SetBootstrapDaemon(r) => { + Resp::SetBootstrapDaemon(set_bootstrap_daemon(state, r).await?) + } + Req::GetTransactionPool(r) => { + Resp::GetTransactionPool(get_transaction_pool(state, r).await?) 
+ } + Req::GetTransactionPoolStats(r) => { + Resp::GetTransactionPoolStats(get_transaction_pool_stats(state, r).await?) + } + Req::StopDaemon(r) => Resp::StopDaemon(stop_daemon(state, r).await?), + Req::GetLimit(r) => Resp::GetLimit(get_limit(state, r).await?), + Req::SetLimit(r) => Resp::SetLimit(set_limit(state, r).await?), + Req::OutPeers(r) => Resp::OutPeers(out_peers(state, r).await?), + Req::InPeers(r) => Resp::InPeers(in_peers(state, r).await?), + Req::GetNetStats(r) => Resp::GetNetStats(get_net_stats(state, r).await?), + Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?), + Req::Update(r) => Resp::Update(update(state, r).await?), + Req::PopBlocks(r) => Resp::PopBlocks(pop_blocks(state, r).await?), + Req::GetTransactionPoolHashes(r) => { + Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?) + } + Req::GetPublicNodes(r) => Resp::GetPublicNodes(get_public_nodes(state, r).await?), + }) +} + +async fn get_height( + state: CupratedRpcHandlerState, + request: GetHeightRequest, +) -> Result { + todo!() +} + +async fn get_transactions( + state: CupratedRpcHandlerState, + request: GetTransactionsRequest, +) -> Result { + todo!() +} + +async fn get_alt_blocks_hashes( + state: CupratedRpcHandlerState, + request: GetAltBlocksHashesRequest, +) -> Result { + todo!() +} + +async fn is_key_image_spent( + state: CupratedRpcHandlerState, + request: IsKeyImageSpentRequest, +) -> Result { + todo!() +} + +async fn send_raw_transaction( + state: CupratedRpcHandlerState, + request: SendRawTransactionRequest, +) -> Result { + todo!() +} + +async fn start_mining( + state: CupratedRpcHandlerState, + request: StartMiningRequest, +) -> Result { + todo!() +} + +async fn stop_mining( + state: CupratedRpcHandlerState, + request: StopMiningRequest, +) -> Result { + todo!() +} + +async fn mining_status( + state: CupratedRpcHandlerState, + request: MiningStatusRequest, +) -> Result { + todo!() +} + +async fn save_bc( + state: CupratedRpcHandlerState, + 
request: SaveBcRequest, +) -> Result { + todo!() +} + +async fn get_peer_list( + state: CupratedRpcHandlerState, + request: GetPeerListRequest, +) -> Result { + todo!() +} + +async fn set_log_hash_rate( + state: CupratedRpcHandlerState, + request: SetLogHashRateRequest, +) -> Result { + todo!() +} + +async fn set_log_level( + state: CupratedRpcHandlerState, + request: SetLogLevelRequest, +) -> Result { + todo!() +} + +async fn set_log_categories( + state: CupratedRpcHandlerState, + request: SetLogCategoriesRequest, +) -> Result { + todo!() +} + +async fn set_bootstrap_daemon( + state: CupratedRpcHandlerState, + request: SetBootstrapDaemonRequest, +) -> Result { + todo!() +} + +async fn get_transaction_pool( + state: CupratedRpcHandlerState, + request: GetTransactionPoolRequest, +) -> Result { + todo!() +} + +async fn get_transaction_pool_stats( + state: CupratedRpcHandlerState, + request: GetTransactionPoolStatsRequest, +) -> Result { + todo!() +} + +async fn stop_daemon( + state: CupratedRpcHandlerState, + request: StopDaemonRequest, +) -> Result { + todo!() +} + +async fn get_limit( + state: CupratedRpcHandlerState, + request: GetLimitRequest, +) -> Result { + todo!() +} + +async fn set_limit( + state: CupratedRpcHandlerState, + request: SetLimitRequest, +) -> Result { + todo!() +} + +async fn out_peers( + state: CupratedRpcHandlerState, + request: OutPeersRequest, +) -> Result { + todo!() +} + +async fn in_peers( + state: CupratedRpcHandlerState, + request: InPeersRequest, +) -> Result { + todo!() +} + +async fn get_net_stats( + state: CupratedRpcHandlerState, + request: GetNetStatsRequest, +) -> Result { + todo!() +} + +async fn get_outs( + state: CupratedRpcHandlerState, + request: GetOutsRequest, +) -> Result { + todo!() +} + +async fn update( + state: CupratedRpcHandlerState, + request: UpdateRequest, +) -> Result { + todo!() +} + +async fn pop_blocks( + state: CupratedRpcHandlerState, + request: PopBlocksRequest, +) -> Result { + todo!() +} + +async fn 
get_transaction_pool_hashes( + state: CupratedRpcHandlerState, + request: GetTransactionPoolHashesRequest, +) -> Result { + todo!() +} + +async fn get_public_nodes( + state: CupratedRpcHandlerState, + request: GetPublicNodesRequest, +) -> Result { + todo!() +} diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index ad521df0..d97d223d 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -93,17 +93,20 @@ --- -- [⚪️ Resource model](resource-model/intro.md) - - [⚪️ File system](resource-model/file-system.md) - - [⚪️ Sockets](resource-model/sockets.md) - - [⚪️ Memory](resource-model/memory.md) - - [🟡 Concurrency and parallelism](resource-model/concurrency-and-parallelism/intro.md) - - [⚪️ Map](resource-model/concurrency-and-parallelism/map.md) - - [⚪️ The RPC server](resource-model/concurrency-and-parallelism/the-rpc-server.md) - - [⚪️ The database](resource-model/concurrency-and-parallelism/the-database.md) - - [⚪️ The block downloader](resource-model/concurrency-and-parallelism/the-block-downloader.md) - - [⚪️ The verifier](resource-model/concurrency-and-parallelism/the-verifier.md) - - [⚪️ Thread exit](resource-model/concurrency-and-parallelism/thread-exit.md) +- [⚪️ Resources](resources/intro.md) + - [⚪️ File system](resources/fs/intro.md) + - [🟡 Index of PATHs](resources/fs/paths.md) + - [⚪️ Sockets](resources/sockets/index.md) + - [🔴 Index of ports](resources/sockets/ports.md) + - [⚪️ Memory](resources/memory.md) + - [🟡 Concurrency and parallelism](resources/cap/intro.md) + - [⚪️ Map](resources/cap/map.md) + - [⚪️ The RPC server](resources/cap/the-rpc-server.md) + - [⚪️ The database](resources/cap/the-database.md) + - [⚪️ The block downloader](resources/cap/the-block-downloader.md) + - [⚪️ The verifier](resources/cap/the-verifier.md) + - [⚪️ Thread exit](resources/cap/thread-exit.md) + - [🔴 Index of threads](resources/cap/threads.md) --- diff --git 
a/books/architecture/src/resource-model/intro.md b/books/architecture/src/resource-model/intro.md deleted file mode 100644 index 28d1dd61..00000000 --- a/books/architecture/src/resource-model/intro.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Resource model diff --git a/books/architecture/src/resource-model/sockets.md b/books/architecture/src/resource-model/sockets.md deleted file mode 100644 index 0d590ca4..00000000 --- a/books/architecture/src/resource-model/sockets.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Sockets diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/intro.md b/books/architecture/src/resources/cap/intro.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/intro.md rename to books/architecture/src/resources/cap/intro.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/map.md b/books/architecture/src/resources/cap/map.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/map.md rename to books/architecture/src/resources/cap/map.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-block-downloader.md b/books/architecture/src/resources/cap/the-block-downloader.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/the-block-downloader.md rename to books/architecture/src/resources/cap/the-block-downloader.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-database.md b/books/architecture/src/resources/cap/the-database.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/the-database.md rename to books/architecture/src/resources/cap/the-database.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-rpc-server.md b/books/architecture/src/resources/cap/the-rpc-server.md similarity index 100% rename from 
books/architecture/src/resource-model/concurrency-and-parallelism/the-rpc-server.md rename to books/architecture/src/resources/cap/the-rpc-server.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-verifier.md b/books/architecture/src/resources/cap/the-verifier.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/the-verifier.md rename to books/architecture/src/resources/cap/the-verifier.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/thread-exit.md b/books/architecture/src/resources/cap/thread-exit.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/thread-exit.md rename to books/architecture/src/resources/cap/thread-exit.md diff --git a/books/architecture/src/resources/cap/threads.md b/books/architecture/src/resources/cap/threads.md new file mode 100644 index 00000000..e40f2c7d --- /dev/null +++ b/books/architecture/src/resources/cap/threads.md @@ -0,0 +1,2 @@ +# Index of threads +This is an index of all of the system threads Cuprate actively uses. \ No newline at end of file diff --git a/books/architecture/src/resource-model/file-system.md b/books/architecture/src/resources/fs/intro.md similarity index 100% rename from books/architecture/src/resource-model/file-system.md rename to books/architecture/src/resources/fs/intro.md diff --git a/books/architecture/src/resources/fs/paths.md b/books/architecture/src/resources/fs/paths.md new file mode 100644 index 00000000..0e5dc3d6 --- /dev/null +++ b/books/architecture/src/resources/fs/paths.md @@ -0,0 +1,87 @@ +# Index of PATHs +This is an index of all of the filesystem PATHs Cuprate actively uses. + +The [`cuprate_helper::fs`](https://doc.cuprate.org/cuprate_helper/fs/index.html) +module defines the general locations used throughout Cuprate. 
+ +[`dirs`](https://docs.rs/dirs) is used internally, which follows +the PATH standards/conventions on each OS Cuprate supports, i.e.: +- the [XDG base directory](https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html) and the [XDG user directory](https://www.freedesktop.org/wiki/Software/xdg-user-dirs/) specifications on Linux +- the [Known Folder](https://msdn.microsoft.com/en-us/library/windows/desktop/bb776911(v=vs.85).aspx) system on Windows +- the [Standard Directories](https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html#//apple_ref/doc/uid/TP40010672-CH2-SW6) on macOS + +## Cache +Cuprate's cache directory. + +| OS | PATH | +|---------|-----------------------------------------| +| Windows | `C:\Users\Alice\AppData\Local\Cuprate\` | +| macOS | `/Users/Alice/Library/Caches/Cuprate/` | +| Linux | `/home/alice/.cache/cuprate/` | + +## Config +Cuprate's config directory. + +| OS | PATH | +|---------|-----------------------------------------------------| +| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` | +| macOS | `/Users/Alice/Library/Application Support/Cuprate/` | +| Linux | `/home/alice/.config/cuprate/` | + +## Data +Cuprate's data directory. + +| OS | PATH | +|---------|-----------------------------------------------------| +| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` | +| macOS | `/Users/Alice/Library/Application Support/Cuprate/` | +| Linux | `/home/alice/.local/share/cuprate/` | + +## Blockchain +Cuprate's blockchain directory. + +| OS | PATH | +|---------|----------------------------------------------------------------| +| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\` | +| macOS | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` | +| Linux | `/home/alice/.local/share/cuprate/blockchain/` | + +## Transaction pool +Cuprate's transaction pool directory. 
+ +| OS | PATH | +|---------|------------------------------------------------------------| +| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\` | +| macOS | `/Users/Alice/Library/Application Support/Cuprate/txpool/` | +| Linux | `/home/alice/.local/share/cuprate/txpool/` | + +## Database +Cuprate's database location/filenames depend on: + +- Which database it is +- Which backend is being used + +--- + +`cuprate_blockchain` files are in the above mentioned `blockchain` folder. + +`cuprate_txpool` files are in the above mentioned `txpool` folder. + +--- + +If the `heed` backend is being used, these files will be created: + +| Filename | Purpose | +|------------|--------------------| +| `data.mdb` | Main data file | +| `lock.mdb` | Database lock file | + +For example: `/home/alice/.local/share/cuprate/blockchain/lock.mdb`. + +If the `redb` backend is being used, these files will be created: + +| Filename | Purpose | +|-------------|--------------------| +| `data.redb` | Main data file | + +For example: `/home/alice/.local/share/cuprate/txpool/data.redb`. 
\ No newline at end of file diff --git a/books/architecture/src/resources/intro.md b/books/architecture/src/resources/intro.md new file mode 100644 index 00000000..3c1229ee --- /dev/null +++ b/books/architecture/src/resources/intro.md @@ -0,0 +1 @@ +# Resources diff --git a/books/architecture/src/resource-model/memory.md b/books/architecture/src/resources/memory.md similarity index 100% rename from books/architecture/src/resource-model/memory.md rename to books/architecture/src/resources/memory.md diff --git a/books/architecture/src/resources/sockets/index.md b/books/architecture/src/resources/sockets/index.md new file mode 100644 index 00000000..1e65ffc4 --- /dev/null +++ b/books/architecture/src/resources/sockets/index.md @@ -0,0 +1 @@ +# Sockets diff --git a/books/architecture/src/resources/sockets/ports.md b/books/architecture/src/resources/sockets/ports.md new file mode 100644 index 00000000..38ebc1db --- /dev/null +++ b/books/architecture/src/resources/sockets/ports.md @@ -0,0 +1,2 @@ +# Index of ports +This is an index of all of the network sockets Cuprate actively uses. 
\ No newline at end of file diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 00000000..cc94ec53 --- /dev/null +++ b/clippy.toml @@ -0,0 +1 @@ +upper-case-acronyms-aggressive = true diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index 2cf03e39..8ba321d6 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -11,7 +11,7 @@ proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"] rayon = ["dep:rayon"] [dependencies] -cuprate-helper = { path = "../../helper", default-features = false, features = ["std"] } +cuprate-helper = { path = "../../helper", default-features = false, features = ["std", "cast"] } cuprate-types = { path = "../../types", default-features = false } cuprate-cryptonight = {path = "../../cryptonight"} diff --git a/consensus/rules/src/decomposed_amount.rs b/consensus/rules/src/decomposed_amount.rs index 59348149..a8821f3c 100644 --- a/consensus/rules/src/decomposed_amount.rs +++ b/consensus/rules/src/decomposed_amount.rs @@ -1,36 +1,27 @@ -use std::sync::OnceLock; - -/// Decomposed amount table. 
-/// -static DECOMPOSED_AMOUNTS: OnceLock<[u64; 172]> = OnceLock::new(); - #[rustfmt::skip] -pub fn decomposed_amounts() -> &'static [u64; 172] { - DECOMPOSED_AMOUNTS.get_or_init(|| { - [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 20, 30, 40, 50, 60, 70, 80, 90, - 100, 200, 300, 400, 500, 600, 700, 800, 900, - 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, - 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, - 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, - 1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000, - 10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000, - 100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000, - 1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000, - 10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000, - 100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000, - 1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000, - 10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000, - 100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000, - 1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000, - 10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000, - 100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 
500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000, - 1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000, - 10000000000000000000 - ] - }) -} +/// Decomposed amount table. +pub static DECOMPOSED_AMOUNTS: [u64; 172] = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 20, 30, 40, 50, 60, 70, 80, 90, + 100, 200, 300, 400, 500, 600, 700, 800, 900, + 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, + 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, + 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, + 1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000, + 10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000, + 100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000, + 1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000, + 10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000, + 100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000, + 1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000, + 10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000, + 100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000, + 1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000, + 10000000000000000, 20000000000000000, 
30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000, + 100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000, + 1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000, + 10000000000000000000 +]; /// Checks that an output amount is decomposed. /// @@ -40,7 +31,7 @@ pub fn decomposed_amounts() -> &'static [u64; 172] { /// ref: #[inline] pub fn is_decomposed_amount(amount: &u64) -> bool { - decomposed_amounts().binary_search(amount).is_ok() + DECOMPOSED_AMOUNTS.binary_search(amount).is_ok() } #[cfg(test)] @@ -49,7 +40,7 @@ mod tests { #[test] fn decomposed_amounts_return_decomposed() { - for amount in decomposed_amounts() { + for amount in DECOMPOSED_AMOUNTS.iter() { assert!(is_decomposed_amount(amount)) } } diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs index 0bea08ce..4da8fd53 100644 --- a/consensus/rules/src/transactions/tests.rs +++ b/consensus/rules/src/transactions/tests.rs @@ -9,12 +9,14 @@ use proptest::{collection::vec, prelude::*}; use monero_serai::transaction::Output; +use cuprate_helper::cast::u64_to_usize; + use super::*; -use crate::decomposed_amount::decomposed_amounts; +use crate::decomposed_amount::DECOMPOSED_AMOUNTS; #[test] fn test_check_output_amount_v1() { - for amount in decomposed_amounts() { + for amount in DECOMPOSED_AMOUNTS.iter() { assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok()) } @@ -164,7 +166,7 @@ prop_compose! { if timebased || lock_height > 500_000_000 { Timelock::Time(time_for_time_lock) } else { - Timelock::Block(usize::try_from(lock_height).unwrap()) + Timelock::Block(u64_to_usize(lock_height)) } } } @@ -179,7 +181,7 @@ prop_compose! 
{ match ty { 0 => Timelock::None, 1 => Timelock::Time(time_for_time_lock), - _ => Timelock::Block(usize::try_from(lock_height).unwrap()) + _ => Timelock::Block(u64_to_usize(lock_height)) } } } diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index 513697e9..b20b4f26 100644 --- a/consensus/src/block/alt_block.rs +++ b/consensus/src/block/alt_block.rs @@ -14,7 +14,7 @@ use cuprate_consensus_rules::{ miner_tx::MinerTxError, ConsensusError, }; -use cuprate_helper::asynch::rayon_spawn_async; +use cuprate_helper::{asynch::rayon_spawn_async, cast::u64_to_usize}; use cuprate_types::{ AltBlockInformation, Chain, ChainId, TransactionVerificationData, VerifiedTransactionInformation, @@ -24,7 +24,7 @@ use crate::{ block::{free::pull_ordered_transactions, PreparedBlock}, context::{ difficulty::DifficultyCache, - rx_vms::RandomXVM, + rx_vms::RandomXVm, weight::{self, BlockWeightsCache}, AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, }, @@ -101,7 +101,7 @@ where // Check the alt block timestamp is in the correct range. if let Some(median_timestamp) = - difficulty_cache.median_timestamp(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.try_into().unwrap()) + difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)) { check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)? 
}; @@ -195,7 +195,7 @@ async fn alt_rx_vm( parent_chain: Chain, alt_chain_context: &mut AltChainContextCache, context_svc: C, -) -> Result>, ExtendedConsensusError> +) -> Result>, ExtendedConsensusError> where C: Service< BlockChainContextRequest, diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs index 9974d6d1..d32cd765 100644 --- a/consensus/src/block/batch_prepare.rs +++ b/consensus/src/block/batch_prepare.rs @@ -15,7 +15,7 @@ use cuprate_helper::asynch::rayon_spawn_async; use crate::{ block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow}, - context::rx_vms::RandomXVM, + context::rx_vms::RandomXVm, transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, VerifyBlockResponse, @@ -148,7 +148,7 @@ where tracing::debug!("New randomX seed in batch, initialising VM"); let new_vm = rayon_spawn_async(move || { - Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) + Arc::new(RandomXVm::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) }) .await; diff --git a/consensus/src/context.rs b/consensus/src/context.rs index 26be75c3..9e713046 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -33,7 +33,7 @@ mod tokens; use cuprate_types::Chain; use difficulty::DifficultyCache; -use rx_vms::RandomXVM; +use rx_vms::RandomXVm; use weight::BlockWeightsCache; pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache}; @@ -236,7 +236,7 @@ pub enum BlockChainContextRequest { /// seed. /// /// This should include the seed used to init this VM and the VM. - NewRXVM(([u8; 32], Arc)), + NewRXVM(([u8; 32], Arc)), /// A request to add a new block to the cache. Update(NewBlockData), /// Pop blocks from the cache to the specified height. @@ -313,7 +313,7 @@ pub enum BlockChainContextResponse { /// Blockchain context response. 
Context(BlockChainContext), /// A map of seed height to RandomX VMs. - RxVms(HashMap>), + RxVms(HashMap>), /// A list of difficulties. BatchDifficulties(Vec), /// An alt chain context cache. @@ -321,7 +321,7 @@ pub enum BlockChainContextResponse { /// A difficulty cache for an alt chain. AltChainDifficultyCache(DifficultyCache), /// A randomX VM for an alt chain. - AltChainRxVM(Arc), + AltChainRxVM(Arc), /// A weight cache for an alt chain AltChainWeightCache(BlockWeightsCache), /// A generic Ok response. diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs index 5586226b..937e847e 100644 --- a/consensus/src/context/alt_chains.rs +++ b/consensus/src/context/alt_chains.rs @@ -11,7 +11,7 @@ use cuprate_types::{ use crate::{ ExtendedConsensusError, __private::Database, - context::{difficulty::DifficultyCache, rx_vms::RandomXVM, weight::BlockWeightsCache}, + context::{difficulty::DifficultyCache, rx_vms::RandomXVm, weight::BlockWeightsCache}, }; pub(crate) mod sealed { @@ -32,7 +32,7 @@ pub struct AltChainContextCache { pub difficulty_cache: Option, /// A cached RX VM. - pub cached_rx_vm: Option<(usize, Arc)>, + pub cached_rx_vm: Option<(usize, Arc)>, /// The chain height of the alt chain. pub chain_height: usize, diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index 01aa9738..b1ab102b 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -9,7 +9,7 @@ use std::{ }; use futures::{stream::FuturesOrdered, StreamExt}; -use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner}; +use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VmInner}; use rayon::prelude::*; use thread_local::ThreadLocal; use tower::ServiceExt; @@ -33,16 +33,16 @@ const RX_SEEDS_CACHED: usize = 2; /// A multithreaded randomX VM. #[derive(Debug)] -pub struct RandomXVM { +pub struct RandomXVm { /// These RandomX VMs all share the same cache. 
- vms: ThreadLocal, + vms: ThreadLocal, /// The RandomX cache. cache: RandomXCache, /// The flags used to start the RandomX VMs. flags: RandomXFlag, } -impl RandomXVM { +impl RandomXVm { /// Create a new multithreaded randomX VM with the provided seed. pub fn new(seed: &[u8; 32]) -> Result { // TODO: allow passing in flags. @@ -50,7 +50,7 @@ impl RandomXVM { let cache = RandomXCache::new(flags, seed.as_slice())?; - Ok(RandomXVM { + Ok(RandomXVm { vms: ThreadLocal::new(), cache, flags, @@ -58,12 +58,12 @@ impl RandomXVM { } } -impl RandomX for RandomXVM { +impl RandomX for RandomXVm { type Error = RandomXError; fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> { self.vms - .get_or_try(|| VMInner::new(self.flags, Some(self.cache.clone()), None))? + .get_or_try(|| VmInner::new(self.flags, Some(self.cache.clone()), None))? .calculate_hash(buf) .map(|out| out.try_into().unwrap()) } @@ -72,17 +72,17 @@ impl RandomX for RandomXVM { /// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a /// couple more around this VM. #[derive(Clone, Debug)] -pub struct RandomXVMCache { +pub struct RandomXVmCache { /// The top [`RX_SEEDS_CACHED`] RX seeds. pub(crate) seeds: VecDeque<(usize, [u8; 32])>, /// The VMs for `seeds` (if after hf 12, otherwise this will be empty). - pub(crate) vms: HashMap>, + pub(crate) vms: HashMap>, /// A single cached VM that was given to us from a part of Cuprate. 
- pub(crate) cached_vm: Option<([u8; 32], Arc)>, + pub(crate) cached_vm: Option<([u8; 32], Arc)>, } -impl RandomXVMCache { +impl RandomXVmCache { #[instrument(name = "init_rx_vm_cache", level = "info", skip(database))] pub async fn init_from_chain_height( chain_height: usize, @@ -106,7 +106,7 @@ impl RandomXVMCache { .map(|(height, seed)| { ( *height, - Arc::new(RandomXVM::new(seed).expect("Failed to create RandomX VM!")), + Arc::new(RandomXVm::new(seed).expect("Failed to create RandomX VM!")), ) }) .collect() @@ -117,7 +117,7 @@ impl RandomXVMCache { HashMap::new() }; - Ok(RandomXVMCache { + Ok(RandomXVmCache { seeds, vms, cached_vm: None, @@ -125,7 +125,7 @@ impl RandomXVMCache { } /// Add a randomX VM to the cache, with the seed it was created with. - pub fn add_vm(&mut self, vm: ([u8; 32], Arc)) { + pub fn add_vm(&mut self, vm: ([u8; 32], Arc)) { self.cached_vm.replace(vm); } @@ -136,7 +136,7 @@ impl RandomXVMCache { height: usize, chain: Chain, database: D, - ) -> Result, ExtendedConsensusError> { + ) -> Result, ExtendedConsensusError> { let seed_height = randomx_seed_height(height); let BlockchainResponse::BlockHash(seed_hash) = database @@ -156,13 +156,13 @@ impl RandomXVMCache { } } - let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVM::new(&seed_hash).unwrap())).await; + let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVm::new(&seed_hash).unwrap())).await; Ok(alt_vm) } /// Get the main-chain RandomX VMs. - pub async fn get_vms(&mut self) -> HashMap> { + pub async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. 
Some(0) => (), @@ -184,7 +184,7 @@ impl RandomXVMCache { } }; - rayon_spawn_async(move || Arc::new(RandomXVM::new(&next_seed_hash).unwrap())) + rayon_spawn_async(move || Arc::new(RandomXVm::new(&next_seed_hash).unwrap())) .await }; @@ -200,7 +200,7 @@ impl RandomXVMCache { seeds_clone .par_iter() .map(|(height, seed)| { - let vm = RandomXVM::new(seed).expect("Failed to create RandomX VM!"); + let vm = RandomXVm::new(seed).expect("Failed to create RandomX VM!"); let vm = Arc::new(vm); (*height, vm) }) diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 8939446a..bc54285a 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -9,6 +9,7 @@ use tower::ServiceExt; use tracing::Instrument; use cuprate_consensus_rules::blocks::ContextToVerifyBlock; +use cuprate_helper::cast::u64_to_usize; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, @@ -45,7 +46,7 @@ pub struct ContextTask { /// The weight cache. weight_cache: weight::BlockWeightsCache, /// The RX VM cache. - rx_vm_cache: rx_vms::RandomXVMCache, + rx_vm_cache: rx_vms::RandomXVmCache, /// The hard-fork state cache. 
hardfork_state: hardforks::HardForkState, @@ -127,7 +128,7 @@ impl ContextTask { let db = database.clone(); let rx_seed_handle = tokio::spawn(async move { - rx_vms::RandomXVMCache::init_from_chain_height(chain_height, &current_hf, db).await + rx_vms::RandomXVmCache::init_from_chain_height(chain_height, &current_hf, db).await }); let context_svc = ContextTask { @@ -168,9 +169,9 @@ impl ContextTask { .weight_cache .effective_median_block_weight(&current_hf), top_hash: self.top_block_hash, - median_block_timestamp: self.difficulty_cache.median_timestamp( - usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(), - ), + median_block_timestamp: self + .difficulty_cache + .median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)), chain_height: self.chain_height, current_hf, next_difficulty: self.difficulty_cache.next_difficulty(&current_hf), diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 29f59038..18de7dc4 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -10,7 +10,7 @@ //! implement a database you need to have a service which accepts [`BlockchainReadRequest`] and responds //! with [`BlockchainResponse`]. //! 
-use cuprate_consensus_rules::{ConsensusError, HardFork}; +use cuprate_consensus_rules::ConsensusError; mod batch_verifier; pub mod block; diff --git a/consensus/src/tests/context/rx_vms.rs b/consensus/src/tests/context/rx_vms.rs index f18a9b59..5c198cf6 100644 --- a/consensus/src/tests/context/rx_vms.rs +++ b/consensus/src/tests/context/rx_vms.rs @@ -9,7 +9,7 @@ use cuprate_consensus_rules::{ }; use crate::{ - context::rx_vms::{get_last_rx_seed_heights, RandomXVMCache}, + context::rx_vms::{get_last_rx_seed_heights, RandomXVmCache}, tests::mock_db::*, }; @@ -42,7 +42,7 @@ fn rx_heights_consistent() { async fn rx_vm_created_on_hf_12() { let db = DummyDatabaseBuilder::default().finish(Some(10)); - let mut cache = RandomXVMCache::init_from_chain_height(10, &HardFork::V11, db) + let mut cache = RandomXVmCache::init_from_chain_height(10, &HardFork::V11, db) .await .unwrap(); @@ -67,7 +67,7 @@ proptest! { let rt = Builder::new_multi_thread().enable_all().build().unwrap(); rt.block_on(async move { - let cache = RandomXVMCache::init_from_chain_height(10, &hf, db).await.unwrap(); + let cache = RandomXVmCache::init_from_chain_height(10, &hf, db).await.unwrap(); assert!(cache.seeds.len() == cache.vms.len() || hf < HardFork::V12); }); } diff --git a/helper/Cargo.toml b/helper/Cargo.toml index 59e4e71d..c74e40fd 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -10,14 +10,15 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus" [features] # All features on by default. 
-default = ["std", "atomic", "asynch", "fs", "num", "map", "time", "thread", "constants"] +default = ["std", "atomic", "asynch", "cast", "fs", "num", "map", "time", "thread", "constants"] std = [] atomic = ["dep:crossbeam"] asynch = ["dep:futures", "dep:rayon"] +cast = [] constants = [] fs = ["dep:dirs"] num = [] -map = ["dep:monero-serai"] +map = ["cast", "dep:monero-serai"] time = ["dep:chrono", "std"] thread = ["std", "dep:target_os_lib"] @@ -39,3 +40,6 @@ target_os_lib = { package = "libc", version = "0.2.151", optional = true } [dev-dependencies] tokio = { workspace = true, features = ["full"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/helper/src/asynch.rs b/helper/src/asynch.rs index ea89dd79..9868191b 100644 --- a/helper/src/asynch.rs +++ b/helper/src/asynch.rs @@ -19,7 +19,7 @@ pub struct InfallibleOneshotReceiver(oneshot::Receiver); impl From> for InfallibleOneshotReceiver { fn from(value: oneshot::Receiver) -> Self { - InfallibleOneshotReceiver(value) + Self(value) } } @@ -43,7 +43,7 @@ where { let (tx, rx) = oneshot::channel(); rayon::spawn(move || { - let _ = tx.send(f()); + drop(tx.send(f())); }); rx.await.expect("The sender must not be dropped") } @@ -62,7 +62,7 @@ mod test { #[tokio::test] // Assert that basic channel operations work. 
async fn infallible_oneshot_receiver() { - let (tx, rx) = futures::channel::oneshot::channel::(); + let (tx, rx) = oneshot::channel::(); let msg = "hello world!".to_string(); tx.send(msg.clone()).unwrap(); @@ -84,7 +84,7 @@ mod test { let barrier = Arc::new(Barrier::new(2)); let task = |barrier: &Barrier| barrier.wait(); - let b_2 = barrier.clone(); + let b_2 = Arc::clone(&barrier); let (tx, rx) = std::sync::mpsc::channel(); diff --git a/helper/src/atomic.rs b/helper/src/atomic.rs index f253737a..47958964 100644 --- a/helper/src/atomic.rs +++ b/helper/src/atomic.rs @@ -49,6 +49,8 @@ pub type AtomicF64 = AtomicCell; //---------------------------------------------------------------------------------------------------- TESTS #[cfg(test)] mod tests { + #![allow(clippy::float_cmp)] + use super::*; #[test] diff --git a/helper/src/cast.rs b/helper/src/cast.rs new file mode 100644 index 00000000..99b7f53e --- /dev/null +++ b/helper/src/cast.rs @@ -0,0 +1,86 @@ +//! Casting. +//! +//! This modules provides utilities for casting between types. +//! +//! `#[no_std]` compatible. + +#![allow(clippy::cast_possible_truncation)] + +#[rustfmt::skip] +//============================ SAFETY: DO NOT REMOVE ===========================// +// // +// // +// Only allow building 64-bit targets. // +// This allows us to assume 64-bit invariants in this file. // + #[cfg(not(target_pointer_width = "64"))] + compile_error!("Cuprate is only compatible with 64-bit CPUs"); +// // +// // +//============================ SAFETY: DO NOT REMOVE ===========================// + +//---------------------------------------------------------------------------------------------------- Free functions +/// Cast [`u64`] to [`usize`]. +#[inline(always)] +pub const fn u64_to_usize(u: u64) -> usize { + u as usize +} + +/// Cast [`u32`] to [`usize`]. +#[inline(always)] +pub const fn u32_to_usize(u: u32) -> usize { + u as usize +} + +/// Cast [`usize`] to [`u64`]. 
+#[inline(always)] +pub const fn usize_to_u64(u: usize) -> u64 { + u as u64 +} + +/// Cast [`i64`] to [`isize`]. +#[inline(always)] +pub const fn i64_to_isize(i: i64) -> isize { + i as isize +} + +/// Cast [`i32`] to [`isize`]. +#[inline(always)] +pub const fn i32_to_isize(i: i32) -> isize { + i as isize +} + +/// Cast [`isize`] to [`i64`]. +#[inline(always)] +pub const fn isize_to_i64(i: isize) -> i64 { + i as i64 +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn max_unsigned() { + assert_eq!(u32_to_usize(u32::MAX), usize::try_from(u32::MAX).unwrap()); + assert_eq!(usize_to_u64(u32_to_usize(u32::MAX)), u64::from(u32::MAX)); + + assert_eq!(u64_to_usize(u64::MAX), usize::MAX); + assert_eq!(usize_to_u64(u64_to_usize(u64::MAX)), u64::MAX); + + assert_eq!(usize_to_u64(usize::MAX), u64::MAX); + assert_eq!(u64_to_usize(usize_to_u64(usize::MAX)), usize::MAX); + } + + #[test] + fn max_signed() { + assert_eq!(i32_to_isize(i32::MAX), isize::try_from(i32::MAX).unwrap()); + assert_eq!(isize_to_i64(i32_to_isize(i32::MAX)), i64::from(i32::MAX)); + + assert_eq!(i64_to_isize(i64::MAX), isize::MAX); + assert_eq!(isize_to_i64(i64_to_isize(i64::MAX)), i64::MAX); + + assert_eq!(isize_to_i64(isize::MAX), i64::MAX); + assert_eq!(i64_to_isize(isize_to_i64(isize::MAX)), isize::MAX); + } +} diff --git a/helper/src/fs.rs b/helper/src/fs.rs index 1efb20cb..5d62a644 100644 --- a/helper/src/fs.rs +++ b/helper/src/fs.rs @@ -4,7 +4,7 @@ //! Note that this module's functions uses [`dirs`], //! which adheres to the XDG standard on Linux. //! -//! This means that the values returned by these functions +//! This means that the values returned by these statics //! may change at runtime depending on environment variables, //! for example: //! @@ -17,7 +17,7 @@ //! # if cfg!(target_os = "linux") { //! std::env::set_var("XDG_CONFIG_HOME", "/custom/path"); //! assert_eq!( -//! 
cuprate_config_dir().to_string_lossy(), +//! CUPRATE_CONFIG_DIR.to_string_lossy(), //! "/custom/path/cuprate" //! ); //! # } @@ -28,10 +28,7 @@ //! - //---------------------------------------------------------------------------------------------------- Use -use std::{ - path::{Path, PathBuf}, - sync::OnceLock, -}; +use std::{path::PathBuf, sync::LazyLock}; //---------------------------------------------------------------------------------------------------- Const /// Cuprate's main directory. @@ -62,71 +59,59 @@ pub const CUPRATE_DIR: &str = { }; //---------------------------------------------------------------------------------------------------- Directories -/// Create a (private) `OnceLock` and accessor function for common PATHs used by Cuprate. +/// Create a `LazyLock` for common PATHs used by Cuprate. /// /// This currently creates these directories: -/// - [`cuprate_cache_dir()`] -/// - [`cuprate_config_dir()`] -/// - [`cuprate_data_dir()`] -/// - [`cuprate_blockchain_dir()`] -/// -/// FIXME: Use `LazyLock` when stabilized. -/// . -/// . -macro_rules! impl_path_oncelock_and_fn { +/// - [`CUPRATE_CACHE_DIR`] +/// - [`CUPRATE_CONFIG_DIR`] +/// - [`CUPRATE_DATA_DIR`] +/// - [`CUPRATE_BLOCKCHAIN_DIR`] +macro_rules! impl_path_lazylock { ($( $(#[$attr:meta])* // Documentation and any `derive`'s. - $fn:ident, // Name of the corresponding access function. + $name:ident, // Name of the corresponding `LazyLock`. $dirs_fn:ident, // Name of the `dirs` function to use, the PATH prefix. $sub_dirs:literal // Any sub-directories to add onto the PATH. ),* $(,)?) => {$( - // Create the `OnceLock` if needed, append + // Create the `LazyLock` if needed, append // the Cuprate directory string and return. $(#[$attr])* - pub fn $fn() -> &'static Path { - /// Local `OnceLock` containing the Path. 
- static ONCE_LOCK: OnceLock = OnceLock::new(); + pub static $name: LazyLock = LazyLock::new(|| { + // There's nothing we can do but panic if + // we cannot acquire critical system directories. + // + // Although, this realistically won't panic on + // normal systems for all OS's supported by `dirs`. + let mut path = dirs::$dirs_fn().unwrap(); - ONCE_LOCK.get_or_init(|| { - // There's nothing we can do but panic if - // we cannot acquire critical system directories. - // - // Although, this realistically won't panic on - // normal systems for all OS's supported by `dirs`. - let mut path = dirs::$dirs_fn().unwrap(); + // FIXME: + // Consider a user who does `HOME=/ ./cuprated` + // + // Should we say "that's stupid" and panic here? + // Or should it be respected? + // We really don't want a `rm -rf /` type of situation... + assert!( + path.parent().is_some(), + "SAFETY: returned OS PATH was either root or empty, aborting" + ); - // FIXME: - // Consider a user who does `HOME=/ ./cuprated` - // - // Should we say "that's stupid" and panic here? - // Or should it be respected? - // We really don't want a `rm -rf /` type of situation... - assert!( - path.parent().is_some(), - "SAFETY: returned OS PATH was either root or empty, aborting" - ); + // Returned OS PATH should be absolute, not relative. + assert!(path.is_absolute(), "SAFETY: returned OS PATH was not absolute"); - // Returned OS PATH should be absolute, not relative. - assert!(path.is_absolute(), "SAFETY: returned OS PATH was not absolute"); + // Unconditionally prefix with the top-level Cuprate directory. + path.push(CUPRATE_DIR); - // Unconditionally prefix with the top-level Cuprate directory. - path.push(CUPRATE_DIR); + // Add any sub directories if specified in the macro. + if !$sub_dirs.is_empty() { + path.push($sub_dirs); + } - // Add any sub directories if specified in the macro. 
- if !$sub_dirs.is_empty() { - path.push($sub_dirs); - } - - path - }) - } + path + }); )*}; } -// Note that the `OnceLock`'s are prefixed with `__` to indicate: -// 1. They're not really to be used directly -// 2. To avoid name conflicts -impl_path_oncelock_and_fn! { +impl_path_lazylock! { /// Cuprate's cache directory. /// /// This is the PATH used for any Cuprate cache files. @@ -136,7 +121,7 @@ impl_path_oncelock_and_fn! { /// | Windows | `C:\Users\Alice\AppData\Local\Cuprate\` | /// | macOS | `/Users/Alice/Library/Caches/Cuprate/` | /// | Linux | `/home/alice/.cache/cuprate/` | - cuprate_cache_dir, + CUPRATE_CACHE_DIR, cache_dir, "", @@ -149,7 +134,7 @@ impl_path_oncelock_and_fn! { /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` | /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/` | /// | Linux | `/home/alice/.config/cuprate/` | - cuprate_config_dir, + CUPRATE_CONFIG_DIR, config_dir, "", @@ -162,7 +147,7 @@ impl_path_oncelock_and_fn! { /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` | /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/` | /// | Linux | `/home/alice/.local/share/cuprate/` | - cuprate_data_dir, + CUPRATE_DATA_DIR, data_dir, "", @@ -175,9 +160,22 @@ impl_path_oncelock_and_fn! { /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\` | /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` | /// | Linux | `/home/alice/.local/share/cuprate/blockchain/` | - cuprate_blockchain_dir, + CUPRATE_BLOCKCHAIN_DIR, data_dir, "blockchain", + + /// Cuprate's transaction pool directory. + /// + /// This is the PATH used for any Cuprate txpool files. 
+ /// + /// | OS | PATH | + /// |---------|------------------------------------------------------------| + /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\` | + /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/txpool/` | + /// | Linux | `/home/alice/.local/share/cuprate/txpool/` | + CUPRATE_TXPOOL_DIR, + data_dir, + "txpool", } //---------------------------------------------------------------------------------------------------- Tests @@ -192,60 +190,41 @@ mod test { // - It must `ends_with()` the expected end PATH for the OS #[test] fn path_sanity_check() { - assert!(cuprate_cache_dir().is_absolute()); - assert!(cuprate_config_dir().is_absolute()); - assert!(cuprate_data_dir().is_absolute()); - assert!(cuprate_blockchain_dir().is_absolute()); + // Array of (PATH, expected_path_as_string). + // + // The different OS's will set the expected path below. + let mut array = [ + (&*CUPRATE_CACHE_DIR, ""), + (&*CUPRATE_CONFIG_DIR, ""), + (&*CUPRATE_DATA_DIR, ""), + (&*CUPRATE_BLOCKCHAIN_DIR, ""), + (&*CUPRATE_TXPOOL_DIR, ""), + ]; if cfg!(target_os = "windows") { - let dir = cuprate_cache_dir(); - println!("cuprate_cache_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Local\Cuprate")); - - let dir = cuprate_config_dir(); - println!("cuprate_config_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Roaming\Cuprate")); - - let dir = cuprate_data_dir(); - println!("cuprate_data_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Roaming\Cuprate")); - - let dir = cuprate_blockchain_dir(); - println!("cuprate_blockchain_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Roaming\Cuprate\blockchain")); + array[0].1 = r"AppData\Local\Cuprate"; + array[1].1 = r"AppData\Roaming\Cuprate"; + array[2].1 = r"AppData\Roaming\Cuprate"; + array[3].1 = r"AppData\Roaming\Cuprate\blockchain"; + array[4].1 = r"AppData\Roaming\Cuprate\txpool"; } else if cfg!(target_os = "macos") { - let dir = cuprate_cache_dir(); - println!("cuprate_cache_dir: {dir:?}"); - 
assert!(dir.ends_with("Library/Caches/Cuprate")); - - let dir = cuprate_config_dir(); - println!("cuprate_config_dir: {dir:?}"); - assert!(dir.ends_with("Library/Application Support/Cuprate")); - - let dir = cuprate_data_dir(); - println!("cuprate_data_dir: {dir:?}"); - assert!(dir.ends_with("Library/Application Support/Cuprate")); - - let dir = cuprate_blockchain_dir(); - println!("cuprate_blockchain_dir: {dir:?}"); - assert!(dir.ends_with("Library/Application Support/Cuprate/blockchain")); + array[0].1 = "Library/Caches/Cuprate"; + array[1].1 = "Library/Application Support/Cuprate"; + array[2].1 = "Library/Application Support/Cuprate"; + array[3].1 = "Library/Application Support/Cuprate/blockchain"; + array[4].1 = "Library/Application Support/Cuprate/txpool"; } else { // Assumes Linux. - let dir = cuprate_cache_dir(); - println!("cuprate_cache_dir: {dir:?}"); - assert!(dir.ends_with(".cache/cuprate")); + array[0].1 = ".cache/cuprate"; + array[1].1 = ".config/cuprate"; + array[2].1 = ".local/share/cuprate"; + array[3].1 = ".local/share/cuprate/blockchain"; + array[4].1 = ".local/share/cuprate/txpool"; + }; - let dir = cuprate_config_dir(); - println!("cuprate_config_dir: {dir:?}"); - assert!(dir.ends_with(".config/cuprate")); - - let dir = cuprate_data_dir(); - println!("cuprate_data_dir: {dir:?}"); - assert!(dir.ends_with(".local/share/cuprate")); - - let dir = cuprate_blockchain_dir(); - println!("cuprate_blockchain_dir: {dir:?}"); - assert!(dir.ends_with(".local/share/cuprate/blockchain")); + for (path, expected) in array { + assert!(path.is_absolute()); + assert!(path.ends_with(expected)); } } } diff --git a/helper/src/lib.rs b/helper/src/lib.rs index 90f420d6..de0d9555 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -1,36 +1,4 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -#![allow(clippy::len_zero, clippy::type_complexity, 
clippy::module_inception)] -#![deny(nonstandard_style, deprecated, missing_docs, unused_mut)] -#![forbid( - unused_unsafe, - future_incompatible, - break_with_label_and_loop, - coherence_leak_check, - duplicate_macro_attributes, - exported_private_dependencies, - for_loops_over_fallibles, - large_assignments, - overlapping_range_endpoints, - // private_in_public, - semicolon_in_expressions_from_macros, - redundant_semicolons, - unconditional_recursion, - unreachable_patterns, - unused_allocation, - unused_braces, - unused_comparisons, - unused_doc_comments, - unused_parens, - unused_labels, - while_true, - keyword_idents, - non_ascii_idents, - noop_method_call, - unreachable_pub, - single_use_lifetimes, - // variant_size_differences, -)] #![cfg_attr(not(feature = "std"), no_std)] //---------------------------------------------------------------------------------------------------- Public API @@ -40,6 +8,9 @@ pub mod asynch; // async collides #[cfg(feature = "atomic")] pub mod atomic; +#[cfg(feature = "cast")] +pub mod cast; + #[cfg(feature = "constants")] pub mod constants; diff --git a/helper/src/map.rs b/helper/src/map.rs index 96d9f615..7805ea66 100644 --- a/helper/src/map.rs +++ b/helper/src/map.rs @@ -7,6 +7,8 @@ //---------------------------------------------------------------------------------------------------- Use use monero_serai::transaction::Timelock; +use crate::cast::{u64_to_usize, usize_to_u64}; + //---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128` /// Split a [`u128`] value into 2 64-bit values. /// @@ -27,6 +29,7 @@ use monero_serai::transaction::Timelock; /// ``` #[inline] pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) { + #[allow(clippy::cast_possible_truncation)] (value as u64, (value >> 64) as u64) } @@ -58,7 +61,7 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u12 /// Map a [`u64`] to a [`Timelock`]. 
/// /// Height/time is not differentiated via type, but rather: -/// "height is any value less than 500_000_000 and timestamp is any value above" +/// "height is any value less than `500_000_000` and timestamp is any value above" /// so the `u64/usize` is stored without any tag. /// /// See [`timelock_to_u64`] for the inverse function. @@ -73,11 +76,11 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u12 /// assert_eq!(u64_to_timelock(499_999_999), Timelock::Block(499_999_999)); /// assert_eq!(u64_to_timelock(500_000_000), Timelock::Time(500_000_000)); /// ``` -pub fn u64_to_timelock(u: u64) -> Timelock { +pub const fn u64_to_timelock(u: u64) -> Timelock { if u == 0 { Timelock::None } else if u < 500_000_000 { - Timelock::Block(usize::try_from(u).unwrap()) + Timelock::Block(u64_to_usize(u)) } else { Timelock::Time(u) } @@ -94,10 +97,10 @@ pub fn u64_to_timelock(u: u64) -> Timelock { /// assert_eq!(timelock_to_u64(Timelock::Block(499_999_999)), 499_999_999); /// assert_eq!(timelock_to_u64(Timelock::Time(500_000_000)), 500_000_000); /// ``` -pub fn timelock_to_u64(timelock: Timelock) -> u64 { +pub const fn timelock_to_u64(timelock: Timelock) -> u64 { match timelock { Timelock::None => 0, - Timelock::Block(u) => u64::try_from(u).unwrap(), + Timelock::Block(u) => usize_to_u64(u), Timelock::Time(u) => u, } } diff --git a/helper/src/network.rs b/helper/src/network.rs index 684e71a4..f3224b33 100644 --- a/helper/src/network.rs +++ b/helper/src/network.rs @@ -30,11 +30,11 @@ pub enum Network { impl Network { /// Returns the network ID for the current network. 
- pub fn network_id(&self) -> [u8; 16] { + pub const fn network_id(&self) -> [u8; 16] { match self { - Network::Mainnet => MAINNET_NETWORK_ID, - Network::Testnet => TESTNET_NETWORK_ID, - Network::Stagenet => STAGENET_NETWORK_ID, + Self::Mainnet => MAINNET_NETWORK_ID, + Self::Testnet => TESTNET_NETWORK_ID, + Self::Stagenet => STAGENET_NETWORK_ID, } } } diff --git a/helper/src/num.rs b/helper/src/num.rs index f90357e9..674ed354 100644 --- a/helper/src/num.rs +++ b/helper/src/num.rs @@ -89,8 +89,9 @@ where /// assert_eq!(median(vec), 5); /// ``` /// -/// # Safety +/// # Invariant /// If not sorted the output will be invalid. +#[allow(clippy::debug_assert_with_mut_call)] pub fn median(array: impl AsRef<[T]>) -> T where T: Add diff --git a/helper/src/thread.rs b/helper/src/thread.rs index 96958ff6..04a26069 100644 --- a/helper/src/thread.rs +++ b/helper/src/thread.rs @@ -28,10 +28,10 @@ macro_rules! impl_thread_percent { $( $(#[$doc])* pub fn $fn_name() -> NonZeroUsize { - // SAFETY: // unwrap here is okay because: // - THREADS().get() is always non-zero // - max() guards against 0 + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss)] NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap() } )* @@ -58,10 +58,10 @@ impl_thread_percent! { /// Originally from . /// /// # Windows -/// Uses SetThreadPriority() with THREAD_PRIORITY_IDLE (-15). +/// Uses `SetThreadPriority()` with `THREAD_PRIORITY_IDLE` (-15). /// /// # Unix -/// Uses libc::nice() with the max nice level. +/// Uses `libc::nice()` with the max nice level. /// /// On macOS and *BSD: +20 /// On Linux: +19 @@ -74,7 +74,7 @@ pub fn low_priority_thread() { // SAFETY: calling C. // We are _lowering_ our priority, not increasing, so this function should never fail. 
unsafe { - let _ = SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE); + drop(SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE)); } } @@ -87,7 +87,7 @@ pub fn low_priority_thread() { // SAFETY: calling C. // We are _lowering_ our priority, not increasing, so this function should never fail. unsafe { - let _ = libc::nice(NICE_MAX); + libc::nice(NICE_MAX); } } } diff --git a/helper/src/time.rs b/helper/src/time.rs index 28aff7f5..ce39c2dc 100644 --- a/helper/src/time.rs +++ b/helper/src/time.rs @@ -129,6 +129,7 @@ pub const fn secs_to_clock(seconds: u32) -> (u8, u8, u8) { debug_assert!(m < 60); debug_assert!(s < 60); + #[allow(clippy::cast_possible_truncation)] // checked above (h as u8, m, s) } @@ -153,6 +154,7 @@ pub fn time() -> u32 { /// /// This is guaranteed to return a value between `0..=86399` pub fn time_utc() -> u32 { + #[allow(clippy::cast_sign_loss)] // checked in function calls unix_clock(chrono::offset::Local::now().timestamp() as u64) } diff --git a/misc/ENVIRONMENT-ADVICE.md b/misc/ENVIRONMENT-ADVICE.md index 295fabdb..b3358b9c 100644 --- a/misc/ENVIRONMENT-ADVICE.md +++ b/misc/ENVIRONMENT-ADVICE.md @@ -87,4 +87,4 @@ On Rust-analyzer's VSCode plugin, you can add the following configuration if you If you still deal with lags on VSCode or Neovim, you could try the following IDE: - RustRover: It have been reported to have excellent performance at managing huge workspace. It use its own fine-tuned plugins by jetbrains. -- Zed: Rust-written IDE focused on performance. Still in beta and macOS only. \ No newline at end of file +- Zed: Rust-written IDE focused on performance. Stable on MacOS and Linux (requires Vulkan driver, therefore unusable in virtual machines). 
diff --git a/net/epee-encoding/Cargo.toml b/net/epee-encoding/Cargo.toml index 7feac004..85ee2c93 100644 --- a/net/epee-encoding/Cargo.toml +++ b/net/epee-encoding/Cargo.toml @@ -15,6 +15,7 @@ default = ["std"] std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"] [dependencies] +cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false } paste = "1.0.14" diff --git a/net/epee-encoding/src/lib.rs b/net/epee-encoding/src/lib.rs index 5b64315e..fa3449b4 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -65,6 +65,8 @@ use core::{ops::Deref, str::from_utf8 as str_from_utf8}; use bytes::{Buf, BufMut, Bytes, BytesMut}; +use cuprate_helper::cast::{u64_to_usize, usize_to_u64}; + pub mod container_as_blob; pub mod error; mod io; @@ -242,7 +244,7 @@ pub fn write_bytes, B: BufMut>(t: T, w: &mut B) -> Result<()> { let bytes = t.as_ref(); let len = bytes.len(); - write_varint(len.try_into()?, w)?; + write_varint(usize_to_u64(len), w)?; if w.remaining_mut() < len { return Err(Error::IO("Not enough capacity to write bytes")); @@ -286,7 +288,7 @@ where I: Iterator + ExactSizeIterator, B: BufMut, { - write_varint(iterator.len().try_into()?, w)?; + write_varint(usize_to_u64(iterator.len()), w)?; for item in iterator.into_iter() { item.write(w)?; } @@ -334,7 +336,7 @@ fn skip_epee_value(r: &mut B, skipped_objects: &mut u8) -> Result<()> { if let Some(size) = marker.inner_marker.size() { let bytes_to_skip = size - .checked_mul(len.try_into()?) 
+ .checked_mul(u64_to_usize(len)) .ok_or(Error::Value("List is too big".to_string()))?; return advance(bytes_to_skip, r); }; @@ -352,8 +354,8 @@ fn skip_epee_value(r: &mut B, skipped_objects: &mut u8) -> Result<()> { | InnerMarker::U8 | InnerMarker::Bool => unreachable!("These types are constant size."), InnerMarker::String => { - let len = read_varint(r)?; - advance(len.try_into()?, r)?; + let len = u64_to_usize(read_varint(r)?); + advance(len, r)?; } InnerMarker::Object => { *skipped_objects += 1; diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 094f0ef1..000d89c7 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -7,6 +7,7 @@ use core::fmt::Debug; use bytes::{Buf, BufMut, Bytes, BytesMut}; use cuprate_fixed_bytes::{ByteArray, ByteArrayVec}; +use cuprate_helper::cast::u64_to_usize; use crate::{ io::{checked_read_primitive, checked_write_primitive}, @@ -66,11 +67,11 @@ impl EpeeValue for Vec { "Marker is not sequence when a sequence was expected", )); } - let len = read_varint(r)?; + let len = u64_to_usize(read_varint(r)?); let individual_marker = Marker::new(marker.inner_marker); - let mut res = Vec::with_capacity(len.try_into()?); + let mut res = Vec::with_capacity(len); for _ in 0..len { res.push(T::read(r, &individual_marker)?); } @@ -167,11 +168,13 @@ impl EpeeValue for Vec { return Err(Error::Format("Byte array exceeded max length")); } - if r.remaining() < len.try_into()? { + let len = u64_to_usize(len); + + if r.remaining() < len { return Err(Error::IO("Not enough bytes to fill object")); } - let mut res = vec![0; len.try_into()?]; + let mut res = vec![0; len]; r.copy_to_slice(&mut res); Ok(res) @@ -203,11 +206,13 @@ impl EpeeValue for Bytes { return Err(Error::Format("Byte array exceeded max length")); } - if r.remaining() < len.try_into()? 
{ + let len = u64_to_usize(len); + + if r.remaining() < len { return Err(Error::IO("Not enough bytes to fill object")); } - Ok(r.copy_to_bytes(len.try_into()?)) + Ok(r.copy_to_bytes(len)) } fn epee_default_value() -> Option { @@ -236,11 +241,13 @@ impl EpeeValue for BytesMut { return Err(Error::Format("Byte array exceeded max length")); } - if r.remaining() < len.try_into()? { + let len = u64_to_usize(len); + + if r.remaining() < len { return Err(Error::IO("Not enough bytes to fill object")); } - let mut bytes = BytesMut::zeroed(len.try_into()?); + let mut bytes = BytesMut::zeroed(len); r.copy_to_slice(&mut bytes); Ok(bytes) @@ -272,11 +279,13 @@ impl EpeeValue for ByteArrayVec { return Err(Error::Format("Byte array exceeded max length")); } - if r.remaining() < usize::try_from(len)? { + let len = u64_to_usize(len); + + if r.remaining() < len { return Err(Error::IO("Not enough bytes to fill object")); } - ByteArrayVec::try_from(r.copy_to_bytes(usize::try_from(len)?)) + ByteArrayVec::try_from(r.copy_to_bytes(len)) .map_err(|_| Error::Format("Field has invalid length")) } @@ -302,7 +311,7 @@ impl EpeeValue for ByteArray { return Err(Error::Format("Marker does not match expected Marker")); } - let len: usize = read_varint(r)?.try_into()?; + let len = u64_to_usize(read_varint(r)?); if len != N { return Err(Error::Format("Byte array has incorrect length")); } @@ -370,11 +379,11 @@ impl EpeeValue for Vec<[u8; N]> { )); } - let len = read_varint(r)?; + let len = u64_to_usize(read_varint(r)?); let individual_marker = Marker::new(marker.inner_marker); - let mut res = Vec::with_capacity(len.try_into()?); + let mut res = Vec::with_capacity(len); for _ in 0..len { res.push(<[u8; N]>::read(r, &individual_marker)?); } @@ -406,11 +415,11 @@ macro_rules! 
epee_seq { )); } - let len = read_varint(r)?; + let len = u64_to_usize(read_varint(r)?); let individual_marker = Marker::new(marker.inner_marker.clone()); - let mut res = Vec::with_capacity(len.try_into()?); + let mut res = Vec::with_capacity(len); for _ in 0..len { res.push(<$val>::read(r, &individual_marker)?); } diff --git a/net/epee-encoding/tests/duplicate_key.rs b/net/epee-encoding/tests/duplicate_key.rs index c1b3148f..0ed87aff 100644 --- a/net/epee-encoding/tests/duplicate_key.rs +++ b/net/epee-encoding/tests/duplicate_key.rs @@ -9,12 +9,12 @@ epee_object!( a: u8, ); -struct TT { +struct T2 { a: u8, } epee_object!( - TT, + T2, a: u8 = 0, ); @@ -35,5 +35,5 @@ fn duplicate_key_with_default() { b'a', 0x0B, 0x00, ]; - assert!(from_bytes::(&mut &data[..]).is_err()); + assert!(from_bytes::(&mut &data[..]).is_err()); } diff --git a/net/levin/Cargo.toml b/net/levin/Cargo.toml index 13deabea..1c585b9c 100644 --- a/net/levin/Cargo.toml +++ b/net/levin/Cargo.toml @@ -12,6 +12,8 @@ default = [] tracing = ["dep:tracing", "tokio-util/tracing"] [dependencies] +cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } + thiserror = { workspace = true } bytes = { workspace = true, features = ["std"] } bitflags = { workspace = true } diff --git a/net/levin/src/codec.rs b/net/levin/src/codec.rs index 3718d8c3..1177733f 100644 --- a/net/levin/src/codec.rs +++ b/net/levin/src/codec.rs @@ -20,6 +20,8 @@ use std::{fmt::Debug, marker::PhantomData}; use bytes::{Buf, BufMut, BytesMut}; use tokio_util::codec::{Decoder, Encoder}; +use cuprate_helper::cast::u64_to_usize; + use crate::{ header::{Flags, HEADER_SIZE}, message::{make_dummy_message, LevinMessage}, @@ -114,10 +116,7 @@ impl Decoder for LevinBucketCodec { std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head)); } LevinBucketState::WaitingForBody(head) => { - let body_len = head - .size - .try_into() - .map_err(|_| BucketError::BucketExceededMaxSize)?; + let body_len = 
u64_to_usize(head.size); if src.len() < body_len { src.reserve(body_len - src.len()); return Ok(None); @@ -255,13 +254,11 @@ impl Decoder for LevinMessageCodec { continue; }; - let max_size = if self.bucket_codec.handshake_message_seen { + let max_size = u64_to_usize(if self.bucket_codec.handshake_message_seen { self.bucket_codec.protocol.max_packet_size } else { self.bucket_codec.protocol.max_packet_size_before_handshake - } - .try_into() - .expect("Levin max message size is too large, does not fit into a usize."); + }); if bytes.len().saturating_add(bucket.body.len()) > max_size { return Err(BucketError::InvalidFragmentedMessage( @@ -300,12 +297,7 @@ impl Decoder for LevinMessageCodec { } // Check the fragmented message contains enough bytes to build the message. - if bytes.len().saturating_sub(HEADER_SIZE) - < header - .size - .try_into() - .map_err(|_| BucketError::BucketExceededMaxSize)? - { + if bytes.len().saturating_sub(HEADER_SIZE) < u64_to_usize(header.size) { return Err(BucketError::InvalidFragmentedMessage( "Fragmented message does not have enough bytes to fill bucket body", )); diff --git a/net/levin/src/lib.rs b/net/levin/src/lib.rs index 0a247f72..ab03bfb4 100644 --- a/net/levin/src/lib.rs +++ b/net/levin/src/lib.rs @@ -38,6 +38,8 @@ use std::fmt::Debug; use bytes::{Buf, Bytes}; use thiserror::Error; +use cuprate_helper::cast::usize_to_u64; + pub mod codec; pub mod header; pub mod message; @@ -212,7 +214,7 @@ impl BucketBuilder { Bucket { header: BucketHead { signature: self.signature.unwrap(), - size: body.len().try_into().unwrap(), + size: usize_to_u64(body.len()), have_to_return_data: ty.have_to_return_data(), command: self.command.unwrap(), return_code: self.return_code.unwrap(), diff --git a/net/levin/src/message.rs b/net/levin/src/message.rs index af8227d7..19aa1b50 100644 --- a/net/levin/src/message.rs +++ b/net/levin/src/message.rs @@ -5,6 +5,8 @@ //! for more control over what is actually sent over the wire at certain times. 
use bytes::{Bytes, BytesMut}; +use cuprate_helper::cast::usize_to_u64; + use crate::{ header::{Flags, HEADER_SIZE}, Bucket, BucketBuilder, BucketError, BucketHead, LevinBody, LevinCommand, Protocol, @@ -106,9 +108,7 @@ pub fn make_fragmented_messages( new_body.resize(fragment_size - HEADER_SIZE, 0); bucket.body = new_body.freeze(); - bucket.header.size = (fragment_size - HEADER_SIZE) - .try_into() - .expect("Bucket size does not fit into u64"); + bucket.header.size = usize_to_u64(fragment_size - HEADER_SIZE); } return Ok(vec![bucket]); @@ -118,9 +118,7 @@ pub fn make_fragmented_messages( // The first fragment will set the START flag, the last will set the END flag. let fragment_head = BucketHead { signature: protocol.signature, - size: (fragment_size - HEADER_SIZE) - .try_into() - .expect("Bucket size does not fit into u64"), + size: usize_to_u64(fragment_size - HEADER_SIZE), have_to_return_data: false, // Just use a default command. command: T::Command::from(0), @@ -191,7 +189,7 @@ pub(crate) fn make_dummy_message(protocol: &Protocol, size: usi // A header to put on the dummy message. let header = BucketHead { signature: protocol.signature, - size: size.try_into().expect("Bucket size does not fit into u64"), + size: usize_to_u64(size), have_to_return_data: false, // Just use a default command. 
command: T::from(0), diff --git a/net/levin/tests/fragmented_message.rs b/net/levin/tests/fragmented_message.rs index 7799a719..512fd461 100644 --- a/net/levin/tests/fragmented_message.rs +++ b/net/levin/tests/fragmented_message.rs @@ -8,6 +8,8 @@ use tokio::{ }; use tokio_util::codec::{FramedRead, FramedWrite}; +use cuprate_helper::cast::u64_to_usize; + use cuprate_levin::{ message::make_fragmented_messages, BucketBuilder, BucketError, LevinBody, LevinCommand, LevinMessageCodec, MessageType, Protocol, @@ -54,7 +56,7 @@ impl LevinBody for TestBody { _: MessageType, _: Self::Command, ) -> Result { - let size = body.get_u64_le().try_into().unwrap(); + let size = u64_to_usize(body.get_u64_le()); // bucket Ok(TestBody::Bytes(size, body.copy_to_bytes(size))) } diff --git a/net/wire/Cargo.toml b/net/wire/Cargo.toml index 101daa39..cbeb5511 100644 --- a/net/wire/Cargo.toml +++ b/net/wire/Cargo.toml @@ -15,6 +15,7 @@ cuprate-levin = { path = "../levin" } cuprate-epee-encoding = { path = "../epee-encoding" } cuprate-fixed-bytes = { path = "../fixed-bytes" } cuprate-types = { path = "../../types", default-features = false, features = ["epee"] } +cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } bitflags = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] } diff --git a/net/wire/src/p2p.rs b/net/wire/src/p2p.rs index 97431099..3829d172 100644 --- a/net/wire/src/p2p.rs +++ b/net/wire/src/p2p.rs @@ -99,7 +99,7 @@ impl LevinCommandTrait for LevinCommand { LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB - LevinCommand::Unknown(_) => usize::MAX.try_into().unwrap_or(u64::MAX), + LevinCommand::Unknown(_) => u64::MAX, } } diff --git a/p2p/dandelion-tower/src/lib.rs b/p2p/dandelion-tower/src/lib.rs index aa622f30..60b5ea5d 100644 --- a/p2p/dandelion-tower/src/lib.rs +++ b/p2p/dandelion-tower/src/lib.rs @@ -2,17 +2,17 @@ //! //! 
This crate implements [dandelion++](https://arxiv.org/pdf/1805.11060.pdf), using [`tower`]. //! -//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPool`](pool::DandelionPool). +//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPoolManager`](pool::DandelionPoolManager). //! The router is pretty minimal and only handles the absolute necessary data to route transactions, whereas the //! pool keeps track of all data necessary for dandelion++ but requires you to provide a backing tx-pool. //! -//! This split was done not because the [`DandelionPool`](pool::DandelionPool) is unnecessary but because it is hard -//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPool`](pool::DandelionPool) +//! This split was done not because the [`DandelionPoolManager`](pool::DandelionPoolManager) is unnecessary but because it is hard +//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPoolManager`](pool::DandelionPoolManager) //! requires you to implement part of the paper yourself. //! //! # Features //! -//! This crate only has one feature `txpool` which enables [`DandelionPool`](pool::DandelionPool). +//! This crate only has one feature `txpool` which enables [`DandelionPoolManager`](pool::DandelionPoolManager). //! //! # Needed Services //! @@ -45,7 +45,7 @@ //! //! ## Backing Pool //! -//! ([`DandelionPool`](pool::DandelionPool) only) +//! ([`DandelionPoolManager`](pool::DandelionPoolManager) only) //! //! This service is a backing tx-pool, in memory or on disk. //! The backing pool should have a request of [`TxStoreRequest`](traits::TxStoreRequest) and a response of diff --git a/p2p/dandelion-tower/src/pool.rs b/p2p/dandelion-tower/src/pool.rs deleted file mode 100644 index 5f4f7346..00000000 --- a/p2p/dandelion-tower/src/pool.rs +++ /dev/null @@ -1,509 +0,0 @@ -//! # Dandelion++ Pool -//! -//! 
This module contains [`DandelionPool`] which is a thin wrapper around a backing transaction store, -//! which fully implements the dandelion++ protocol. -//! -//! ### How To Get Txs From [`DandelionPool`]. -//! -//! [`DandelionPool`] does not provide a full tx-pool API. You cannot retrieve transactions from it or -//! check what transactions are in it, to do this you must keep a handle to the backing transaction store -//! yourself. -//! -//! The reason for this is, the [`DandelionPool`] will only itself be passing these requests onto the backing -//! pool, so it makes sense to remove the "middle man". -//! -//! ### Keep Stem Transactions Hidden -//! -//! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden. -//! So handle any requests to the tx-pool like the stem side of the pool does not exist. -use std::{ - collections::{HashMap, HashSet}, - future::Future, - hash::Hash, - marker::PhantomData, - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; - -use futures::{FutureExt, StreamExt}; -use rand::prelude::*; -use rand_distr::Exp; -use tokio::{ - sync::{mpsc, oneshot}, - task::JoinSet, -}; -use tokio_util::{sync::PollSender, time::DelayQueue}; -use tower::{Service, ServiceExt}; -use tracing::Instrument; - -use crate::{ - traits::{TxStoreRequest, TxStoreResponse}, - DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState, -}; - -/// Start the [`DandelionPool`]. -/// -/// This function spawns the [`DandelionPool`] and returns [`DandelionPoolService`] which can be used to send -/// requests to the pool. -/// -/// ### Args -/// -/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPool`]. -/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow -/// user to customise routing functionality. 
-/// - `backing_pool` is the backing transaction storage service -/// - `config` is [`DandelionConfig`]. -pub fn start_dandelion_pool( - buffer_size: usize, - dandelion_router: R, - backing_pool: P, - config: DandelionConfig, -) -> DandelionPoolService -where - Tx: Clone + Send + 'static, - TxID: Hash + Eq + Clone + Send + 'static, - PID: Hash + Eq + Clone + Send + 'static, - P: Service< - TxStoreRequest, - Response = TxStoreResponse, - Error = tower::BoxError, - > + Send - + 'static, - P::Future: Send + 'static, - R: Service, Response = State, Error = DandelionRouterError> - + Send - + 'static, - R::Future: Send + 'static, -{ - let (tx, rx) = mpsc::channel(buffer_size); - - let pool = DandelionPool { - dandelion_router, - backing_pool, - routing_set: JoinSet::new(), - stem_origins: HashMap::new(), - embargo_timers: DelayQueue::new(), - embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(), - config, - _tx: PhantomData, - }; - - let span = tracing::debug_span!("dandelion_pool"); - - tokio::spawn(pool.run(rx).instrument(span)); - - DandelionPoolService { - tx: PollSender::new(tx), - } -} - -#[derive(Copy, Clone, Debug, thiserror::Error)] -#[error("The dandelion pool was shutdown")] -pub struct DandelionPoolShutDown; - -/// An incoming transaction for the [`DandelionPool`] to handle. -/// -/// Users may notice there is no way to check if the dandelion-pool wants a tx according to an inventory message like seen -/// in Bitcoin, only having a request for a full tx. Users should look in the *public* backing pool to handle inv messages, -/// and request txs even if they are in the stem pool. -pub struct IncomingTx { - /// The transaction. - /// - /// It is recommended to put this in an [`Arc`](std::sync::Arc) as it needs to be cloned to send to the backing - /// tx pool and [`DandelionRouter`](crate::DandelionRouter) - pub tx: Tx, - /// The transaction ID. - pub tx_id: TxID, - /// The routing state of this transaction. 
- pub tx_state: TxState, -} - -/// The dandelion tx pool service. -#[derive(Clone)] -pub struct DandelionPoolService { - /// The channel to [`DandelionPool`]. - tx: PollSender<(IncomingTx, oneshot::Sender<()>)>, -} - -impl Service> for DandelionPoolService -where - Tx: Clone + Send, - TxID: Hash + Eq + Clone + Send + 'static, - PID: Hash + Eq + Clone + Send + 'static, -{ - type Response = (); - type Error = DandelionPoolShutDown; - type Future = - Pin> + Send + 'static>>; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown) - } - - fn call(&mut self, req: IncomingTx) -> Self::Future { - // although the channel isn't sending anything we want to wait for the request to be handled before continuing. - let (tx, rx) = oneshot::channel(); - - let res = self - .tx - .send_item((req, tx)) - .map_err(|_| DandelionPoolShutDown); - - async move { - res?; - rx.await.expect("Oneshot dropped before response!"); - - Ok(()) - } - .boxed() - } -} - -/// The dandelion++ tx pool. -/// -/// See the [module docs](self) for more. -pub struct DandelionPool { - /// The dandelion++ router - dandelion_router: R, - /// The backing tx storage. - backing_pool: P, - /// The set of tasks that are running the future returned from `dandelion_router`. - routing_set: JoinSet<(TxID, Result>)>, - - /// The origin of stem transactions. - stem_origins: HashMap>, - - /// Current stem pool embargo timers. - embargo_timers: DelayQueue, - /// The distrobution to sample to get embargo timers. - embargo_dist: Exp, - - /// The d++ config. 
- config: DandelionConfig, - - _tx: PhantomData, -} - -impl DandelionPool -where - Tx: Clone + Send, - TxID: Hash + Eq + Clone + Send + 'static, - PID: Hash + Eq + Clone + Send + 'static, - P: Service< - TxStoreRequest, - Response = TxStoreResponse, - Error = tower::BoxError, - >, - P::Future: Send + 'static, - R: Service, Response = State, Error = DandelionRouterError>, - R::Future: Send + 'static, -{ - /// Stores the tx in the backing pools stem pool, setting the embargo timer, stem origin and steming the tx. - async fn store_tx_and_stem( - &mut self, - tx: Tx, - tx_id: TxID, - from: Option, - ) -> Result<(), tower::BoxError> { - self.backing_pool - .ready() - .await? - .call(TxStoreRequest::Store( - tx.clone(), - tx_id.clone(), - State::Stem, - )) - .await?; - - let embargo_timer = self.embargo_dist.sample(&mut thread_rng()); - tracing::debug!( - "Setting embargo timer for stem tx: {} seconds.", - embargo_timer - ); - self.embargo_timers - .insert(tx_id.clone(), Duration::from_secs_f64(embargo_timer)); - - self.stem_tx(tx, tx_id, from).await - } - - /// Stems the tx, setting the stem origin, if it wasn't already set. - /// - /// This function does not add the tx to the backing pool. - async fn stem_tx( - &mut self, - tx: Tx, - tx_id: TxID, - from: Option, - ) -> Result<(), tower::BoxError> { - if let Some(peer) = &from { - self.stem_origins - .entry(tx_id.clone()) - .or_default() - .insert(peer.clone()); - } - - let state = from - .map(|from| TxState::Stem { from }) - .unwrap_or(TxState::Local); - - let fut = self - .dandelion_router - .ready() - .await? - .call(DandelionRouteReq { - tx, - state: state.clone(), - }); - - self.routing_set - .spawn(fut.map(|res| (tx_id, res.map_err(|_| state)))); - Ok(()) - } - - /// Stores the tx in the backing pool and fluffs the tx, removing the stem data for this tx. 
- async fn store_and_fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> { - // fluffs the tx first to prevent timing attacks where we could fluff at different average times - // depending on if the tx was in the stem pool already or not. - // Massively overkill but this is a minimal change. - self.fluff_tx(tx.clone(), tx_id.clone()).await?; - - // Remove the tx from the maps used during the stem phase. - self.stem_origins.remove(&tx_id); - - self.backing_pool - .ready() - .await? - .call(TxStoreRequest::Store(tx, tx_id, State::Fluff)) - .await?; - - // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the - // map. These timers should be relatively short, so it shouldn't be a problem. - //self.embargo_timers.try_remove(&tx_id); - - Ok(()) - } - - /// Fluffs a tx, does not add the tx to the tx pool. - async fn fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> { - let fut = self - .dandelion_router - .ready() - .await? - .call(DandelionRouteReq { - tx, - state: TxState::Fluff, - }); - - self.routing_set - .spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff)))); - Ok(()) - } - - /// Function to handle an incoming [`DandelionPoolRequest::IncomingTx`]. - async fn handle_incoming_tx( - &mut self, - tx: Tx, - tx_state: TxState, - tx_id: TxID, - ) -> Result<(), tower::BoxError> { - let TxStoreResponse::Contains(have_tx) = self - .backing_pool - .ready() - .await? - .call(TxStoreRequest::Contains(tx_id.clone())) - .await? - else { - panic!("Backing tx pool responded with wrong response for request."); - }; - // If we have already fluffed this tx then we don't need to do anything. 
- if have_tx == Some(State::Fluff) { - tracing::debug!("Already fluffed incoming tx, ignoring."); - return Ok(()); - } - - match tx_state { - TxState::Stem { from } => { - if self - .stem_origins - .get(&tx_id) - .is_some_and(|peers| peers.contains(&from)) - { - tracing::debug!("Received stem tx twice from same peer, fluffing it"); - // The same peer sent us a tx twice, fluff it. - self.promote_and_fluff_tx(tx_id).await - } else { - // This could be a new tx or it could have already been stemed, but we still stem it again - // unless the same peer sends us a tx twice. - tracing::debug!("Steming incoming tx"); - self.store_tx_and_stem(tx, tx_id, Some(from)).await - } - } - TxState::Fluff => { - tracing::debug!("Fluffing incoming tx"); - self.store_and_fluff_tx(tx, tx_id).await - } - TxState::Local => { - // If we have already stemed this tx then nothing to do. - if have_tx.is_some() { - tracing::debug!("Received a local tx that we already have, skipping"); - return Ok(()); - } - tracing::debug!("Steming local transaction"); - self.store_tx_and_stem(tx, tx_id, None).await - } - } - } - - /// Promotes a tx to the clear pool. - async fn promote_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> { - // Remove the tx from the maps used during the stem phase. - self.stem_origins.remove(&tx_id); - - // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the - // map. These timers should be relatively short, so it shouldn't be a problem. - //self.embargo_timers.try_remove(&tx_id); - - self.backing_pool - .ready() - .await? - .call(TxStoreRequest::Promote(tx_id)) - .await?; - - Ok(()) - } - - /// Promotes a tx to the public fluff pool and fluffs the tx. - async fn promote_and_fluff_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> { - tracing::debug!("Promoting transaction to public pool and fluffing it."); - - let TxStoreResponse::Transaction(tx) = self - .backing_pool - .ready() - .await? 
- .call(TxStoreRequest::Get(tx_id.clone())) - .await? - else { - panic!("Backing tx pool responded with wrong response for request."); - }; - - let Some((tx, state)) = tx else { - tracing::debug!("Could not find tx, skipping."); - return Ok(()); - }; - - if state == State::Fluff { - tracing::debug!("Transaction already fluffed, skipping."); - return Ok(()); - } - - self.promote_tx(tx_id.clone()).await?; - self.fluff_tx(tx, tx_id).await - } - - /// Returns a tx stored in the fluff _OR_ stem pool. - async fn get_tx_from_pool(&mut self, tx_id: TxID) -> Result, tower::BoxError> { - let TxStoreResponse::Transaction(tx) = self - .backing_pool - .ready() - .await? - .call(TxStoreRequest::Get(tx_id)) - .await? - else { - panic!("Backing tx pool responded with wrong response for request."); - }; - - Ok(tx.map(|tx| tx.0)) - } - - /// Starts the [`DandelionPool`]. - async fn run( - mut self, - mut rx: mpsc::Receiver<(IncomingTx, oneshot::Sender<()>)>, - ) { - tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config); - - // On start up we just fluff all txs left in the stem pool. - let Ok(TxStoreResponse::IDs(ids)) = (&mut self.backing_pool) - .oneshot(TxStoreRequest::IDsInStemPool) - .await - else { - tracing::error!("Failed to get transactions in stem pool."); - return; - }; - - tracing::debug!( - "Fluffing {} txs that are currently in the stem pool", - ids.len() - ); - - for id in ids { - if let Err(e) = self.promote_and_fluff_tx(id).await { - tracing::error!("Failed to fluff tx in the stem pool at start up, {e}."); - return; - } - } - - loop { - tracing::trace!("Waiting for next event."); - tokio::select! { - // biased to handle current txs before routing new ones. 
- biased; - Some(fired) = self.embargo_timers.next() => { - tracing::debug!("Embargo timer fired, did not see stem tx in time."); - - let tx_id = fired.into_inner(); - if let Err(e) = self.promote_and_fluff_tx(tx_id).await { - tracing::error!("Error handling fired embargo timer: {e}"); - return; - } - } - Some(Ok((tx_id, res))) = self.routing_set.join_next() => { - tracing::trace!("Received d++ routing result."); - - let res = match res { - Ok(State::Fluff) => { - tracing::debug!("Transaction was fluffed upgrading it to the public pool."); - self.promote_tx(tx_id).await - } - Err(tx_state) => { - tracing::debug!("Error routing transaction, trying again."); - - match self.get_tx_from_pool(tx_id.clone()).await { - Ok(Some(tx)) => match tx_state { - TxState::Fluff => self.fluff_tx(tx, tx_id).await, - TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await, - TxState::Local => self.stem_tx(tx, tx_id, None).await, - } - Err(e) => Err(e), - _ => continue, - } - } - Ok(State::Stem) => continue, - }; - - if let Err(e) = res { - tracing::error!("Error handling transaction routing return: {e}"); - return; - } - } - req = rx.recv() => { - tracing::debug!("Received new tx to route."); - - let Some((IncomingTx { tx, tx_state, tx_id }, res_tx)) = req else { - return; - }; - - if let Err(e) = self.handle_incoming_tx(tx, tx_state, tx_id).await { - let _ = res_tx.send(()); - - tracing::error!("Error handling transaction in dandelion pool: {e}"); - return; - } - let _ = res_tx.send(()); - - } - } - } - } -} diff --git a/p2p/dandelion-tower/src/pool/incoming_tx.rs b/p2p/dandelion-tower/src/pool/incoming_tx.rs new file mode 100644 index 00000000..c9a30dee --- /dev/null +++ b/p2p/dandelion-tower/src/pool/incoming_tx.rs @@ -0,0 +1,113 @@ +//! Contains [`IncomingTx`] and [`IncomingTxBuilder`] +use crate::{State, TxState}; + +/// An incoming transaction that has gone through the preprocessing stage. +pub struct IncomingTx { + /// The transaction. 
+ pub(crate) tx: Tx, + /// The transaction ID. + pub(crate) tx_id: TxId, + /// The routing state of the transaction. + pub(crate) routing_state: TxState, +} + +/// An [`IncomingTx`] builder. +/// +/// The const generics here are used to restrict what methods can be called. +/// +/// - `RS`: routing state; a `bool` for if the routing state is set +/// - `DBS`: database state; a `bool` for if the state in the DB is set +pub struct IncomingTxBuilder { + /// The transaction. + tx: Tx, + /// The transaction ID. + tx_id: TxId, + /// The routing state of the transaction. + routing_state: Option>, + /// The state of this transaction in the DB. + state_in_db: Option, +} + +impl IncomingTxBuilder { + /// Creates a new [`IncomingTxBuilder`]. + pub fn new(tx: Tx, tx_id: TxId) -> Self { + Self { + tx, + tx_id, + routing_state: None, + state_in_db: None, + } + } +} + +impl IncomingTxBuilder { + /// Adds the routing state to the builder. + /// + /// The routing state is the origin of this transaction from our perspective. + pub fn with_routing_state( + self, + state: TxState, + ) -> IncomingTxBuilder { + IncomingTxBuilder { + tx: self.tx, + tx_id: self.tx_id, + routing_state: Some(state), + state_in_db: self.state_in_db, + } + } +} + +impl IncomingTxBuilder { + /// Adds the database state to the builder. + /// + /// If the transaction is not in the DB already then the state should be [`None`]. + pub fn with_state_in_db( + self, + state: Option, + ) -> IncomingTxBuilder { + IncomingTxBuilder { + tx: self.tx, + tx_id: self.tx_id, + routing_state: self.routing_state, + state_in_db: state, + } + } +} + +impl IncomingTxBuilder { + /// Builds the [`IncomingTx`]. + /// + /// If this returns [`None`] then the transaction does not need to be given to the dandelion pool + /// manager. 
+ pub fn build(self) -> Option> { + let routing_state = self.routing_state.unwrap(); + + if self.state_in_db == Some(State::Fluff) { + return None; + } + + Some(IncomingTx { + tx: self.tx, + tx_id: self.tx_id, + routing_state, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_builder() { + IncomingTxBuilder::new(1, 2) + .with_routing_state(TxState::Stem { from: 3 }) + .with_state_in_db(None) + .build(); + + IncomingTxBuilder::new(1, 2) + .with_state_in_db(None) + .with_routing_state(TxState::Stem { from: 3 }) + .build(); + } +} diff --git a/p2p/dandelion-tower/src/pool/manager.rs b/p2p/dandelion-tower/src/pool/manager.rs new file mode 100644 index 00000000..9e1572e1 --- /dev/null +++ b/p2p/dandelion-tower/src/pool/manager.rs @@ -0,0 +1,294 @@ +use std::{ + collections::{HashMap, HashSet}, + hash::Hash, + marker::PhantomData, + time::Duration, +}; + +use futures::{FutureExt, StreamExt}; +use rand::prelude::*; +use rand_distr::Exp; +use tokio::{ + sync::{mpsc, oneshot}, + task::JoinSet, +}; +use tokio_util::time::DelayQueue; +use tower::{Service, ServiceExt}; + +use crate::{ + pool::IncomingTx, + traits::{TxStoreRequest, TxStoreResponse}, + DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState, +}; + +#[derive(Copy, Clone, Debug, thiserror::Error)] +#[error("The dandelion pool was shutdown")] +pub struct DandelionPoolShutDown; + +/// The dandelion++ pool manager. +/// +/// See the [module docs](super) for more. +pub struct DandelionPoolManager { + /// The dandelion++ router + pub(crate) dandelion_router: R, + /// The backing tx storage. + pub(crate) backing_pool: P, + /// The set of tasks that are running the future returned from `dandelion_router`. + pub(crate) routing_set: JoinSet<(TxId, Result>)>, + + /// The origin of stem transactions. + pub(crate) stem_origins: HashMap>, + + /// Current stem pool embargo timers. + pub(crate) embargo_timers: DelayQueue, + /// The distrobution to sample to get embargo timers. 
+ pub(crate) embargo_dist: Exp, + + /// The d++ config. + pub(crate) config: DandelionConfig, + + pub(crate) _tx: PhantomData, +} + +impl DandelionPoolManager +where + Tx: Clone + Send, + TxId: Hash + Eq + Clone + Send + 'static, + PeerId: Hash + Eq + Clone + Send + 'static, + P: Service, Response = TxStoreResponse, Error = tower::BoxError>, + P::Future: Send + 'static, + R: Service, Response = State, Error = DandelionRouterError>, + R::Future: Send + 'static, +{ + /// Adds a new embargo timer to the running timers, with a duration pulled from [`Self::embargo_dist`] + fn add_embargo_timer_for_tx(&mut self, tx_id: TxId) { + let embargo_timer = self.embargo_dist.sample(&mut thread_rng()); + tracing::debug!( + "Setting embargo timer for stem tx: {} seconds.", + embargo_timer + ); + + self.embargo_timers + .insert(tx_id, Duration::from_secs_f64(embargo_timer)); + } + + /// Stems the tx, setting the stem origin, if it wasn't already set. + /// + /// This function does not add the tx to the backing pool. + async fn stem_tx( + &mut self, + tx: Tx, + tx_id: TxId, + from: Option, + ) -> Result<(), tower::BoxError> { + if let Some(peer) = &from { + self.stem_origins + .entry(tx_id.clone()) + .or_default() + .insert(peer.clone()); + } + + let state = from + .map(|from| TxState::Stem { from }) + .unwrap_or(TxState::Local); + + let fut = self + .dandelion_router + .ready() + .await? + .call(DandelionRouteReq { + tx, + state: state.clone(), + }); + + self.routing_set + .spawn(fut.map(|res| (tx_id, res.map_err(|_| state)))); + Ok(()) + } + + /// Fluffs a tx, does not add the tx to the tx pool. + async fn fluff_tx(&mut self, tx: Tx, tx_id: TxId) -> Result<(), tower::BoxError> { + let fut = self + .dandelion_router + .ready() + .await? + .call(DandelionRouteReq { + tx, + state: TxState::Fluff, + }); + + self.routing_set + .spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff)))); + Ok(()) + } + + /// Function to handle an [`IncomingTx`]. 
+ async fn handle_incoming_tx( + &mut self, + tx: Tx, + tx_state: TxState, + tx_id: TxId, + ) -> Result<(), tower::BoxError> { + match tx_state { + TxState::Stem { from } => { + if self + .stem_origins + .get(&tx_id) + .is_some_and(|peers| peers.contains(&from)) + { + tracing::debug!("Received stem tx twice from same peer, fluffing it"); + // The same peer sent us a tx twice, fluff it. + self.promote_and_fluff_tx(tx_id).await?; + } else { + // This could be a new tx or it could have already been stemed, but we still stem it again + // unless the same peer sends us a tx twice. + tracing::debug!("Steming incoming tx"); + self.stem_tx(tx, tx_id.clone(), Some(from)).await?; + self.add_embargo_timer_for_tx(tx_id); + } + } + TxState::Fluff => { + tracing::debug!("Fluffing incoming tx"); + self.fluff_tx(tx, tx_id).await?; + } + TxState::Local => { + tracing::debug!("Steming local transaction"); + self.stem_tx(tx, tx_id.clone(), None).await?; + self.add_embargo_timer_for_tx(tx_id); + } + } + + Ok(()) + } + + /// Promotes a tx to the clear pool. + async fn promote_tx(&mut self, tx_id: TxId) -> Result<(), tower::BoxError> { + // Remove the tx from the maps used during the stem phase. + self.stem_origins.remove(&tx_id); + + // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the + // map. These timers should be relatively short, so it shouldn't be a problem. + //self.embargo_timers.try_remove(&tx_id); + + self.backing_pool + .ready() + .await? + .call(TxStoreRequest::Promote(tx_id)) + .await?; + + Ok(()) + } + + /// Promotes a tx to the public fluff pool and fluffs the tx. + async fn promote_and_fluff_tx(&mut self, tx_id: TxId) -> Result<(), tower::BoxError> { + tracing::debug!("Promoting transaction to public pool and fluffing it."); + + let TxStoreResponse::Transaction(tx) = self + .backing_pool + .ready() + .await? + .call(TxStoreRequest::Get(tx_id.clone())) + .await? 
+ else { + panic!("Backing tx pool responded with wrong response for request."); + }; + + let Some((tx, state)) = tx else { + tracing::debug!("Could not find tx, skipping."); + return Ok(()); + }; + + if state == State::Fluff { + tracing::debug!("Transaction already fluffed, skipping."); + return Ok(()); + } + + self.promote_tx(tx_id.clone()).await?; + self.fluff_tx(tx, tx_id).await + } + + /// Returns a tx stored in the fluff _OR_ stem pool. + async fn get_tx_from_pool(&mut self, tx_id: TxId) -> Result, tower::BoxError> { + let TxStoreResponse::Transaction(tx) = self + .backing_pool + .ready() + .await? + .call(TxStoreRequest::Get(tx_id)) + .await? + else { + panic!("Backing tx pool responded with wrong response for request."); + }; + + Ok(tx.map(|tx| tx.0)) + } + + /// Starts the [`DandelionPoolManager`]. + pub(crate) async fn run( + mut self, + mut rx: mpsc::Receiver<(IncomingTx, oneshot::Sender<()>)>, + ) { + tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config); + + loop { + tracing::trace!("Waiting for next event."); + tokio::select! { + // biased to handle current txs before routing new ones. 
+ biased; + Some(fired) = self.embargo_timers.next() => { + tracing::debug!("Embargo timer fired, did not see stem tx in time."); + + let tx_id = fired.into_inner(); + if let Err(e) = self.promote_and_fluff_tx(tx_id).await { + tracing::error!("Error handling fired embargo timer: {e}"); + return; + } + } + Some(Ok((tx_id, res))) = self.routing_set.join_next() => { + tracing::trace!("Received d++ routing result."); + + let res = match res { + Ok(State::Fluff) => { + tracing::debug!("Transaction was fluffed upgrading it to the public pool."); + self.promote_tx(tx_id).await + } + Err(tx_state) => { + tracing::debug!("Error routing transaction, trying again."); + + match self.get_tx_from_pool(tx_id.clone()).await { + Ok(Some(tx)) => match tx_state { + TxState::Fluff => self.fluff_tx(tx, tx_id).await, + TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await, + TxState::Local => self.stem_tx(tx, tx_id, None).await, + } + Err(e) => Err(e), + _ => continue, + } + } + Ok(State::Stem) => continue, + }; + + if let Err(e) = res { + tracing::error!("Error handling transaction routing return: {e}"); + return; + } + } + req = rx.recv() => { + tracing::debug!("Received new tx to route."); + + let Some((IncomingTx { tx, tx_id, routing_state }, res_tx)) = req else { + return; + }; + + if let Err(e) = self.handle_incoming_tx(tx, routing_state, tx_id).await { + let _ = res_tx.send(()); + + tracing::error!("Error handling transaction in dandelion pool: {e}"); + return; + } + let _ = res_tx.send(()); + + } + } + } + } +} diff --git a/p2p/dandelion-tower/src/pool/mod.rs b/p2p/dandelion-tower/src/pool/mod.rs new file mode 100644 index 00000000..40a36172 --- /dev/null +++ b/p2p/dandelion-tower/src/pool/mod.rs @@ -0,0 +1,145 @@ +//! # Dandelion++ Pool +//! +//! This module contains [`DandelionPoolManager`] which is a wrapper around a backing transaction store, +//! which fully implements the dandelion++ protocol. +//! +//! 
The [`DandelionPoolManager`] is a middle man between a [preprocessing stage](#preprocessing-stage) and a dandelion router. +//! It handles promoting transactions in the stem state to the fluff state and setting embargo timers on stem state transactions. +//! +//! ### Preprocessing stage +//! +//! The preprocessing stage (not handled in this crate) before giving the transaction to the [`DandelionPoolManager`] +//! should handle: +//! +//! - verifying the tx. +//! - checking if we have the tx in the pool already and giving that information to the [`IncomingTxBuilder`]. +//! - storing the tx in the pool, if it isn't there already. +//! +//! ### Keep Stem Transactions Hidden +//! +//! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden. +//! So handle any requests to the tx-pool like the stem side of the pool does not exist. +use std::{ + collections::HashMap, + hash::Hash, + marker::PhantomData, + task::{Context, Poll}, +}; + +use futures::{future::BoxFuture, FutureExt}; +use rand_distr::Exp; +use tokio::{ + sync::{mpsc, oneshot}, + task::JoinSet, +}; +use tokio_util::{sync::PollSender, time::DelayQueue}; +use tower::Service; +use tracing::Instrument; + +use crate::{ + pool::manager::DandelionPoolShutDown, + traits::{TxStoreRequest, TxStoreResponse}, + DandelionConfig, DandelionRouteReq, DandelionRouterError, State, +}; + +mod incoming_tx; +mod manager; + +pub use incoming_tx::{IncomingTx, IncomingTxBuilder}; +pub use manager::DandelionPoolManager; + +/// Start the [`DandelionPoolManager`]. +/// +/// This function spawns the [`DandelionPoolManager`] and returns [`DandelionPoolService`] which can be used to send +/// requests to the pool. +/// +/// ### Args +/// +/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPoolManager`]. 
+/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow +/// user to customise routing functionality. +/// - `backing_pool` is the backing transaction storage service +/// - `config` is [`DandelionConfig`]. +pub fn start_dandelion_pool_manager( + buffer_size: usize, + dandelion_router: R, + backing_pool: P, + config: DandelionConfig, +) -> DandelionPoolService +where + Tx: Clone + Send + 'static, + TxId: Hash + Eq + Clone + Send + 'static, + PeerId: Hash + Eq + Clone + Send + 'static, + P: Service, Response = TxStoreResponse, Error = tower::BoxError> + + Send + + 'static, + P::Future: Send + 'static, + R: Service, Response = State, Error = DandelionRouterError> + + Send + + 'static, + R::Future: Send + 'static, +{ + let (tx, rx) = mpsc::channel(buffer_size); + + let pool = DandelionPoolManager { + dandelion_router, + backing_pool, + routing_set: JoinSet::new(), + stem_origins: HashMap::new(), + embargo_timers: DelayQueue::new(), + embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(), + config, + _tx: PhantomData, + }; + + let span = tracing::debug_span!("dandelion_pool"); + + tokio::spawn(pool.run(rx).instrument(span)); + + DandelionPoolService { + tx: PollSender::new(tx), + } +} + +/// The dandelion pool manager service. +/// +/// Used to send [`IncomingTx`]s to the [`DandelionPoolManager`] +#[derive(Clone)] +pub struct DandelionPoolService { + /// The channel to [`DandelionPoolManager`]. 
+ tx: PollSender<(IncomingTx, oneshot::Sender<()>)>, +} + +impl Service> + for DandelionPoolService +where + Tx: Clone + Send, + TxId: Hash + Eq + Clone + Send + 'static, + PeerId: Hash + Eq + Clone + Send + 'static, +{ + type Response = (); + type Error = DandelionPoolShutDown; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown) + } + + fn call(&mut self, req: IncomingTx) -> Self::Future { + // although the channel isn't sending anything we want to wait for the request to be handled before continuing. + let (tx, rx) = oneshot::channel(); + + let res = self + .tx + .send_item((req, tx)) + .map_err(|_| DandelionPoolShutDown); + + async move { + res?; + rx.await.expect("Oneshot dropped before response!"); + + Ok(()) + } + .boxed() + } +} diff --git a/p2p/dandelion-tower/src/router.rs b/p2p/dandelion-tower/src/router.rs index c118c0b7..edeccaeb 100644 --- a/p2p/dandelion-tower/src/router.rs +++ b/p2p/dandelion-tower/src/router.rs @@ -6,7 +6,7 @@ //! ### What The Router Does Not Do //! //! It does not handle anything to do with keeping transactions long term, i.e. embargo timers and handling -//! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPool) +//! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPoolManager) use std::{ collections::HashMap, hash::Hash, @@ -43,9 +43,9 @@ pub enum DandelionRouterError { } /// A response from an attempt to retrieve an outbound peer. -pub enum OutboundPeer { +pub enum OutboundPeer { /// A peer. - Peer(ID, T), + Peer(Id, T), /// The peer store is exhausted and has no more to return. Exhausted, } @@ -61,28 +61,28 @@ pub enum State { /// The routing state of a transaction. #[derive(Debug, Clone, Eq, PartialEq)] -pub enum TxState { +pub enum TxState { /// Fluff state. 
Fluff, /// Stem state. Stem { - /// The peer who sent us this transaction's ID. - from: ID, + /// The peer who sent us this transaction's Id. + from: Id, }, /// Local - the transaction originated from our node. Local, } /// A request to route a transaction. -pub struct DandelionRouteReq { +pub struct DandelionRouteReq { /// The transaction. pub tx: Tx, /// The transaction state. - pub state: TxState, + pub state: TxState, } /// The dandelion router service. -pub struct DandelionRouter { +pub struct DandelionRouter { // pub(crate) is for tests /// A [`Discover`] where we can get outbound peers from. outbound_peer_discover: Pin>, @@ -95,14 +95,14 @@ pub struct DandelionRouter { epoch_start: Instant, /// The stem our local transactions will be sent to. - local_route: Option, - /// A [`HashMap`] linking peer's IDs to IDs in `stem_peers`. - stem_routes: HashMap, + local_route: Option, + /// A [`HashMap`] linking peer's Ids to Ids in `stem_peers`. + stem_routes: HashMap, /// Peers we are using for stemming. /// /// This will contain peers, even in [`State::Fluff`] to allow us to stem [`TxState::Local`] /// transactions. - pub(crate) stem_peers: HashMap, + pub(crate) stem_peers: HashMap, /// The distribution to sample to get the [`State`], true is [`State::Fluff`]. state_dist: Bernoulli, @@ -116,10 +116,10 @@ pub struct DandelionRouter { _tx: PhantomData, } -impl DandelionRouter +impl DandelionRouter where - ID: Hash + Eq + Clone, - P: TryStream, Error = tower::BoxError>, + Id: Hash + Eq + Clone, + P: TryStream, Error = tower::BoxError>, B: Service, Error = tower::BoxError>, B::Future: Send + 'static, S: Service, Error = tower::BoxError>, @@ -198,7 +198,7 @@ where fn stem_tx( &mut self, tx: Tx, - from: ID, + from: Id, ) -> BoxFuture<'static, Result> { if self.stem_peers.is_empty() { tracing::debug!("Stem peers are empty, fluffing stem transaction."); @@ -258,19 +258,10 @@ where } } -/* -## Generics ## - -Tx: The tx type -ID: Peer Id type - unique identifier for nodes. 
-P: Peer Set discover - where we can get outbound peers from -B: Broadcast service - where we send txs to get diffused. -S: The Peer service - handles routing messages to a single node. - */ -impl Service> for DandelionRouter +impl Service> for DandelionRouter where - ID: Hash + Eq + Clone, - P: TryStream, Error = tower::BoxError>, + Id: Hash + Eq + Clone, + P: TryStream, Error = tower::BoxError>, B: Service, Error = tower::BoxError>, B::Future: Send + 'static, S: Service, Error = tower::BoxError>, @@ -336,7 +327,7 @@ where Poll::Ready(Ok(())) } - fn call(&mut self, req: DandelionRouteReq) -> Self::Future { + fn call(&mut self, req: DandelionRouteReq) -> Self::Future { tracing::trace!(parent: &self.span, "Handling route request."); match req.state { diff --git a/p2p/dandelion-tower/src/tests/mod.rs b/p2p/dandelion-tower/src/tests/mod.rs index d868a991..1c6a3e05 100644 --- a/p2p/dandelion-tower/src/tests/mod.rs +++ b/p2p/dandelion-tower/src/tests/mod.rs @@ -76,11 +76,9 @@ pub fn mock_in_memory_backing_pool< TxID: Clone + Hash + Eq + Send + 'static, >() -> ( impl Service< - TxStoreRequest, - Response = TxStoreResponse, - Future = impl Future, tower::BoxError>> - + Send - + 'static, + TxStoreRequest, + Response = TxStoreResponse, + Future = impl Future, tower::BoxError>> + Send + 'static, Error = tower::BoxError, > + Send + 'static, @@ -90,33 +88,14 @@ pub fn mock_in_memory_backing_pool< let txs_2 = txs.clone(); ( - service_fn(move |req: TxStoreRequest| { + service_fn(move |req: TxStoreRequest| { let txs = txs.clone(); async move { match req { - TxStoreRequest::Store(tx, tx_id, state) => { - txs.lock().unwrap().insert(tx_id, (tx, state)); - Ok(TxStoreResponse::Ok) - } TxStoreRequest::Get(tx_id) => { let tx_state = txs.lock().unwrap().get(&tx_id).cloned(); Ok(TxStoreResponse::Transaction(tx_state)) } - TxStoreRequest::Contains(tx_id) => Ok(TxStoreResponse::Contains( - txs.lock().unwrap().get(&tx_id).map(|res| res.1), - )), - TxStoreRequest::IDsInStemPool => { - // 
horribly inefficient, but it's test code :) - let ids = txs - .lock() - .unwrap() - .iter() - .filter(|(_, (_, state))| matches!(state, State::Stem)) - .map(|tx| tx.0.clone()) - .collect::>(); - - Ok(TxStoreResponse::IDs(ids)) - } TxStoreRequest::Promote(tx_id) => { let _ = txs .lock() diff --git a/p2p/dandelion-tower/src/tests/pool.rs b/p2p/dandelion-tower/src/tests/pool.rs index 4a7c87dd..b7fa55eb 100644 --- a/p2p/dandelion-tower/src/tests/pool.rs +++ b/p2p/dandelion-tower/src/tests/pool.rs @@ -1,12 +1,11 @@ use std::time::Duration; +use super::*; use crate::{ - pool::{start_dandelion_pool, IncomingTx}, + pool::{start_dandelion_pool_manager, IncomingTx}, DandelionConfig, DandelionRouter, Graph, TxState, }; -use super::*; - #[tokio::test] async fn basic_functionality() { let config = DandelionConfig { @@ -21,9 +20,9 @@ async fn basic_functionality() { let router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config); - let (pool_svc, pool) = mock_in_memory_backing_pool(); + let (pool_svc, _pool) = mock_in_memory_backing_pool(); - let mut pool_svc = start_dandelion_pool(15, router, pool_svc, config); + let mut pool_svc = start_dandelion_pool_manager(15, router, pool_svc, config); pool_svc .ready() @@ -32,11 +31,13 @@ async fn basic_functionality() { .call(IncomingTx { tx: 0_usize, tx_id: 1_usize, - tx_state: TxState::Fluff, + routing_state: TxState::Fluff, }) .await .unwrap(); - assert!(pool.lock().unwrap().contains_key(&1)); + // TODO: the DandelionPoolManager doesn't handle adding txs to the pool, add more tests here to test + // all functionality. 
+ //assert!(pool.lock().unwrap().contains_key(&1)); assert!(broadcast_rx.try_recv().is_ok()) } diff --git a/p2p/dandelion-tower/src/traits.rs b/p2p/dandelion-tower/src/traits.rs index c84ecf04..bbf60863 100644 --- a/p2p/dandelion-tower/src/traits.rs +++ b/p2p/dandelion-tower/src/traits.rs @@ -8,42 +8,24 @@ pub struct StemRequest(pub Tx); #[cfg(feature = "txpool")] /// A request sent to the backing transaction pool storage. -pub enum TxStoreRequest { - /// A request to store a transaction with the ID to store it under and the pool to store it in. - /// - /// If the tx is already in the pool then do nothing, unless the tx is in the stem pool then move it - /// to the fluff pool, _if this request state is fluff_. - Store(Tx, TxID, crate::State), - /// A request to retrieve a `Tx` with the given ID from the pool, should not remove that tx from the pool. +pub enum TxStoreRequest { + /// A request to retrieve a `Tx` with the given Id from the pool, should not remove that tx from the pool. /// /// Must return [`TxStoreResponse::Transaction`] - Get(TxID), + Get(TxId), /// Promote a transaction from the stem pool to the public pool. /// /// If the tx is already in the fluff pool do nothing. /// /// This should not error if the tx isn't in the pool at all. - Promote(TxID), - /// A request to check if a translation is in the pool. - /// - /// Must return [`TxStoreResponse::Contains`] - Contains(TxID), - /// Returns the IDs of all the transaction in the stem pool. - /// - /// Must return [`TxStoreResponse::IDs`] - IDsInStemPool, + Promote(TxId), } #[cfg(feature = "txpool")] /// A response sent back from the backing transaction pool. -pub enum TxStoreResponse { +pub enum TxStoreResponse { /// A generic ok response. Ok, - /// A response containing a [`Option`] for if the transaction is in the pool (Some) or not (None) and in which pool - /// the tx is in. - Contains(Option), /// A response containing a requested transaction. 
Transaction(Option<(Tx, crate::State)>), - /// A list of transaction IDs. - IDs(Vec), } diff --git a/rpc/interface/Cargo.toml b/rpc/interface/Cargo.toml index a83c0f07..42d10554 100644 --- a/rpc/interface/Cargo.toml +++ b/rpc/interface/Cargo.toml @@ -9,8 +9,8 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/cuprate-rpc-inte keywords = ["cuprate", "rpc", "interface"] [features] -default = ["dummy", "serde"] -dummy = [] +default = ["dummy", "serde"] +dummy = [] [dependencies] cuprate-epee-encoding = { path = "../../net/epee-encoding", default-features = false } @@ -18,15 +18,20 @@ cuprate-json-rpc = { path = "../json-rpc", default-features = false } cuprate-rpc-types = { path = "../types", features = ["serde", "epee"], default-features = false } cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } +anyhow = { workspace = true } axum = { version = "0.7.5", features = ["json"], default-features = false } serde = { workspace = true, optional = true } -serde_json = { workspace = true, features = ["std"] } tower = { workspace = true } paste = { workspace = true } futures = { workspace = true } [dev-dependencies] +cuprate-test-utils = { path = "../../test-utils" } + axum = { version = "0.7.5", features = ["json", "tokio", "http2"] } serde_json = { workspace = true, features = ["std"] } tokio = { workspace = true, features = ["full"] } -ureq = { version = "2.10.0", features = ["json"] } \ No newline at end of file +ureq = { version = "2.10.0", features = ["json"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/rpc/interface/README.md b/rpc/interface/README.md index 3a63ac46..fa5496c1 100644 --- a/rpc/interface/README.md +++ b/rpc/interface/README.md @@ -17,7 +17,7 @@ CLIENT ─► ROUTE ─► REQUEST ─► HANDLER ─► RESPONSE ─► CLIENT Everything coming _in_ from a client is handled by this crate. -This is where your [`RpcHandler`] turns this [`RpcRequest`] into a [`RpcResponse`]. 
+This is where your [`RpcHandler`] turns this `Request` into a `Response`. You hand this `Response` back to `cuprate-rpc-interface` and it will take care of sending it back to the client. @@ -42,16 +42,19 @@ The proper usage of this crate is to: 3. Do whatever with it # The [`RpcHandler`] -This is your [`tower::Service`] that converts [`RpcRequest`]s into [`RpcResponse`]s, +This is your [`tower::Service`] that converts `Request`s into `Response`s, i.e. the "inner handler". -Said concretely, `RpcHandler` is a `tower::Service` where the associated types are from this crate: -- [`RpcRequest`] -- [`RpcResponse`] -- [`RpcError`] +Said concretely, `RpcHandler` is 3 `tower::Service`s where the +request/response types are the 3 endpoint enums from [`cuprate_rpc_types`]: +- [`JsonRpcRequest`](cuprate_rpc_types::json::JsonRpcRequest) & [`JsonRpcResponse`](cuprate_rpc_types::json::JsonRpcResponse) +- [`BinRequest`](cuprate_rpc_types::bin::BinRequest) & [`BinResponse`](cuprate_rpc_types::bin::BinRequest) +- [`OtherRequest`](cuprate_rpc_types::other::OtherRequest) & [`OtherResponse`](cuprate_rpc_types::other::OtherRequest) `RpcHandler`'s [`Future`](std::future::Future) is generic, _although_, -it must output `Result`. +it must output `Result<$RESPONSE, anyhow::Error>`. + +The error type must always be [`anyhow::Error`]. The `RpcHandler` must also hold some state that is required for RPC server operation. @@ -83,7 +86,7 @@ use cuprate_rpc_types::{ json::{JsonRpcRequest, JsonRpcResponse, GetBlockCountResponse}, other::{OtherRequest, OtherResponse}, }; -use cuprate_rpc_interface::{RouterBuilder, RpcHandlerDummy, RpcRequest}; +use cuprate_rpc_interface::{RouterBuilder, RpcHandlerDummy}; // Send a `/get_height` request. This endpoint has no inputs. 
async fn get_height(port: u16) -> OtherResponse { diff --git a/rpc/interface/src/lib.rs b/rpc/interface/src/lib.rs index 2656b074..1f84738e 100644 --- a/rpc/interface/src/lib.rs +++ b/rpc/interface/src/lib.rs @@ -1,123 +1,25 @@ #![doc = include_str!("../README.md")] #![cfg_attr(docsrs, feature(doc_cfg))] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_doc_comments, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style, - unreachable_pub -)] -#![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. 
- clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - // TODO - rustdoc::bare_urls, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints when running in debug mode. -#![cfg_attr( - debug_assertions, - allow( - clippy::todo, - clippy::multiple_crate_versions, - unused_imports, - unused_variables - ) -)] -// Allow some lints in tests. -#![cfg_attr( - test, - allow( - clippy::cognitive_complexity, - clippy::needless_pass_by_value, - clippy::cast_possible_truncation, - clippy::too_many_lines - ) -)] -// TODO: remove me after finishing impl -#![allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] - -//---------------------------------------------------------------------------------------------------- Mod mod route; mod router_builder; -mod rpc_error; mod rpc_handler; #[cfg(feature = "dummy")] mod rpc_handler_dummy; -mod rpc_request; -mod rpc_response; +mod rpc_service; pub use router_builder::RouterBuilder; -pub use rpc_error::RpcError; pub use rpc_handler::RpcHandler; #[cfg(feature = "dummy")] pub use rpc_handler_dummy::RpcHandlerDummy; -pub use rpc_request::RpcRequest; -pub use rpc_response::RpcResponse; +pub use rpc_service::RpcService; + +// false-positive: used in `README.md`'s doc-test. 
+#[cfg(test)] +mod test { + extern crate axum; + extern crate cuprate_test_utils; + extern crate serde_json; + extern crate tokio; + extern crate ureq; +} diff --git a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs index b17b98c6..90d06c8f 100644 --- a/rpc/interface/src/route/bin.rs +++ b/rpc/interface/src/route/bin.rs @@ -5,9 +5,16 @@ use axum::{body::Bytes, extract::State, http::StatusCode}; use tower::ServiceExt; use cuprate_epee_encoding::from_bytes; -use cuprate_rpc_types::bin::{BinRequest, BinResponse, GetTransactionPoolHashesRequest}; +use cuprate_rpc_types::{ + bin::{ + BinRequest, BinResponse, GetBlocksByHeightRequest, GetBlocksRequest, GetHashesRequest, + GetOutputIndexesRequest, GetOutsRequest, GetTransactionPoolHashesRequest, + }, + json::GetOutputDistributionRequest, + RpcCall, +}; -use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; +use crate::rpc_handler::RpcHandler; //---------------------------------------------------------------------------------------------------- Routes /// This macro generates route functions that expect input. @@ -66,14 +73,17 @@ macro_rules! generate_endpoints_inner { ($variant:ident, $handler:ident, $request:expr) => { paste::paste! { { - // Send request. - let request = RpcRequest::Binary($request); - let channel = $handler.oneshot(request).await?; + // Check if restricted. + if [<$variant Request>]::IS_RESTRICTED && $handler.restricted() { + // TODO: mimic `monerod` behavior. + return Err(StatusCode::FORBIDDEN); + } - // Assert the response from the inner handler is correct. - let RpcResponse::Binary(response) = channel else { - panic!("RPC handler did not return a binary response"); + // Send request. + let Ok(response) = $handler.oneshot($request).await else { + return Err(StatusCode::INTERNAL_SERVER_ERROR); }; + let BinResponse::$variant(response) = response else { panic!("RPC handler returned incorrect response"); }; @@ -81,7 +91,7 @@ macro_rules! 
generate_endpoints_inner { // Serialize to bytes and respond. match cuprate_epee_encoding::to_bytes(response) { Ok(bytes) => Ok(bytes.freeze()), - Err(e) => Err(StatusCode::INTERNAL_SERVER_ERROR), + Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR), } } } diff --git a/rpc/interface/src/route/json_rpc.rs b/rpc/interface/src/route/json_rpc.rs index bd35e437..7efb8513 100644 --- a/rpc/interface/src/route/json_rpc.rs +++ b/rpc/interface/src/route/json_rpc.rs @@ -8,21 +8,21 @@ use tower::ServiceExt; use cuprate_json_rpc::{ error::{ErrorCode, ErrorObject}, - Id, + Id, Response, }; use cuprate_rpc_types::{ json::{JsonRpcRequest, JsonRpcResponse}, RpcCallValue, }; -use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; +use crate::rpc_handler::RpcHandler; //---------------------------------------------------------------------------------------------------- Routes /// The `/json_rpc` route function used in [`crate::RouterBuilder`]. pub(crate) async fn json_rpc( State(handler): State, Json(request): Json>, -) -> Result>, StatusCode> { +) -> Result>, StatusCode> { // TODO: // // JSON-RPC notifications (requests without `id`) @@ -30,6 +30,11 @@ pub(crate) async fn json_rpc( // must remain. How to do this considering this function will // always return and cause `axum` to respond? + // JSON-RPC 2.0 rule: + // If there was an error in detecting the `Request`'s ID, + // the `Response` must contain an `Id::Null` + let id = request.id.unwrap_or(Id::Null); + // Return early if this RPC server is restricted and // the requested method is only for non-restricted RPC. 
if request.body.is_restricted() && handler.restricted() { @@ -39,26 +44,17 @@ pub(crate) async fn json_rpc( data: None, }; - // JSON-RPC 2.0 rule: - // If there was an error in detecting the `Request`'s ID, - // the `Response` must contain an `Id::Null` - let id = request.id.unwrap_or(Id::Null); - - let response = cuprate_json_rpc::Response::err(id, error_object); + let response = Response::err(id, error_object); return Ok(Json(response)); } // Send request. - let request = RpcRequest::JsonRpc(request); - let channel = handler.oneshot(request).await?; - - // Assert the response from the inner handler is correct. - let RpcResponse::JsonRpc(response) = channel else { - panic!("RPC handler returned incorrect response"); + let Ok(response) = handler.oneshot(request.body).await else { + return Err(StatusCode::INTERNAL_SERVER_ERROR); }; - Ok(Json(response)) + Ok(Json(Response::ok(id, response))) } //---------------------------------------------------------------------------------------------------- Tests diff --git a/rpc/interface/src/route/other.rs b/rpc/interface/src/route/other.rs index ce778db9..3ff84487 100644 --- a/rpc/interface/src/route/other.rs +++ b/rpc/interface/src/route/other.rs @@ -25,7 +25,7 @@ use cuprate_rpc_types::{ RpcCall, }; -use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; +use crate::rpc_handler::RpcHandler; //---------------------------------------------------------------------------------------------------- Routes /// This macro generates route functions that expect input. @@ -81,13 +81,11 @@ macro_rules! generate_endpoints_inner { } // Send request. - let request = RpcRequest::Other(OtherRequest::$variant($request)); - let channel = $handler.oneshot(request).await?; - - // Assert the response from the inner handler is correct. 
- let RpcResponse::Other(response) = channel else { - panic!("RPC handler did not return a binary response"); + let request = OtherRequest::$variant($request); + let Ok(response) = $handler.oneshot(request).await else { + return Err(StatusCode::INTERNAL_SERVER_ERROR); }; + let OtherResponse::$variant(response) = response else { panic!("RPC handler returned incorrect response") }; diff --git a/rpc/interface/src/router_builder.rs b/rpc/interface/src/router_builder.rs index d370cf4e..2e80c433 100644 --- a/rpc/interface/src/router_builder.rs +++ b/rpc/interface/src/router_builder.rs @@ -1,12 +1,7 @@ //! Free functions. -use std::marker::PhantomData; - //---------------------------------------------------------------------------------------------------- Use -use axum::{ - routing::{method_routing::get, post}, - Router, -}; +use axum::Router; use crate::{ route::{bin, fallback, json_rpc, other}, diff --git a/rpc/interface/src/rpc_error.rs b/rpc/interface/src/rpc_error.rs deleted file mode 100644 index 92b9cc1b..00000000 --- a/rpc/interface/src/rpc_error.rs +++ /dev/null @@ -1,34 +0,0 @@ -//! RPC errors. - -//---------------------------------------------------------------------------------------------------- Import -use axum::http::StatusCode; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -//---------------------------------------------------------------------------------------------------- RpcError -/// Possible errors during RPC operation. -/// -/// These are any errors that can happen _during_ a handler function. -/// I.e. if this error surfaces, it happened _after_ the request was -/// deserialized. -/// -/// This is the `Error` type required to be used in an [`RpcHandler`](crate::RpcHandler). -/// -/// TODO: This is empty as possible errors will be -/// enumerated when the handler functions are created. 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub enum RpcError {} - -impl From for StatusCode { - fn from(value: RpcError) -> Self { - // TODO - Self::INTERNAL_SERVER_ERROR - } -} - -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test { - // use super::*; -} diff --git a/rpc/interface/src/rpc_handler.rs b/rpc/interface/src/rpc_handler.rs index 3d1c28d4..1d2676c7 100644 --- a/rpc/interface/src/rpc_handler.rs +++ b/rpc/interface/src/rpc_handler.rs @@ -1,49 +1,42 @@ //! RPC handler trait. //---------------------------------------------------------------------------------------------------- Use -use std::{future::Future, task::Poll}; +use cuprate_rpc_types::{ + bin::{BinRequest, BinResponse}, + json::{JsonRpcRequest, JsonRpcResponse}, + other::{OtherRequest, OtherResponse}, +}; -use axum::{http::StatusCode, response::IntoResponse}; -use futures::{channel::oneshot::channel, FutureExt}; -use tower::Service; - -use cuprate_helper::asynch::InfallibleOneshotReceiver; -use cuprate_json_rpc::Id; -use cuprate_rpc_types::json::JsonRpcRequest; - -use crate::{rpc_error::RpcError, rpc_request::RpcRequest, rpc_response::RpcResponse}; +use crate::RpcService; //---------------------------------------------------------------------------------------------------- RpcHandler /// An RPC handler. /// -/// This trait represents a type that can turn [`RpcRequest`]s into [`RpcResponse`]s. +/// This trait represents a type that can turn `Request`s into `Response`s. 
/// -/// Implementors of this trait must be [`tower::Service`]s that use: -/// - [`RpcRequest`] as the generic `Request` type -/// - [`RpcResponse`] as the associated `Response` type -/// - [`RpcError`] as the associated `Error` type -/// - A generic [`Future`] that outputs `Result` +/// Implementors of this trait must be: +/// - A [`tower::Service`] that uses [`JsonRpcRequest`] & [`JsonRpcResponse`] +/// - A [`tower::Service`] that uses [`BinRequest`] & [`BinResponse`] +/// - A [`tower::Service`] that uses [`OtherRequest`] & [`OtherResponse`] +/// +/// In other words, an [`RpcHandler`] is a type that implements [`tower::Service`] 3 times, +/// one for each request/response enum type found in [`cuprate_rpc_types`]. +/// +/// The error type must always be [`anyhow::Error`]. /// /// See this crate's `RpcHandlerDummy` for an implementation example of this trait. /// /// # Panics -/// Your [`RpcHandler`] must reply to [`RpcRequest`]s with the correct -/// [`RpcResponse`] or else this crate will panic during routing functions. +/// Your [`RpcHandler`] must reply to `Request`s with the correct +/// `Response` or else this crate will panic during routing functions. /// -/// For example, upon a [`RpcRequest::Binary`] must be replied with -/// [`RpcRequest::Binary`]. If an [`RpcRequest::Other`] were returned instead, -/// this crate would panic. +/// For example, a [`JsonRpcRequest::GetBlockCount`] must be replied with +/// [`JsonRpcResponse::GetBlockCount`]. If anything else is returned, +/// this crate may panic. pub trait RpcHandler: - Clone - + Send - + Sync - + 'static - + Service< - RpcRequest, - Response = RpcResponse, - Error = RpcError, - Future: Future> + Send + Sync + 'static, - > + RpcService + + RpcService + + RpcService { /// Is this [`RpcHandler`] restricted? 
/// diff --git a/rpc/interface/src/rpc_handler_dummy.rs b/rpc/interface/src/rpc_handler_dummy.rs index 97b75853..0b018354 100644 --- a/rpc/interface/src/rpc_handler_dummy.rs +++ b/rpc/interface/src/rpc_handler_dummy.rs @@ -3,20 +3,21 @@ //---------------------------------------------------------------------------------------------------- Use use std::task::Poll; -use futures::{channel::oneshot::channel, FutureExt}; +use anyhow::Error; +use futures::channel::oneshot::channel; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use tower::Service; use cuprate_helper::asynch::InfallibleOneshotReceiver; -use cuprate_json_rpc::Id; -use cuprate_rpc_types::json::JsonRpcRequest; - -use crate::{ - rpc_error::RpcError, rpc_handler::RpcHandler, rpc_request::RpcRequest, - rpc_response::RpcResponse, +use cuprate_rpc_types::{ + bin::{BinRequest, BinResponse}, + json::{JsonRpcRequest, JsonRpcResponse}, + other::{OtherRequest, OtherResponse}, }; +use crate::rpc_handler::RpcHandler; + //---------------------------------------------------------------------------------------------------- RpcHandlerDummy /// An [`RpcHandler`] that always returns [`Default::default`]. 
/// @@ -43,96 +44,133 @@ impl RpcHandler for RpcHandlerDummy { } } -impl Service for RpcHandlerDummy { - type Response = RpcResponse; - type Error = RpcError; - type Future = InfallibleOneshotReceiver>; +impl Service for RpcHandlerDummy { + type Response = JsonRpcResponse; + type Error = Error; + type Future = InfallibleOneshotReceiver>; - fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn call(&mut self, req: RpcRequest) -> Self::Future { - use cuprate_rpc_types::bin::BinRequest as BReq; - use cuprate_rpc_types::bin::BinResponse as BResp; - use cuprate_rpc_types::json::JsonRpcRequest as JReq; - use cuprate_rpc_types::json::JsonRpcResponse as JResp; - use cuprate_rpc_types::other::OtherRequest as OReq; - use cuprate_rpc_types::other::OtherResponse as OResp; + fn call(&mut self, req: JsonRpcRequest) -> Self::Future { + use cuprate_rpc_types::json::JsonRpcRequest as Req; + use cuprate_rpc_types::json::JsonRpcResponse as Resp; - #[rustfmt::skip] #[allow(clippy::default_trait_access)] let resp = match req { - RpcRequest::JsonRpc(j) => RpcResponse::JsonRpc(cuprate_json_rpc::Response::ok(Id::Null, match j.body { - JReq::GetBlockCount(_) => JResp::GetBlockCount(Default::default()), - JReq::OnGetBlockHash(_) => JResp::OnGetBlockHash(Default::default()), - JReq::SubmitBlock(_) => JResp::SubmitBlock(Default::default()), - JReq::GenerateBlocks(_) => JResp::GenerateBlocks(Default::default()), - JReq::GetLastBlockHeader(_) => JResp::GetLastBlockHeader(Default::default()), - JReq::GetBlockHeaderByHash(_) => JResp::GetBlockHeaderByHash(Default::default()), - JReq::GetBlockHeaderByHeight(_) => JResp::GetBlockHeaderByHeight(Default::default()), - JReq::GetBlockHeadersRange(_) => JResp::GetBlockHeadersRange(Default::default()), - JReq::GetBlock(_) => JResp::GetBlock(Default::default()), - JReq::GetConnections(_) => JResp::GetConnections(Default::default()), - 
JReq::GetInfo(_) => JResp::GetInfo(Default::default()), - JReq::HardForkInfo(_) => JResp::HardForkInfo(Default::default()), - JReq::SetBans(_) => JResp::SetBans(Default::default()), - JReq::GetBans(_) => JResp::GetBans(Default::default()), - JReq::Banned(_) => JResp::Banned(Default::default()), - JReq::FlushTransactionPool(_) => JResp::FlushTransactionPool(Default::default()), - JReq::GetOutputHistogram(_) => JResp::GetOutputHistogram(Default::default()), - JReq::GetCoinbaseTxSum(_) => JResp::GetCoinbaseTxSum(Default::default()), - JReq::GetVersion(_) => JResp::GetVersion(Default::default()), - JReq::GetFeeEstimate(_) => JResp::GetFeeEstimate(Default::default()), - JReq::GetAlternateChains(_) => JResp::GetAlternateChains(Default::default()), - JReq::RelayTx(_) => JResp::RelayTx(Default::default()), - JReq::SyncInfo(_) => JResp::SyncInfo(Default::default()), - JReq::GetTransactionPoolBacklog(_) => JResp::GetTransactionPoolBacklog(Default::default()), - JReq::GetMinerData(_) => JResp::GetMinerData(Default::default()), - JReq::PruneBlockchain(_) => JResp::PruneBlockchain(Default::default()), - JReq::CalcPow(_) => JResp::CalcPow(Default::default()), - JReq::FlushCache(_) => JResp::FlushCache(Default::default()), - JReq::AddAuxPow(_) => JResp::AddAuxPow(Default::default()), - JReq::GetTxIdsLoose(_) => JResp::GetTxIdsLoose(Default::default()), - })), - RpcRequest::Binary(b) => RpcResponse::Binary(match b { - BReq::GetBlocks(_) => BResp::GetBlocks(Default::default()), - BReq::GetBlocksByHeight(_) => BResp::GetBlocksByHeight(Default::default()), - BReq::GetHashes(_) => BResp::GetHashes(Default::default()), - BReq::GetOutputIndexes(_) => BResp::GetOutputIndexes(Default::default()), - BReq::GetOuts(_) => BResp::GetOuts(Default::default()), - BReq::GetTransactionPoolHashes(_) => BResp::GetTransactionPoolHashes(Default::default()), - BReq::GetOutputDistribution(_) => BResp::GetOutputDistribution(Default::default()), - }), - RpcRequest::Other(o) => RpcResponse::Other(match o { 
- OReq::GetHeight(_) => OResp::GetHeight(Default::default()), - OReq::GetTransactions(_) => OResp::GetTransactions(Default::default()), - OReq::GetAltBlocksHashes(_) => OResp::GetAltBlocksHashes(Default::default()), - OReq::IsKeyImageSpent(_) => OResp::IsKeyImageSpent(Default::default()), - OReq::SendRawTransaction(_) => OResp::SendRawTransaction(Default::default()), - OReq::StartMining(_) => OResp::StartMining(Default::default()), - OReq::StopMining(_) => OResp::StopMining(Default::default()), - OReq::MiningStatus(_) => OResp::MiningStatus(Default::default()), - OReq::SaveBc(_) => OResp::SaveBc(Default::default()), - OReq::GetPeerList(_) => OResp::GetPeerList(Default::default()), - OReq::SetLogHashRate(_) => OResp::SetLogHashRate(Default::default()), - OReq::SetLogLevel(_) => OResp::SetLogLevel(Default::default()), - OReq::SetLogCategories(_) => OResp::SetLogCategories(Default::default()), - OReq::SetBootstrapDaemon(_) => OResp::SetBootstrapDaemon(Default::default()), - OReq::GetTransactionPool(_) => OResp::GetTransactionPool(Default::default()), - OReq::GetTransactionPoolStats(_) => OResp::GetTransactionPoolStats(Default::default()), - OReq::StopDaemon(_) => OResp::StopDaemon(Default::default()), - OReq::GetLimit(_) => OResp::GetLimit(Default::default()), - OReq::SetLimit(_) => OResp::SetLimit(Default::default()), - OReq::OutPeers(_) => OResp::OutPeers(Default::default()), - OReq::InPeers(_) => OResp::InPeers(Default::default()), - OReq::GetNetStats(_) => OResp::GetNetStats(Default::default()), - OReq::GetOuts(_) => OResp::GetOuts(Default::default()), - OReq::Update(_) => OResp::Update(Default::default()), - OReq::PopBlocks(_) => OResp::PopBlocks(Default::default()), - OReq::GetTransactionPoolHashes(_) => OResp::GetTransactionPoolHashes(Default::default()), - OReq::GetPublicNodes(_) => OResp::GetPublicNodes(Default::default()), - }) + Req::GetBlockCount(_) => Resp::GetBlockCount(Default::default()), + Req::OnGetBlockHash(_) => 
Resp::OnGetBlockHash(Default::default()), + Req::SubmitBlock(_) => Resp::SubmitBlock(Default::default()), + Req::GenerateBlocks(_) => Resp::GenerateBlocks(Default::default()), + Req::GetLastBlockHeader(_) => Resp::GetLastBlockHeader(Default::default()), + Req::GetBlockHeaderByHash(_) => Resp::GetBlockHeaderByHash(Default::default()), + Req::GetBlockHeaderByHeight(_) => Resp::GetBlockHeaderByHeight(Default::default()), + Req::GetBlockHeadersRange(_) => Resp::GetBlockHeadersRange(Default::default()), + Req::GetBlock(_) => Resp::GetBlock(Default::default()), + Req::GetConnections(_) => Resp::GetConnections(Default::default()), + Req::GetInfo(_) => Resp::GetInfo(Default::default()), + Req::HardForkInfo(_) => Resp::HardForkInfo(Default::default()), + Req::SetBans(_) => Resp::SetBans(Default::default()), + Req::GetBans(_) => Resp::GetBans(Default::default()), + Req::Banned(_) => Resp::Banned(Default::default()), + Req::FlushTransactionPool(_) => Resp::FlushTransactionPool(Default::default()), + Req::GetOutputHistogram(_) => Resp::GetOutputHistogram(Default::default()), + Req::GetCoinbaseTxSum(_) => Resp::GetCoinbaseTxSum(Default::default()), + Req::GetVersion(_) => Resp::GetVersion(Default::default()), + Req::GetFeeEstimate(_) => Resp::GetFeeEstimate(Default::default()), + Req::GetAlternateChains(_) => Resp::GetAlternateChains(Default::default()), + Req::RelayTx(_) => Resp::RelayTx(Default::default()), + Req::SyncInfo(_) => Resp::SyncInfo(Default::default()), + Req::GetTransactionPoolBacklog(_) => { + Resp::GetTransactionPoolBacklog(Default::default()) + } + Req::GetMinerData(_) => Resp::GetMinerData(Default::default()), + Req::PruneBlockchain(_) => Resp::PruneBlockchain(Default::default()), + Req::CalcPow(_) => Resp::CalcPow(Default::default()), + Req::FlushCache(_) => Resp::FlushCache(Default::default()), + Req::AddAuxPow(_) => Resp::AddAuxPow(Default::default()), + Req::GetTxIdsLoose(_) => Resp::GetTxIdsLoose(Default::default()), + }; + + let (tx, rx) = channel(); + 
drop(tx.send(Ok(resp))); + InfallibleOneshotReceiver::from(rx) + } +} + +impl Service for RpcHandlerDummy { + type Response = BinResponse; + type Error = Error; + type Future = InfallibleOneshotReceiver>; + + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: BinRequest) -> Self::Future { + use cuprate_rpc_types::bin::BinRequest as Req; + use cuprate_rpc_types::bin::BinResponse as Resp; + + #[allow(clippy::default_trait_access)] + let resp = match req { + Req::GetBlocks(_) => Resp::GetBlocks(Default::default()), + Req::GetBlocksByHeight(_) => Resp::GetBlocksByHeight(Default::default()), + Req::GetHashes(_) => Resp::GetHashes(Default::default()), + Req::GetOutputIndexes(_) => Resp::GetOutputIndexes(Default::default()), + Req::GetOuts(_) => Resp::GetOuts(Default::default()), + Req::GetTransactionPoolHashes(_) => Resp::GetTransactionPoolHashes(Default::default()), + Req::GetOutputDistribution(_) => Resp::GetOutputDistribution(Default::default()), + }; + + let (tx, rx) = channel(); + drop(tx.send(Ok(resp))); + InfallibleOneshotReceiver::from(rx) + } +} + +impl Service for RpcHandlerDummy { + type Response = OtherResponse; + type Error = Error; + type Future = InfallibleOneshotReceiver>; + + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: OtherRequest) -> Self::Future { + use cuprate_rpc_types::other::OtherRequest as Req; + use cuprate_rpc_types::other::OtherResponse as Resp; + + #[allow(clippy::default_trait_access)] + let resp = match req { + Req::GetHeight(_) => Resp::GetHeight(Default::default()), + Req::GetTransactions(_) => Resp::GetTransactions(Default::default()), + Req::GetAltBlocksHashes(_) => Resp::GetAltBlocksHashes(Default::default()), + Req::IsKeyImageSpent(_) => Resp::IsKeyImageSpent(Default::default()), + Req::SendRawTransaction(_) => Resp::SendRawTransaction(Default::default()), + Req::StartMining(_) => 
Resp::StartMining(Default::default()), + Req::StopMining(_) => Resp::StopMining(Default::default()), + Req::MiningStatus(_) => Resp::MiningStatus(Default::default()), + Req::SaveBc(_) => Resp::SaveBc(Default::default()), + Req::GetPeerList(_) => Resp::GetPeerList(Default::default()), + Req::SetLogHashRate(_) => Resp::SetLogHashRate(Default::default()), + Req::SetLogLevel(_) => Resp::SetLogLevel(Default::default()), + Req::SetLogCategories(_) => Resp::SetLogCategories(Default::default()), + Req::SetBootstrapDaemon(_) => Resp::SetBootstrapDaemon(Default::default()), + Req::GetTransactionPool(_) => Resp::GetTransactionPool(Default::default()), + Req::GetTransactionPoolStats(_) => Resp::GetTransactionPoolStats(Default::default()), + Req::StopDaemon(_) => Resp::StopDaemon(Default::default()), + Req::GetLimit(_) => Resp::GetLimit(Default::default()), + Req::SetLimit(_) => Resp::SetLimit(Default::default()), + Req::OutPeers(_) => Resp::OutPeers(Default::default()), + Req::InPeers(_) => Resp::InPeers(Default::default()), + Req::GetNetStats(_) => Resp::GetNetStats(Default::default()), + Req::GetOuts(_) => Resp::GetOuts(Default::default()), + Req::Update(_) => Resp::Update(Default::default()), + Req::PopBlocks(_) => Resp::PopBlocks(Default::default()), + Req::GetTransactionPoolHashes(_) => Resp::GetTransactionPoolHashes(Default::default()), + Req::GetPublicNodes(_) => Resp::GetPublicNodes(Default::default()), }; let (tx, rx) = channel(); diff --git a/rpc/interface/src/rpc_request.rs b/rpc/interface/src/rpc_request.rs deleted file mode 100644 index 3b66a780..00000000 --- a/rpc/interface/src/rpc_request.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! RPC requests. 
- -//---------------------------------------------------------------------------------------------------- Import -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -use cuprate_rpc_types::{bin::BinRequest, json::JsonRpcRequest, other::OtherRequest}; - -//---------------------------------------------------------------------------------------------------- RpcRequest -/// All possible RPC requests. -/// -/// This enum encapsulates all possible RPC requests: -/// - JSON RPC 2.0 requests -/// - Binary requests -/// - Other JSON requests -/// -/// It is the `Request` type required to be used in an [`RpcHandler`](crate::RpcHandler). -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub enum RpcRequest { - /// JSON-RPC 2.0 requests. - JsonRpc(cuprate_json_rpc::Request), - /// Binary requests. - Binary(BinRequest), - /// Other JSON requests. - Other(OtherRequest), -} - -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test { - // use super::*; -} diff --git a/rpc/interface/src/rpc_response.rs b/rpc/interface/src/rpc_response.rs deleted file mode 100644 index 7e8ecdbe..00000000 --- a/rpc/interface/src/rpc_response.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! RPC responses. - -//---------------------------------------------------------------------------------------------------- Import -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -use cuprate_rpc_types::{bin::BinResponse, json::JsonRpcResponse, other::OtherResponse}; - -//---------------------------------------------------------------------------------------------------- RpcResponse -/// All possible RPC responses. 
-/// -/// This enum encapsulates all possible RPC responses: -/// - JSON RPC 2.0 responses -/// - Binary responses -/// - Other JSON responses -/// -/// It is the `Response` type required to be used in an [`RpcHandler`](crate::RpcHandler). -#[derive(Clone, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub enum RpcResponse { - /// JSON RPC 2.0 responses. - JsonRpc(cuprate_json_rpc::Response), - /// Binary responses. - Binary(BinResponse), - /// Other JSON responses. - Other(OtherResponse), -} - -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test { - // use super::*; -} diff --git a/rpc/interface/src/rpc_service.rs b/rpc/interface/src/rpc_service.rs new file mode 100644 index 00000000..285d60ba --- /dev/null +++ b/rpc/interface/src/rpc_service.rs @@ -0,0 +1,50 @@ +//! RPC [`tower::Service`] trait. + +//---------------------------------------------------------------------------------------------------- Use +use std::future::Future; + +use tower::Service; + +//---------------------------------------------------------------------------------------------------- RpcService +/// An RPC [`tower::Service`]. +/// +/// This trait solely exists to encapsulate the traits needed +/// to handle RPC requests and respond with responses - **it is +/// not meant to be used directly.** +/// +/// The `Request` and `Response` are generic and +/// are used in the [`tower::Service`] bounds. +/// +/// The error type is always [`anyhow::Error`]. +/// +/// There is a blanket implementation that implements this +/// trait on types that implement `tower::Service` correctly. +/// +/// See [`RpcHandler`](crate::RpcHandler) for more information. 
+pub trait RpcService: + Clone + + Send + + Sync + + 'static + + Service< + Request, + Response = Response, + Error = anyhow::Error, + Future: Future> + Send + 'static, + > +{ +} + +impl RpcService for T where + Self: Clone + + Send + + Sync + + 'static + + Service< + Request, + Response = Response, + Error = anyhow::Error, + Future: Future> + Send + 'static, + > +{ +} diff --git a/rpc/json-rpc/Cargo.toml b/rpc/json-rpc/Cargo.toml index 777f3264..5d2544e4 100644 --- a/rpc/json-rpc/Cargo.toml +++ b/rpc/json-rpc/Cargo.toml @@ -17,4 +17,7 @@ serde_json = { workspace = true, features = ["std"] } thiserror = { workspace = true } [dev-dependencies] -pretty_assertions = { workspace = true } \ No newline at end of file +pretty_assertions = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/rpc/json-rpc/src/lib.rs b/rpc/json-rpc/src/lib.rs index 45ac2efb..dfc4b181 100644 --- a/rpc/json-rpc/src/lib.rs +++ b/rpc/json-rpc/src/lib.rs @@ -1,96 +1,5 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. 
- future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - clippy::missing_docs_in_private_items, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style -)] -#![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints when running in debug mode. -#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))] -// Allow some lints in tests. 
-#![cfg_attr( - test, - allow( - clippy::cognitive_complexity, - clippy::needless_pass_by_value, - clippy::cast_possible_truncation, - clippy::too_many_lines - ) -)] - -//---------------------------------------------------------------------------------------------------- Mod/Use pub mod error; mod id; @@ -105,6 +14,5 @@ pub use request::Request; mod response; pub use response::Response; -//---------------------------------------------------------------------------------------------------- TESTS #[cfg(test)] mod tests; diff --git a/rpc/json-rpc/src/response.rs b/rpc/json-rpc/src/response.rs index efd768b5..2b846069 100644 --- a/rpc/json-rpc/src/response.rs +++ b/rpc/json-rpc/src/response.rs @@ -304,14 +304,14 @@ where if payload.is_none() { payload = Some(Ok(map.next_value::()?)); } else { - return Err(serde::de::Error::duplicate_field("result/error")); + return Err(Error::duplicate_field("result/error")); } } Key::Error => { if payload.is_none() { payload = Some(Err(map.next_value::()?)); } else { - return Err(serde::de::Error::duplicate_field("result/error")); + return Err(Error::duplicate_field("result/error")); } } Key::Unknown => { diff --git a/rpc/json-rpc/src/tests.rs b/rpc/json-rpc/src/tests.rs index ff8f0496..3ee60881 100644 --- a/rpc/json-rpc/src/tests.rs +++ b/rpc/json-rpc/src/tests.rs @@ -52,6 +52,7 @@ where } /// Tests an input JSON string matches an expected type `T`. 
+#[allow(clippy::needless_pass_by_value)] // serde signature fn assert_de(json: &'static str, expected: T) where T: DeserializeOwned + std::fmt::Debug + Clone + PartialEq, diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index 9c996818..08b13b18 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -18,13 +18,14 @@ cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true } cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } cuprate-types = { path = "../../types" } -monero-serai = { workspace = true } paste = { workspace = true } serde = { workspace = true, optional = true } [dev-dependencies] cuprate-test-utils = { path = "../../test-utils" } -cuprate-json-rpc = { path = "../json-rpc" } -serde_json = { workspace = true } -pretty_assertions = { workspace = true } \ No newline at end of file +serde = { workspace = true } +serde_json = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index 278e5352..0dbddead 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -13,22 +13,17 @@ use cuprate_epee_encoding::{ container_as_blob::ContainerAsBlob, epee_object, error, macros::bytes::{Buf, BufMut}, - read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, + read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, }; use cuprate_types::BlockCompleteEntry; use crate::{ - base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_height, default_string, default_vec, default_zero}, - free::{is_one, is_zero}, + base::AccessResponseBase, + defaults::{default_false, default_zero}, macros::{define_request, define_request_and_response, define_request_and_response_doc}, - misc::{ - AuxPow, BlockHeader, BlockOutputIndices, ChainInfo, ConnectionInfo, GetBan, GetOutputsOut, - HardforkEntry, HistogramEntry, OutKeyBin, OutputDistributionData, Peer, PoolInfoExtent, - PoolTxInfo, SetBan, Span, Status, TxBacklogEntry, - 
}, - rpc_call::{RpcCall, RpcCallValue}, + misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfoExtent, PoolTxInfo, Status}, + rpc_call::RpcCallValue, }; //---------------------------------------------------------------------------------------------------- Definitions diff --git a/rpc/types/src/defaults.rs b/rpc/types/src/defaults.rs index 6addd0ab..def5df44 100644 --- a/rpc/types/src/defaults.rs +++ b/rpc/types/src/defaults.rs @@ -8,7 +8,6 @@ //! `height`, it will use [`default_height`] to fill that in. //---------------------------------------------------------------------------------------------------- Import -use std::borrow::Cow; //---------------------------------------------------------------------------------------------------- TODO /// Default [`bool`] type used in request/response types, `false`. @@ -23,12 +22,6 @@ pub(crate) const fn default_true() -> bool { true } -/// Default `Cow<'static, str` type used in request/response types. -#[inline] -pub(crate) const fn default_cow_str() -> Cow<'static, str> { - Cow::Borrowed("") -} - /// Default [`String`] type used in request/response types. #[inline] pub(crate) const fn default_string() -> String { diff --git a/rpc/types/src/free.rs b/rpc/types/src/free.rs index 043a5209..45fb2f76 100644 --- a/rpc/types/src/free.rs +++ b/rpc/types/src/free.rs @@ -6,6 +6,7 @@ /// Returns `true` if the input `u` is equal to `0`. #[inline] #[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` +#[allow(dead_code)] // TODO: see if needed after handlers. pub(crate) const fn is_zero(u: &u64) -> bool { *u == 0 } @@ -13,6 +14,7 @@ pub(crate) const fn is_zero(u: &u64) -> bool { /// Returns `true` the input `u` is equal to `1`. #[inline] #[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` +#[allow(dead_code)] // TODO: see if needed after handlers. 
pub(crate) const fn is_one(u: &u64) -> bool { *u == 1 } diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index 49710613..cfefcf96 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -12,12 +12,11 @@ use crate::{ default_false, default_height, default_one, default_string, default_true, default_vec, default_zero, }, - free::{is_one, is_zero}, macros::define_request_and_response, misc::{ AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, - GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, OutputDistributionData, SetBan, - Span, Status, SyncInfoPeer, TxBacklogEntry, + GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, SetBan, Span, Status, + SyncInfoPeer, TxBacklogEntry, }, rpc_call::RpcCallValue, }; diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index b48f22ed..51ea3cc2 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -1,113 +1,6 @@ #![doc = include_str!("../README.md")] #![cfg_attr(docsrs, feature(doc_cfg))] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, -)] -// Deny lints. 
-// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_doc_comments, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style, - unreachable_pub -)] -#![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - // TODO - rustdoc::bare_urls, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints when running in debug mode. -#![cfg_attr( - debug_assertions, - allow( - clippy::todo, - clippy::multiple_crate_versions, - unused_imports, - unused_variables - ) -)] -// Allow some lints in tests. 
-#![cfg_attr( - test, - allow( - clippy::cognitive_complexity, - clippy::needless_pass_by_value, - clippy::cast_possible_truncation, - clippy::too_many_lines - ) -)] -// TODO: remove me after finishing impl -#![allow( - dead_code, - rustdoc::broken_intra_doc_links // TODO: remove after `{bin,json,other}.rs` gets merged -)] - -//---------------------------------------------------------------------------------------------------- Mod mod constants; mod defaults; mod free; @@ -129,3 +22,10 @@ pub use constants::{ CORE_RPC_VERSION_MINOR, }; pub use rpc_call::{RpcCall, RpcCallValue}; + +// false-positive: used in tests +#[cfg(test)] +mod test { + extern crate cuprate_test_utils; + extern crate serde_json; +} diff --git a/rpc/types/src/misc/distribution.rs b/rpc/types/src/misc/distribution.rs index 1a488d44..55d509e1 100644 --- a/rpc/types/src/misc/distribution.rs +++ b/rpc/types/src/misc/distribution.rs @@ -1,17 +1,14 @@ //! Output distributions for [`crate::json::GetOutputDistributionResponse`]. 
//---------------------------------------------------------------------------------------------------- Use -use std::mem::size_of; - #[cfg(feature = "serde")] -use serde::{ser::SerializeStruct, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::{ epee_object, error, macros::bytes::{Buf, BufMut}, - read_epee_value, read_varint, write_field, write_varint, EpeeObject, EpeeObjectBuilder, - EpeeValue, Marker, + read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, }; //---------------------------------------------------------------------------------------------------- Free @@ -24,7 +21,7 @@ use cuprate_epee_encoding::{ 45..=55 )] #[cfg(feature = "epee")] -fn compress_integer_array(array: &[u64]) -> error::Result> { +fn compress_integer_array(_: &[u64]) -> error::Result> { todo!() } @@ -36,7 +33,7 @@ fn compress_integer_array(array: &[u64]) -> error::Result> { "rpc/core_rpc_server_commands_defs.h", 57..=72 )] -fn decompress_integer_array(array: &[u8]) -> Vec { +fn decompress_integer_array(_: &[u8]) -> Vec { todo!() } @@ -281,9 +278,9 @@ impl EpeeObject for Distribution { //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod tests { - use pretty_assertions::assert_eq; + // use pretty_assertions::assert_eq; - use super::*; + // use super::*; // TODO: re-enable tests after (de)compression functions are implemented. diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 2b31cabf..842997bc 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -5,23 +5,13 @@ //! the [`crate::misc::ConnectionInfo`] struct defined here. 
//---------------------------------------------------------------------------------------------------- Import -use std::fmt::Display; - #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] -use cuprate_epee_encoding::{ - epee_object, - macros::bytes::{Buf, BufMut}, - EpeeValue, Marker, -}; +use cuprate_epee_encoding::epee_object; use crate::{ - constants::{ - CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, - CORE_RPC_STATUS_PAYMENT_REQUIRED, - }, defaults::{default_string, default_zero}, macros::monero_definition_link, }; diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs index bd6454dd..c5c1840e 100644 --- a/rpc/types/src/misc/mod.rs +++ b/rpc/types/src/misc/mod.rs @@ -15,6 +15,7 @@ mod binary_string; mod distribution; mod key_image_spent_status; +#[allow(clippy::module_inception)] mod misc; mod pool_info_extent; mod status; diff --git a/rpc/types/src/misc/tx_entry.rs b/rpc/types/src/misc/tx_entry.rs index e643076d..5151ceef 100644 --- a/rpc/types/src/misc/tx_entry.rs +++ b/rpc/types/src/misc/tx_entry.rs @@ -8,9 +8,9 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::{ - epee_object, error, + error, macros::bytes::{Buf, BufMut}, - read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, Marker, + EpeeObject, EpeeObjectBuilder, }; //---------------------------------------------------------------------------------------------------- TxEntry @@ -123,7 +123,7 @@ impl Default for TxEntry { //---------------------------------------------------------------------------------------------------- Epee #[cfg(feature = "epee")] impl EpeeObjectBuilder for () { - fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { + fn add_field(&mut self, _: &str, _: &mut B) -> error::Result { unreachable!() } @@ -140,7 +140,7 @@ impl EpeeObject for TxEntry { unreachable!() } - fn write_fields(self, w: &mut B) -> error::Result<()> { + fn 
write_fields(self, _: &mut B) -> error::Result<()> { unreachable!() } } diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 9457250f..28c95d2a 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -11,10 +11,9 @@ use crate::{ defaults::{default_false, default_string, default_true, default_vec, default_zero}, macros::define_request_and_response, misc::{ - GetOutputsOut, KeyImageSpentStatus, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, - TxEntry, TxInfo, TxpoolStats, + GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo, + TxpoolStats, }, - rpc_call::RpcCall, RpcCallValue, }; @@ -191,7 +190,7 @@ define_request_and_response! { } )] AccessResponseBase { - /// FIXME: These are [`KeyImageSpentStatus`] in [`u8`] form. + /// FIXME: These are [`KeyImageSpentStatus`](crate::misc::KeyImageSpentStatus) in [`u8`] form. spent_status: Vec, } } diff --git a/rpc/types/src/serde.rs b/rpc/types/src/serde.rs index 70885e09..e624a66d 100644 --- a/rpc/types/src/serde.rs +++ b/rpc/types/src/serde.rs @@ -28,5 +28,5 @@ where //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { - use super::*; + // use super::*; } diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index f45f1bcb..e0399033 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -26,8 +26,8 @@ cuprate-database-service = { path = "../service" } cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } cuprate-types = { path = "../../types", features = ["blockchain"] } -bitflags = { workspace = true, features = ["serde", "bytemuck"] } -bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } +bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } +bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", 
"extern_crate_alloc"] } curve25519-dalek = { workspace = true } cuprate-pruning = { path = "../../pruning" } monero-serai = { workspace = true, features = ["std"] } @@ -39,7 +39,7 @@ thread_local = { workspace = true, optional = true } rayon = { workspace = true, optional = true } [dev-dependencies] -cuprate-helper = { path = "../../helper", features = ["thread"] } +cuprate-helper = { path = "../../helper", features = ["thread", "cast"] } cuprate-test-utils = { path = "../../test-utils" } tokio = { workspace = true, features = ["full"] } @@ -48,3 +48,6 @@ pretty_assertions = { workspace = true } proptest = { workspace = true } hex = { workspace = true } hex-literal = { workspace = true } + +[lints] +workspace = true diff --git a/storage/blockchain/src/config/config.rs b/storage/blockchain/src/config.rs similarity index 75% rename from storage/blockchain/src/config/config.rs rename to storage/blockchain/src/config.rs index 5bfbf744..e4b76068 100644 --- a/storage/blockchain/src/config/config.rs +++ b/storage/blockchain/src/config.rs @@ -1,4 +1,44 @@ -//! The main [`Config`] struct, holding all configurable values. +//! Database configuration. +//! +//! This module contains the main [`Config`]uration struct +//! for the database [`Env`](cuprate_database::Env)ironment, +//! and blockchain-specific configuration. +//! +//! It also contains types related to configuration settings. +//! +//! The main constructor is the [`ConfigBuilder`]. +//! +//! These configurations are processed at runtime, meaning +//! the `Env` can/will dynamically adjust its behavior based +//! on these values. +//! +//! # Example +//! ```rust +//! use cuprate_blockchain::{ +//! cuprate_database::{Env, config::SyncMode}, +//! config::{ConfigBuilder, ReaderThreads}, +//! }; +//! +//! # fn main() -> Result<(), Box> { +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); +//! +//! let config = ConfigBuilder::new() +//! // Use a custom database directory. +//! 
.db_directory(db_dir.into()) +//! // Use as many reader threads as possible (when using `service`). +//! .reader_threads(ReaderThreads::OnePerThread) +//! // Use the fastest sync mode. +//! .sync_mode(SyncMode::Fast) +//! // Build into `Config` +//! .build(); +//! +//! // Start a database `service` using this configuration. +//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?; +//! // It's using the config we provided. +//! assert_eq!(env.config(), &config.db_config); +//! # Ok(()) } +//! ``` //---------------------------------------------------------------------------------------------------- Import use std::{borrow::Cow, path::Path}; @@ -7,7 +47,7 @@ use std::{borrow::Cow, path::Path}; use serde::{Deserialize, Serialize}; use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm}; -use cuprate_helper::fs::cuprate_blockchain_dir; +use cuprate_helper::fs::CUPRATE_BLOCKCHAIN_DIR; // re-exports pub use cuprate_database_service::ReaderThreads; @@ -38,7 +78,7 @@ impl ConfigBuilder { Self { db_directory: None, db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( - cuprate_blockchain_dir(), + &*CUPRATE_BLOCKCHAIN_DIR, )), reader_threads: None, } @@ -48,7 +88,7 @@ impl ConfigBuilder { /// /// # Default values /// If [`ConfigBuilder::db_directory`] was not called, - /// the default [`cuprate_blockchain_dir`] will be used. + /// the default [`CUPRATE_BLOCKCHAIN_DIR`] will be used. /// /// For all other values, [`Default::default`] is used. pub fn build(self) -> Config { @@ -56,7 +96,7 @@ impl ConfigBuilder { // in `helper::fs`. No need to do them here. 
let db_directory = self .db_directory - .unwrap_or_else(|| Cow::Borrowed(cuprate_blockchain_dir())); + .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)); let reader_threads = self.reader_threads.unwrap_or_default(); let db_config = self @@ -106,7 +146,7 @@ impl ConfigBuilder { #[must_use] pub fn fast(mut self) -> Self { self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(cuprate_blockchain_dir())) + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)) .fast(); self.reader_threads = Some(ReaderThreads::OnePerThread); @@ -120,7 +160,7 @@ impl ConfigBuilder { #[must_use] pub fn low_power(mut self) -> Self { self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(cuprate_blockchain_dir())) + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)) .low_power(); self.reader_threads = Some(ReaderThreads::One); @@ -130,7 +170,7 @@ impl ConfigBuilder { impl Default for ConfigBuilder { fn default() -> Self { - let db_directory = Cow::Borrowed(cuprate_blockchain_dir()); + let db_directory = Cow::Borrowed(&**CUPRATE_BLOCKCHAIN_DIR); Self { db_directory: Some(db_directory.clone()), db_config: cuprate_database::config::ConfigBuilder::new(db_directory), @@ -161,7 +201,7 @@ impl Config { /// Create a new [`Config`] with sane default settings. /// /// The [`cuprate_database::config::Config::db_directory`] - /// will be set to [`cuprate_blockchain_dir`]. + /// will be set to [`CUPRATE_BLOCKCHAIN_DIR`]. /// /// All other values will be [`Default::default`]. 
/// @@ -179,8 +219,8 @@ impl Config { /// /// let config = Config::new(); /// - /// assert_eq!(config.db_config.db_directory(), cuprate_blockchain_dir()); - /// assert!(config.db_config.db_file().starts_with(cuprate_blockchain_dir())); + /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_BLOCKCHAIN_DIR); + /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_BLOCKCHAIN_DIR)); /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); diff --git a/storage/blockchain/src/config/mod.rs b/storage/blockchain/src/config/mod.rs deleted file mode 100644 index 555a6e6e..00000000 --- a/storage/blockchain/src/config/mod.rs +++ /dev/null @@ -1,44 +0,0 @@ -//! Database configuration. -//! -//! This module contains the main [`Config`]uration struct -//! for the database [`Env`](cuprate_database::Env)ironment, -//! and blockchain-specific configuration. -//! -//! It also contains types related to configuration settings. -//! -//! The main constructor is the [`ConfigBuilder`]. -//! -//! These configurations are processed at runtime, meaning -//! the `Env` can/will dynamically adjust its behavior based -//! on these values. -//! -//! # Example -//! ```rust -//! use cuprate_blockchain::{ -//! cuprate_database::{Env, config::SyncMode}, -//! config::{ConfigBuilder, ReaderThreads}, -//! }; -//! -//! # fn main() -> Result<(), Box> { -//! let tmp_dir = tempfile::tempdir()?; -//! let db_dir = tmp_dir.path().to_owned(); -//! -//! let config = ConfigBuilder::new() -//! // Use a custom database directory. -//! .db_directory(db_dir.into()) -//! // Use as many reader threads as possible (when using `service`). -//! .reader_threads(ReaderThreads::OnePerThread) -//! // Use the fastest sync mode. -//! .sync_mode(SyncMode::Fast) -//! // Build into `Config` -//! .build(); -//! -//! 
// Start a database `service` using this configuration. -//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?; -//! // It's using the config we provided. -//! assert_eq!(env.config(), &config.db_config); -//! # Ok(()) } -//! ``` - -mod config; -pub use config::{Config, ConfigBuilder, ReaderThreads}; diff --git a/storage/blockchain/src/constants.rs b/storage/blockchain/src/constants.rs index 7f00d4cd..87268858 100644 --- a/storage/blockchain/src/constants.rs +++ b/storage/blockchain/src/constants.rs @@ -14,21 +14,6 @@ /// pub const DATABASE_VERSION: u64 = 0; -//---------------------------------------------------------------------------------------------------- Error Messages -/// Corrupt database error message. -/// -/// The error message shown to end-users in panic -/// messages if we think the database is corrupted. -/// -/// This is meant to be user-friendly. -pub const DATABASE_CORRUPT_MSG: &str = r"Cuprate has encountered a fatal error. The database may be corrupted. - -TODO: instructions on: -1. What to do -2. How to fix (re-sync, recover, etc) -3. General advice for preventing corruption -4. etc"; - //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test {} diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index 9db0862a..f66cd99b 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -1,103 +1,9 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - - // Never. 
- unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - clippy::missing_docs_in_private_items, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_crate_dependencies, - unused_doc_comments, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style -)] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints when running in debug mode. -#![cfg_attr( - debug_assertions, - allow( - clippy::todo, - clippy::multiple_crate_versions, - // unused_crate_dependencies, - ) -)] -// Allow some lints in tests. 
-#![cfg_attr( - test, - allow( - clippy::cognitive_complexity, - clippy::needless_pass_by_value, - clippy::cast_possible_truncation, - clippy::too_many_lines - ) + // See `cuprate-database` for reasoning. + clippy::significant_drop_tightening )] + // Only allow building 64-bit targets. // // This allows us to assume 64-bit @@ -117,7 +23,7 @@ compile_error!("Cuprate is only compatible with 64-bit CPUs"); mod constants; mod free; -pub use constants::{DATABASE_CORRUPT_MSG, DATABASE_VERSION}; +pub use constants::DATABASE_VERSION; pub use cuprate_database; pub use free::open; diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index de955c85..d1b83a45 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -268,7 +268,7 @@ mod test { use pretty_assertions::assert_eq; use cuprate_database::{Env, EnvInner, TxRw}; - use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; + use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; use super::*; @@ -292,9 +292,9 @@ mod test { assert_all_tables_are_empty(&env); let mut blocks = [ - block_v1_tx2().clone(), - block_v9_tx3().clone(), - block_v16_tx0().clone(), + BLOCK_V1_TX2.clone(), + BLOCK_V9_TX3.clone(), + BLOCK_V16_TX0.clone(), ]; // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. 
@@ -440,9 +440,9 @@ mod test { let tx_rw = env_inner.tx_rw().unwrap(); let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); - let mut block = block_v9_tx3().clone(); + let mut block = BLOCK_V9_TX3.clone(); - block.height = usize::try_from(u32::MAX).unwrap() + 1; + block.height = cuprate_helper::cast::u32_to_usize(u32::MAX) + 1; add_block(&block, &mut tables).unwrap(); } @@ -459,7 +459,7 @@ mod test { let tx_rw = env_inner.tx_rw().unwrap(); let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); - let mut block = block_v9_tx3().clone(); + let mut block = BLOCK_V9_TX3.clone(); // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. block.height = 0; diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index 65d9ca26..ed368ade 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -84,7 +84,7 @@ mod test { use pretty_assertions::assert_eq; use cuprate_database::{Env, EnvInner, TxRw}; - use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; + use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; use super::*; @@ -108,9 +108,9 @@ mod test { assert_all_tables_are_empty(&env); let mut blocks = [ - block_v1_tx2().clone(), - block_v9_tx3().clone(), - block_v16_tx0().clone(), + BLOCK_V1_TX2.clone(), + BLOCK_V9_TX3.clone(), + BLOCK_V16_TX0.clone(), ]; let blocks_len = blocks.len(); diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index 2699fc82..4ff7dff1 100644 --- a/storage/blockchain/src/ops/mod.rs +++ b/storage/blockchain/src/ops/mod.rs @@ -54,7 +54,7 @@ //! ```rust //! use hex_literal::hex; //! -//! use cuprate_test_utils::data::block_v16_tx0; +//! use cuprate_test_utils::data::BLOCK_V16_TX0; //! use cuprate_blockchain::{ //! cuprate_database::{ //! ConcreteEnv, @@ -83,7 +83,7 @@ //! 
let mut tables = env_inner.open_tables_mut(&tx_rw)?; //! //! // Write a block to the database. -//! let mut block = block_v16_tx0().clone(); +//! let mut block = BLOCK_V16_TX0.clone(); //! # block.height = 0; //! add_block(&block, &mut tables)?; //! diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index 7d608ca0..e7dbdcf6 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -322,7 +322,7 @@ mod test { use pretty_assertions::assert_eq; use cuprate_database::{Env, EnvInner, TxRw}; - use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3}; + use cuprate_test_utils::data::{TX_V1_SIG0, TX_V1_SIG2, TX_V2_RCT3}; use crate::{ tables::{OpenTables, Tables}, @@ -337,7 +337,7 @@ mod test { assert_all_tables_are_empty(&env); // Monero `Transaction`, not database tx. - let txs = [tx_v1_sig0(), tx_v1_sig2(), tx_v2_rct3()]; + let txs = [&*TX_V1_SIG0, &*TX_V1_SIG2, &*TX_V2_RCT3]; // Add transactions. let tx_ids = { diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index e748bbbe..2e7c9086 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -37,8 +37,8 @@ pub fn init( let db = Arc::new(crate::open(config)?); // Spawn the Reader thread pool and Writer. - let readers = init_read_service(db.clone(), reader_threads); - let writer = init_write_service(db.clone()); + let readers = init_read_service(Arc::clone(&db), reader_threads); + let writer = init_write_service(Arc::clone(&db)); Ok((readers, writer, db)) } diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 3331a557..c774ee49 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -66,7 +66,7 @@ //! use tower::{Service, ServiceExt}; //! //! use cuprate_types::{blockchain::{BlockchainReadRequest, BlockchainWriteRequest, BlockchainResponse}, Chain}; -//! 
use cuprate_test_utils::data::block_v16_tx0; +//! use cuprate_test_utils::data::BLOCK_V16_TX0; //! //! use cuprate_blockchain::{ //! cuprate_database::Env, @@ -86,7 +86,7 @@ //! let (mut read_handle, mut write_handle, _) = cuprate_blockchain::service::init(config)?; //! //! // Prepare a request to write block. -//! let mut block = block_v16_tx0().clone(); +//! let mut block = BLOCK_V16_TX0.clone(); //! # block.height = 0_usize; // must be 0th height or panic in `add_block()` //! let request = BlockchainWriteRequest::WriteBlock(block); //! diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 8d817bb0..b68b5444 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -16,7 +16,7 @@ use pretty_assertions::assert_eq; use tower::{Service, ServiceExt}; use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError}; -use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; +use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest}, Chain, OutputOnChain, VerifiedBlockInformation, @@ -61,7 +61,7 @@ fn init_service() -> ( #[allow(clippy::future_not_send)] // INVARIANT: tests are using a single threaded runtime async fn test_template( // Which block(s) to add? - block_fns: &[fn() -> &'static VerifiedBlockInformation], + blocks: &[&VerifiedBlockInformation], // Total amount of generated coins after the block(s) have been added. cumulative_generated_coins: u64, // What are the table lengths be after the block(s) have been added? @@ -76,8 +76,8 @@ async fn test_template( // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. 
- for (i, block_fn) in block_fns.iter().enumerate() { - let mut block = block_fn().clone(); + for (i, block) in blocks.iter().enumerate() { + let mut block = (*block).clone(); block.height = i; // Request a block to be written, assert it was written. @@ -104,7 +104,7 @@ async fn test_template( get_block_extended_header_from_height(&0, &tables).unwrap(), )); - let extended_block_header_1 = if block_fns.len() > 1 { + let extended_block_header_1 = if blocks.len() > 1 { Ok(BlockchainResponse::BlockExtendedHeader( get_block_extended_header_from_height(&1, &tables).unwrap(), )) @@ -116,7 +116,7 @@ async fn test_template( get_block_info(&0, tables.block_infos()).unwrap().block_hash, )); - let block_hash_1 = if block_fns.len() > 1 { + let block_hash_1 = if blocks.len() > 1 { Ok(BlockchainResponse::BlockHash( get_block_info(&1, tables.block_infos()).unwrap().block_hash, )) @@ -128,7 +128,7 @@ async fn test_template( get_block_extended_header_from_height(&0, &tables).unwrap(), ])); - let range_0_2 = if block_fns.len() >= 2 { + let range_0_2 = if blocks.len() >= 2 { Ok(BlockchainResponse::BlockExtendedHeaderInRange(vec![ get_block_extended_header_from_height(&0, &tables).unwrap(), get_block_extended_header_from_height(&1, &tables).unwrap(), @@ -304,8 +304,9 @@ async fn test_template( // Assert we get back the same map of // `Amount`'s and `AmountIndex`'s. 
let mut response_output_count = 0; + #[allow(clippy::iter_over_hash_type)] // order doesn't matter in this test for (amount, output_map) in response { - let amount_index_set = map.get(&amount).unwrap(); + let amount_index_set = &map[&amount]; for (amount_index, output) in output_map { response_output_count += 1; @@ -333,7 +334,7 @@ fn init_drop() { #[tokio::test] async fn v1_tx2() { test_template( - &[block_v1_tx2], + &[&*BLOCK_V1_TX2], 14_535_350_982_449, AssertTableLen { block_infos: 1, @@ -359,7 +360,7 @@ async fn v1_tx2() { #[tokio::test] async fn v9_tx3() { test_template( - &[block_v9_tx3], + &[&*BLOCK_V9_TX3], 3_403_774_022_163, AssertTableLen { block_infos: 1, @@ -385,7 +386,7 @@ async fn v9_tx3() { #[tokio::test] async fn v16_tx0() { test_template( - &[block_v16_tx0], + &[&*BLOCK_V16_TX0], 600_000_000_000, AssertTableLen { block_infos: 1, diff --git a/storage/blockchain/src/tables.rs b/storage/blockchain/src/tables.rs index caac7873..122ac31b 100644 --- a/storage/blockchain/src/tables.rs +++ b/storage/blockchain/src/tables.rs @@ -28,7 +28,6 @@ use crate::types::{ // - Tables are defined in plural to avoid name conflicts with types // - If adding/changing a table also edit: // - the tests in `src/backend/tests.rs` -// - `call_fn_on_all_tables_or_early_return!()` macro in `src/open_tables.rs` cuprate_database::define_tables! { /// Serialized block blobs (bytes). /// diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index 9abd1750..eb1dc647 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -1,4 +1,4 @@ -//! Database [table](crate::tables) types. +//! Blockchain [table](crate::tables) types. //! //! This module contains all types used by the database tables, //! 
and aliases for common Monero-related types that use the diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index a70457f5..0ef4a97d 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -32,4 +32,7 @@ serde = { workspace = true, optional = true } [dev-dependencies] bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } page_size = { version = "0.6.0" } -tempfile = { version = "3.10.0" } \ No newline at end of file +tempfile = { version = "3.10.0" } + +[lints] +workspace = true \ No newline at end of file diff --git a/storage/database/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs index 0c2847fb..8c71e617 100644 --- a/storage/database/src/backend/heed/env.rs +++ b/storage/database/src/backend/heed/env.rs @@ -70,7 +70,7 @@ impl Drop for ConcreteEnv { // We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)` // to clear the no sync and async flags such that the below `self.sync()` // _actually_ synchronously syncs. - if let Err(_e) = crate::Env::sync(self) { + if let Err(_e) = Env::sync(self) { // TODO: log error? 
} diff --git a/storage/database/src/backend/heed/storable.rs b/storage/database/src/backend/heed/storable.rs index 3566e88f..da0e0cb5 100644 --- a/storage/database/src/backend/heed/storable.rs +++ b/storage/database/src/backend/heed/storable.rs @@ -78,8 +78,8 @@ mod test { println!("left: {left:?}, right: {right:?}, expected: {expected:?}"); assert_eq!( as heed::Comparator>::compare( - & as heed::BytesEncode>::bytes_encode(&left).unwrap(), - & as heed::BytesEncode>::bytes_encode(&right).unwrap() + & as BytesEncode>::bytes_encode(&left).unwrap(), + & as BytesEncode>::bytes_encode(&right).unwrap() ), expected ); diff --git a/storage/database/src/backend/redb/database.rs b/storage/database/src/backend/redb/database.rs index cd9a0be9..dafb2417 100644 --- a/storage/database/src/backend/redb/database.rs +++ b/storage/database/src/backend/redb/database.rs @@ -23,7 +23,7 @@ use crate::{ /// Shared [`DatabaseRo::get()`]. #[inline] fn get( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, key: &T::Key, ) -> Result { Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value()) @@ -32,7 +32,7 @@ fn get( /// Shared [`DatabaseRo::len()`]. #[inline] fn len( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, ) -> Result { Ok(db.len()?) } @@ -40,7 +40,7 @@ fn len( /// Shared [`DatabaseRo::first()`]. #[inline] fn first( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, ) -> Result<(T::Key, T::Value), RuntimeError> { let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) @@ -49,7 +49,7 @@ fn first( /// Shared [`DatabaseRo::last()`]. 
#[inline] fn last( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, ) -> Result<(T::Key, T::Value), RuntimeError> { let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) @@ -58,7 +58,7 @@ fn last( /// Shared [`DatabaseRo::is_empty()`]. #[inline] fn is_empty( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, ) -> Result { Ok(db.is_empty()?) } diff --git a/storage/database/src/backend/tests.rs b/storage/database/src/backend/tests.rs index ac6b5927..e219c42f 100644 --- a/storage/database/src/backend/tests.rs +++ b/storage/database/src/backend/tests.rs @@ -126,7 +126,7 @@ fn resize() { let (env, _tempdir) = tmp_concrete_env(); // Resize by the OS page size. - let page_size = crate::resize::page_size(); + let page_size = *crate::resize::PAGE_SIZE; let old_size = env.current_map_size(); env.resize_map(Some(ResizeAlgorithm::FixedBytes(page_size))); diff --git a/storage/database/src/config/mod.rs b/storage/database/src/config/mod.rs index 19a324e1..c6ed0c01 100644 --- a/storage/database/src/config/mod.rs +++ b/storage/database/src/config/mod.rs @@ -33,6 +33,7 @@ //! # Ok(()) } //! ``` +#[allow(clippy::module_inception)] mod config; pub use config::{Config, ConfigBuilder, READER_THREADS_DEFAULT}; diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs index cae49733..82944434 100644 --- a/storage/database/src/env.rs +++ b/storage/database/src/env.rs @@ -163,7 +163,7 @@ pub trait Env: Sized { // We have the direct PATH to the file, // no need to use backend-specific functions. // - // SAFETY: as we are only accessing the metadata of + // INVARIANT: as we are only accessing the metadata of // the file and not reading the bytes, it should be // fine even with a memory mapped file being actively // written to. 
diff --git a/storage/database/src/key.rs b/storage/database/src/key.rs index 3273d4ed..2f3855a4 100644 --- a/storage/database/src/key.rs +++ b/storage/database/src/key.rs @@ -163,11 +163,11 @@ impl KeyCompare { #[inline] pub const fn as_compare_fn(self) -> fn(&[u8], &[u8]) -> Ordering { match self { - Self::Default => std::cmp::Ord::cmp, + Self::Default => Ord::cmp, Self::Number => |left, right| { let left = ::from_bytes(left); let right = ::from_bytes(right); - std::cmp::Ord::cmp(&left, &right) + Ord::cmp(&left, &right) }, Self::Custom(f) => f, } diff --git a/storage/database/src/lib.rs b/storage/database/src/lib.rs index da36b0d5..45bfc53c 100644 --- a/storage/database/src/lib.rs +++ b/storage/database/src/lib.rs @@ -1,94 +1,18 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - clippy::missing_docs_in_private_items, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. 
-#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_crate_dependencies, - unused_doc_comments, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style -)] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, - - // unused_crate_dependencies, // false-positive with `paste` -)] -// Allow some lints when running in debug mode. -#![cfg_attr( - debug_assertions, - allow( - clippy::todo, - clippy::multiple_crate_versions, - // unused_crate_dependencies, - ) + // This lint is allowed because the following + // code exists a lot in this crate: + // + // ```rust + // let env_inner = env.env_inner(); + // let tx_rw = env_inner.tx_rw()?; + // OpenTables::create_tables(&env_inner, &tx_rw)?; + // ``` + // + // Rust thinks `env_inner` can be dropped earlier + // but it cannot, we need it for the lifetime of + // the database transaction + tables. + clippy::significant_drop_tightening )] // Allow some lints in tests. #![cfg_attr( diff --git a/storage/database/src/resize.rs b/storage/database/src/resize.rs index 99d6d7e3..6ef9974a 100644 --- a/storage/database/src/resize.rs +++ b/storage/database/src/resize.rs @@ -10,7 +10,7 @@ //! //! # Page size //! All free functions in this module will -//! 
return a multiple of the OS page size ([`page_size()`]), +//! return a multiple of the OS page size ([`PAGE_SIZE`]), //! [LMDB will error](http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5) //! if this is not the case. //! @@ -18,10 +18,10 @@ //! All returned [`NonZeroUsize`] values of the free functions in this module //! (including [`ResizeAlgorithm::resize`]) uphold the following invariants: //! 1. It will always be `>=` the input `current_size_bytes` -//! 2. It will always be a multiple of [`page_size()`] +//! 2. It will always be a multiple of [`PAGE_SIZE`] //---------------------------------------------------------------------------------------------------- Import -use std::{num::NonZeroUsize, sync::OnceLock}; +use std::{num::NonZeroUsize, sync::LazyLock}; //---------------------------------------------------------------------------------------------------- ResizeAlgorithm /// The function/algorithm used by the @@ -85,21 +85,14 @@ impl Default for ResizeAlgorithm { } //---------------------------------------------------------------------------------------------------- Free functions -/// This function retrieves the system’s memory page size. +/// This retrieves the system’s memory page size. /// /// It is just [`page_size::get`](https://docs.rs/page_size) internally. /// -/// This caches the result, so this function is cheap after the 1st call. -/// /// # Panics -/// This function will panic if the OS returns of page size of `0` (impossible?). -#[inline] -pub fn page_size() -> NonZeroUsize { - /// Cached result of [`page_size()`]. - static PAGE_SIZE: OnceLock = OnceLock::new(); - *PAGE_SIZE - .get_or_init(|| NonZeroUsize::new(page_size::get()).expect("page_size::get() returned 0")) -} +/// Accessing this [`LazyLock`] will panic if the OS returns of page size of `0` (impossible?). 
+pub static PAGE_SIZE: LazyLock = + LazyLock::new(|| NonZeroUsize::new(page_size::get()).expect("page_size::get() returned 0")); /// Memory map resize closely matching `monerod`. /// @@ -122,7 +115,7 @@ pub fn page_size() -> NonZeroUsize { /// assert_eq!(monero(0).get(), N); /// /// // Rounds up to nearest OS page size. -/// assert_eq!(monero(1).get(), N + page_size().get()); +/// assert_eq!(monero(1).get(), N + PAGE_SIZE.get()); /// ``` /// /// # Panics @@ -143,7 +136,7 @@ pub fn monero(current_size_bytes: usize) -> NonZeroUsize { /// const ADD_SIZE: usize = 1_usize << 30; - let page_size = page_size().get(); + let page_size = PAGE_SIZE.get(); let new_size_bytes = current_size_bytes + ADD_SIZE; // Round up the new size to the @@ -167,7 +160,7 @@ pub fn monero(current_size_bytes: usize) -> NonZeroUsize { /// /// ```rust /// # use cuprate_database::resize::*; -/// let page_size: usize = page_size().get(); +/// let page_size: usize = PAGE_SIZE.get(); /// /// // Anything below the page size will round up to the page size. /// for i in 0..=page_size { @@ -190,7 +183,7 @@ pub fn monero(current_size_bytes: usize) -> NonZeroUsize { /// fixed_bytes(1, usize::MAX); /// ``` pub fn fixed_bytes(current_size_bytes: usize, add_bytes: usize) -> NonZeroUsize { - let page_size = page_size(); + let page_size = *PAGE_SIZE; let new_size_bytes = current_size_bytes + add_bytes; // Guard against < page_size. @@ -222,7 +215,7 @@ pub fn fixed_bytes(current_size_bytes: usize, add_bytes: usize) -> NonZeroUsize /// /// ```rust /// # use cuprate_database::resize::*; -/// let page_size: usize = page_size().get(); +/// let page_size: usize = PAGE_SIZE.get(); /// /// // Anything below the page size will round up to the page size. /// for i in 0..=page_size { @@ -265,7 +258,7 @@ pub fn percent(current_size_bytes: usize, percent: f32) -> NonZeroUsize { _ => 1.0, }; - let page_size = page_size(); + let page_size = *PAGE_SIZE; // INVARIANT: Allow `f32` <-> `usize` casting, we handle all cases. 
#[allow( diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index 536d445a..70211d9e 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -4,12 +4,43 @@ version = "0.0.0" edition = "2021" description = "Cuprate's transaction pool database" license = "MIT" -authors = ["hinto-janai"] -repository = "https://github.com/Cuprate/cuprate/tree/main/storage/cuprate-txpool" +authors = ["Boog900"] +repository = "https://github.com/Cuprate/cuprate/tree/main/storage/txpool" keywords = ["cuprate", "txpool", "transaction", "pool", "database"] [features] +default = ["heed", "service"] +# default = ["redb", "service"] +# default = ["redb-memory", "service"] +heed = ["cuprate-database/heed"] +redb = ["cuprate-database/redb"] +redb-memory = ["cuprate-database/redb-memory"] +service = ["dep:tower", "dep:rayon", "dep:cuprate-database-service"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] [dependencies] +cuprate-database = { path = "../database", features = ["heed"] } +cuprate-database-service = { path = "../service", optional = true } +cuprate-types = { path = "../../types" } +cuprate-helper = { path = "../../helper", default-features = false, features = ["constants"] } + +monero-serai = { workspace = true, features = ["std"] } +bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } +bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } +thiserror = { workspace = true } +hex = { workspace = true } + +tower = { workspace = true, optional = true } +rayon = { workspace = true, optional = true } + +serde = { workspace = true, optional = true } [dev-dependencies] +cuprate-test-utils = { path = "../../test-utils" } + +tokio = { workspace = true } +tempfile = { workspace = true } +hex-literal = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/storage/txpool/README.md b/storage/txpool/README.md new file 
mode 100644 index 00000000..80d3b25b --- /dev/null +++ b/storage/txpool/README.md @@ -0,0 +1,114 @@ +Cuprate's tx-pool database. + +This documentation is mostly for practical usage of `cuprate_txpool`. + +For a high-level overview, see the database section in +[Cuprate's architecture book](https://architecture.cuprate.org). + +If you're looking for a database crate, consider using the lower-level +[`cuprate-database`](https://doc.cuprate.org/cuprate_database) +crate that this crate is built on-top of. + +# Purpose + +This crate does 3 things: + +1. Uses [`cuprate_database`] as a base database layer +1. Implements various transaction pool related [operations](ops), [tables], and [types] +1. Exposes a [`tower::Service`] backed by a thread-pool + +Each layer builds on-top of the previous. + +As a user of `cuprate_txpool`, consider using the higher-level [`service`] module, +or at the very least the [`ops`] module instead of interacting with the `cuprate_database` traits directly. + +# `cuprate_database` + +Consider reading `cuprate_database`'s crate documentation before this crate, as it is the first layer. + +If/when this crate needs is used, be sure to use the version that this crate re-exports, e.g.: + +```rust +use cuprate_txpool::{ + cuprate_database::RuntimeError, +}; +``` + +This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_txpool` internally. + +# Feature flags + +The `service` module requires the `service` feature to be enabled. +See the module for more documentation. + +Different database backends are enabled by the feature flags: + +- `heed` (LMDB) +- `redb` + +The default is `heed`. + +`tracing` is always enabled and cannot be disabled via feature-flag. + + +# Invariants when not using `service` + +See [`cuprate_blockchain`](https://doc.cuprate.org/cuprate_blockchain), the invariants are the same. + +# Examples + +The below is an example of using `cuprate_txpool`'s +lowest API, i.e. 
using a mix of this crate and `cuprate_database`'s traits directly - +**this is NOT recommended.** + +For examples of the higher-level APIs, see: + +- [`ops`] +- [`service`] + +```rust +use cuprate_txpool::{ + cuprate_database::{ + ConcreteEnv, + Env, EnvInner, + DatabaseRo, DatabaseRw, TxRo, TxRw, + }, + config::ConfigBuilder, + tables::{Tables, TablesMut, OpenTables}, +}; + +# fn main() -> Result<(), Box> { + // Create a configuration for the database environment. + let tmp_dir = tempfile::tempdir()?; + let db_dir = tmp_dir.path().to_owned(); + let config = ConfigBuilder::new() + .db_directory(db_dir.into()) + .build(); + + // Initialize the database environment. + let env = cuprate_txpool::open(config)?; + + // Open up a transaction + tables for writing. + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + let mut tables = env_inner.open_tables_mut(&tx_rw)?; + + // ⚠️ Write data to the tables directly. + // (not recommended, use `ops` or `service`). + const KEY_IMAGE: [u8; 32] = [88; 32]; + const TX_HASH: [u8; 32] = [88; 32]; + tables.spent_key_images_mut().put(&KEY_IMAGE, &TX_HASH)?; + + // Commit the data written. + drop(tables); + TxRw::commit(tx_rw)?; + + // Read the data, assert it is correct. + let tx_ro = env_inner.tx_ro()?; + let tables = env_inner.open_tables(&tx_ro)?; + let (key_image, tx_hash) = tables.spent_key_images().first()?; + assert_eq!(key_image, KEY_IMAGE); + assert_eq!(tx_hash, TX_HASH); + # Ok(()) +} +``` diff --git a/storage/txpool/src/config.rs b/storage/txpool/src/config.rs new file mode 100644 index 00000000..1ef0d734 --- /dev/null +++ b/storage/txpool/src/config.rs @@ -0,0 +1,232 @@ +//! The transaction pool [`Config`]. 
+use std::{borrow::Cow, path::Path}; + +use cuprate_database::{ + config::{Config as DbConfig, SyncMode}, + resize::ResizeAlgorithm, +}; +use cuprate_database_service::ReaderThreads; +use cuprate_helper::fs::CUPRATE_TXPOOL_DIR; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +/// The default transaction pool weight limit. +const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024; + +//---------------------------------------------------------------------------------------------------- ConfigBuilder +/// Builder for [`Config`]. +/// +// SOMEDAY: there's are many more options to add in the future. +#[derive(Debug, Clone, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ConfigBuilder { + /// [`Config::db_directory`]. + db_directory: Option>, + + /// [`Config::cuprate_database_config`]. + db_config: cuprate_database::config::ConfigBuilder, + + /// [`Config::reader_threads`]. + reader_threads: Option, + + /// [`Config::max_txpool_weight`]. + max_txpool_weight: Option, +} + +impl ConfigBuilder { + /// Create a new [`ConfigBuilder`]. + /// + /// [`ConfigBuilder::build`] can be called immediately + /// after this function to use default values. + pub fn new() -> Self { + Self { + db_directory: None, + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( + &*CUPRATE_TXPOOL_DIR, + )), + reader_threads: None, + max_txpool_weight: None, + } + } + + /// Build into a [`Config`]. + /// + /// # Default values + /// If [`ConfigBuilder::db_directory`] was not called, + /// the default [`CUPRATE_TXPOOL_DIR`] will be used. + /// + /// For all other values, [`Default::default`] is used. + pub fn build(self) -> Config { + // INVARIANT: all PATH safety checks are done + // in `helper::fs`. No need to do them here. 
+ let db_directory = self + .db_directory + .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)); + + let reader_threads = self.reader_threads.unwrap_or_default(); + + let max_txpool_weight = self + .max_txpool_weight + .unwrap_or(DEFAULT_TXPOOL_WEIGHT_LIMIT); + + let db_config = self + .db_config + .db_directory(db_directory) + .reader_threads(reader_threads.as_threads()) + .build(); + + Config { + db_config, + reader_threads, + max_txpool_weight, + } + } + + /// Sets a new maximum weight for the transaction pool. + #[must_use] + pub const fn max_txpool_weight(mut self, max_txpool_weight: usize) -> Self { + self.max_txpool_weight = Some(max_txpool_weight); + self + } + + /// Set a custom database directory (and file) [`Path`]. + #[must_use] + pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self { + self.db_directory = Some(db_directory); + self + } + + /// Calls [`cuprate_database::config::ConfigBuilder::sync_mode`]. + #[must_use] + pub fn sync_mode(mut self, sync_mode: SyncMode) -> Self { + self.db_config = self.db_config.sync_mode(sync_mode); + self + } + + /// Calls [`cuprate_database::config::ConfigBuilder::resize_algorithm`]. + #[must_use] + pub fn resize_algorithm(mut self, resize_algorithm: ResizeAlgorithm) -> Self { + self.db_config = self.db_config.resize_algorithm(resize_algorithm); + self + } + + /// Set a custom [`ReaderThreads`]. + #[must_use] + pub const fn reader_threads(mut self, reader_threads: ReaderThreads) -> Self { + self.reader_threads = Some(reader_threads); + self + } + + /// Tune the [`ConfigBuilder`] for the highest performing, + /// but also most resource-intensive & maybe risky settings. + /// + /// Good default for testing, and resource-available machines. 
+ #[must_use] + pub fn fast(mut self) -> Self { + self.db_config = + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)) + .fast(); + + self.reader_threads = Some(ReaderThreads::OnePerThread); + self + } + + /// Tune the [`ConfigBuilder`] for the lowest performing, + /// but also least resource-intensive settings. + /// + /// Good default for resource-limited machines, e.g. a cheap VPS. + #[must_use] + pub fn low_power(mut self) -> Self { + self.db_config = + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)) + .low_power(); + + self.reader_threads = Some(ReaderThreads::One); + self + } +} + +impl Default for ConfigBuilder { + fn default() -> Self { + let db_directory = Cow::Borrowed(CUPRATE_TXPOOL_DIR.as_path()); + Self { + db_directory: Some(db_directory.clone()), + db_config: cuprate_database::config::ConfigBuilder::new(db_directory), + reader_threads: Some(ReaderThreads::default()), + max_txpool_weight: Some(DEFAULT_TXPOOL_WEIGHT_LIMIT), + } + } +} + +//---------------------------------------------------------------------------------------------------- Config +/// `cuprate_txpool` configuration. +/// +/// This is a configuration built on-top of [`DbConfig`]. +/// +/// It contains configuration specific to this crate, plus the database config. +/// +/// For construction, either use [`ConfigBuilder`] or [`Config::default`]. +#[derive(Debug, Clone, PartialEq, PartialOrd)] +pub struct Config { + /// The database configuration. + pub db_config: DbConfig, + + /// Database reader thread count. + pub reader_threads: ReaderThreads, + + /// The maximum weight of the transaction pool, after which we will start dropping transactions. + // TODO: enforce this max size. + pub max_txpool_weight: usize, +} + +impl Config { + /// Create a new [`Config`] with sane default settings. + /// + /// The [`DbConfig::db_directory`] + /// will be set to [`CUPRATE_TXPOOL_DIR`]. 
+ /// + /// All other values will be [`Default::default`]. + /// + /// Same as [`Config::default`]. + /// + /// ```rust + /// use cuprate_database::{ + /// config::SyncMode, + /// resize::ResizeAlgorithm, + /// DATABASE_DATA_FILENAME, + /// }; + /// use cuprate_database_service::ReaderThreads; + /// use cuprate_helper::fs::*; + /// + /// use cuprate_txpool::Config; + /// + /// let config = Config::new(); + /// + /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_TXPOOL_DIR); + /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_TXPOOL_DIR)); + /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); + /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); + /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); + /// assert_eq!(config.reader_threads, ReaderThreads::default()); + /// ``` + pub fn new() -> Self { + Self { + db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)), + reader_threads: ReaderThreads::default(), + max_txpool_weight: 0, + } + } +} + +impl Default for Config { + /// Same as [`Config::new`]. + /// + /// ```rust + /// # use cuprate_txpool::Config; + /// assert_eq!(Config::default(), Config::new()); + /// ``` + fn default() -> Self { + Self::new() + } +} diff --git a/storage/txpool/src/free.rs b/storage/txpool/src/free.rs new file mode 100644 index 00000000..d394002b --- /dev/null +++ b/storage/txpool/src/free.rs @@ -0,0 +1,62 @@ +//! General free functions (related to the tx-pool database). + +//---------------------------------------------------------------------------------------------------- Import +use cuprate_database::{ConcreteEnv, Env, EnvInner, InitError, RuntimeError, TxRw}; + +use crate::{config::Config, tables::OpenTables}; + +//---------------------------------------------------------------------------------------------------- Free functions +/// Open the txpool database using the passed [`Config`]. 
+/// +/// This calls [`cuprate_database::Env::open`] and prepares the +/// database to be ready for txpool-related usage, e.g. +/// table creation, table sort order, etc. +/// +/// All tables found in [`crate::tables`] will be +/// ready for usage in the returned [`ConcreteEnv`]. +/// +/// # Errors +/// This will error if: +/// - The database file could not be opened +/// - A write transaction could not be opened +/// - A table could not be created/opened +#[cold] +#[inline(never)] // only called once +pub fn open(config: Config) -> Result { + // Attempt to open the database environment. + let env = ::open(config.db_config)?; + + /// Convert runtime errors to init errors. + /// + /// INVARIANT: + /// [`cuprate_database`]'s functions mostly return the former + /// so we must convert them. We have knowledge of which errors + /// makes sense in this functions context so we panic on + /// unexpected ones. + fn runtime_to_init_error(runtime: RuntimeError) -> InitError { + match runtime { + RuntimeError::Io(io_error) => io_error.into(), + + // These errors shouldn't be happening here. + RuntimeError::KeyExists + | RuntimeError::KeyNotFound + | RuntimeError::ResizeNeeded + | RuntimeError::TableNotFound => unreachable!(), + } + } + + // INVARIANT: We must ensure that all tables are created, + // `cuprate_database` has no way of knowing _which_ tables + // we want since it is agnostic, so we are responsible for this. + { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw().map_err(runtime_to_init_error)?; + + // Create all tables. + OpenTables::create_tables(&env_inner, &tx_rw).map_err(runtime_to_init_error)?; + + TxRw::commit(tx_rw).map_err(runtime_to_init_error)?; + } + + Ok(env) +} diff --git a/storage/txpool/src/lib.rs b/storage/txpool/src/lib.rs index 8b137891..243dc4d9 100644 --- a/storage/txpool/src/lib.rs +++ b/storage/txpool/src/lib.rs @@ -1 +1,29 @@ +#![doc = include_str!("../README.md")] +#![allow( + // See `cuprate-database` for reasoning. 
+ clippy::significant_drop_tightening +)] +pub mod config; +mod free; +pub mod ops; +#[cfg(feature = "service")] +pub mod service; +pub mod tables; +pub mod types; + +pub use config::Config; +pub use free::open; + +//re-exports +pub use cuprate_database; + +// TODO: remove when used. +use tower as _; +#[cfg(test)] +mod test { + use cuprate_test_utils as _; + use hex_literal as _; + use tempfile as _; + use tokio as _; +} diff --git a/storage/txpool/src/ops.rs b/storage/txpool/src/ops.rs new file mode 100644 index 00000000..50d9ea4a --- /dev/null +++ b/storage/txpool/src/ops.rs @@ -0,0 +1,102 @@ +//! Abstracted Monero tx-pool database operations. +//! +//! This module contains many free functions that use the +//! traits in [`cuprate_database`] to generically call Monero-related +//! tx-pool database operations. +//! +//! # `impl Table` +//! Functions in this module take [`Tables`](crate::tables::Tables) and +//! [`TablesMut`](crate::tables::TablesMut) directly - these are +//! _already opened_ database tables. +//! +//! As such, the responsibility of +//! transactions, tables, etc, are on the caller. +//! +//! Notably, this means that these functions are as lean +//! as possible, so calling them in a loop should be okay. +//! +//! # Atomicity +//! As transactions are handled by the _caller_ of these functions, +//! it is up to the caller to decide what happens if one them return +//! an error. +//! +//! To maintain atomicity, transactions should be [`abort`](cuprate_database::TxRw::abort)ed +//! if one of the functions failed. +//! +//! For example, if [`add_transaction`] is called and returns an [`Err`], +//! `abort`ing the transaction that opened the input `TableMut` would reverse all tables +//! mutated by [`add_transaction`] up until the error, leaving it in the state it was in before +//! [`add_transaction`] was called. +//! +//! # Example +//! Simple usage of `ops`. +//! +//! ```rust +//! use hex_literal::hex; +//! +//! 
use cuprate_test_utils::data::TX_V1_SIG2; +//! use cuprate_txpool::{ +//! cuprate_database::{ +//! ConcreteEnv, +//! Env, EnvInner, +//! DatabaseRo, DatabaseRw, TxRo, TxRw, +//! }, +//! config::ConfigBuilder, +//! tables::{Tables, TablesMut, OpenTables}, +//! ops::{add_transaction, get_transaction_verification_data}, +//! }; +//! +//! # fn main() -> Result<(), Box> { +//! // Create a configuration for the database environment. +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); +//! let config = ConfigBuilder::new() +//! .db_directory(db_dir.into()) +//! .build(); +//! +//! // Initialize the database environment. +//! let env = cuprate_txpool::open(config)?; +//! +//! // Open up a transaction + tables for writing. +//! let env_inner = env.env_inner(); +//! let tx_rw = env_inner.tx_rw()?; +//! let mut tables = env_inner.open_tables_mut(&tx_rw)?; +//! +//! // Write a tx to the database. +//! let mut tx = TX_V1_SIG2.clone(); +//! let tx_hash = tx.tx_hash; +//! add_transaction(&tx.try_into().unwrap(), true, &mut tables)?; +//! +//! // Commit the data written. +//! drop(tables); +//! TxRw::commit(tx_rw)?; +//! +//! // Read the data, assert it is correct. +//! let tx_rw = env_inner.tx_rw()?; +//! let mut tables = env_inner.open_tables_mut(&tx_rw)?; +//! let tx = get_transaction_verification_data(&tx_hash, &mut tables)?; +//! +//! assert_eq!(tx.tx_hash, tx_hash); +//! assert_eq!(tx.tx, TX_V1_SIG2.tx); +//! # Ok(()) } +//! ``` + +mod key_images; +mod tx_read; +mod tx_write; + +pub use tx_read::get_transaction_verification_data; +pub use tx_write::{add_transaction, remove_transaction}; + +/// An error that can occur on some tx-write ops. +#[derive(thiserror::Error, Debug)] +pub enum TxPoolWriteError { + /// The transaction could not be added as it double spends another tx in the pool. + /// + /// The inner value is the hash of the transaction that was double spent. 
+ #[error("Transaction doubles spent transaction already in the pool ({}).", hex::encode(.0))] + DoubleSpend(crate::types::TransactionHash), + /// A database error. + #[error("Database error: {0}")] + Database(#[from] cuprate_database::RuntimeError), +} diff --git a/storage/txpool/src/ops/key_images.rs b/storage/txpool/src/ops/key_images.rs new file mode 100644 index 00000000..04aa1b44 --- /dev/null +++ b/storage/txpool/src/ops/key_images.rs @@ -0,0 +1,54 @@ +//! Tx-pool key image ops. +use monero_serai::transaction::Input; + +use cuprate_database::{DatabaseRw, RuntimeError}; + +use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHash}; + +/// Adds the transaction key images to the [`SpentKeyImages`] table. +/// +/// This function will return an error if any of the key images are already spent. +/// +/// # Panics +/// This function will panic if any of the [`Input`]s are not [`Input::ToKey`] +pub(super) fn add_tx_key_images( + inputs: &[Input], + tx_hash: &TransactionHash, + kis_table: &mut impl DatabaseRw, +) -> Result<(), TxPoolWriteError> { + for ki in inputs.iter().map(ki_from_input) { + if let Ok(double_spend_tx_hash) = kis_table.get(&ki) { + return Err(TxPoolWriteError::DoubleSpend(double_spend_tx_hash)); + } + + kis_table.put(&ki, tx_hash)?; + } + + Ok(()) +} + +/// Removes key images from the [`SpentKeyImages`] table. +/// +/// # Panics +/// This function will panic if any of the [`Input`]s are not [`Input::ToKey`] +pub(super) fn remove_tx_key_images( + inputs: &[Input], + kis_table: &mut impl DatabaseRw, +) -> Result<(), RuntimeError> { + for ki in inputs.iter().map(ki_from_input) { + kis_table.delete(&ki)?; + } + + Ok(()) +} + +/// Maps an input to a key image. +/// +/// # Panics +/// This function will panic if the [`Input`] is not [`Input::ToKey`] +fn ki_from_input(input: &Input) -> [u8; 32] { + match input { + Input::ToKey { key_image, .. 
} => key_image.compress().0, + Input::Gen(_) => panic!("miner tx cannot be added to the txpool"), + } +} diff --git a/storage/txpool/src/ops/tx_read.rs b/storage/txpool/src/ops/tx_read.rs new file mode 100644 index 00000000..db894151 --- /dev/null +++ b/storage/txpool/src/ops/tx_read.rs @@ -0,0 +1,36 @@ +//! Transaction read ops. +//! +//! This module handles reading full transaction data, like getting a transaction from the pool. +use std::sync::Mutex; + +use monero_serai::transaction::Transaction; + +use cuprate_database::{DatabaseRo, RuntimeError}; +use cuprate_types::{TransactionVerificationData, TxVersion}; + +use crate::{tables::Tables, types::TransactionHash}; + +/// Gets the [`TransactionVerificationData`] of a transaction in the tx-pool, leaving the tx in the pool. +pub fn get_transaction_verification_data( + tx_hash: &TransactionHash, + tables: &impl Tables, +) -> Result { + let tx_blob = tables.transaction_blobs().get(tx_hash)?.0; + + let tx_info = tables.transaction_infos().get(tx_hash)?; + + let cached_verification_state = tables.cached_verification_state().get(tx_hash)?.into(); + + let tx = + Transaction::read(&mut tx_blob.as_slice()).expect("Tx in the tx-pool must be parseable"); + + Ok(TransactionVerificationData { + version: TxVersion::from_raw(tx.version()).expect("Tx in tx-pool has invalid version"), + tx, + tx_blob, + tx_weight: tx_info.weight, + fee: tx_info.fee, + tx_hash: *tx_hash, + cached_verification_state: Mutex::new(cached_verification_state), + }) +} diff --git a/storage/txpool/src/ops/tx_write.rs b/storage/txpool/src/ops/tx_write.rs new file mode 100644 index 00000000..9885b9c5 --- /dev/null +++ b/storage/txpool/src/ops/tx_write.rs @@ -0,0 +1,83 @@ +//! Transaction writing ops. +//! +//! This module handles writing full transaction data, like removing or adding a transaction. 
+use bytemuck::TransparentWrapper; +use monero_serai::transaction::{NotPruned, Transaction}; + +use cuprate_database::{DatabaseRw, RuntimeError, StorableVec}; +use cuprate_types::TransactionVerificationData; + +use crate::{ + ops::{ + key_images::{add_tx_key_images, remove_tx_key_images}, + TxPoolWriteError, + }, + tables::TablesMut, + types::{TransactionHash, TransactionInfo, TxStateFlags}, +}; + +/// Adds a transaction to the tx-pool. +/// +/// This function fills in all tables necessary to add the transaction to the pool. +/// +/// # Panics +/// This function will panic if the transactions inputs are not all of type [`Input::ToKey`](monero_serai::transaction::Input::ToKey). +pub fn add_transaction( + tx: &TransactionVerificationData, + state_stem: bool, + tables: &mut impl TablesMut, +) -> Result<(), TxPoolWriteError> { + // Add the tx blob to table 0. + tables + .transaction_blobs_mut() + .put(&tx.tx_hash, StorableVec::wrap_ref(&tx.tx_blob))?; + + let mut flags = TxStateFlags::empty(); + flags.set(TxStateFlags::STATE_STEM, state_stem); + + // Add the tx info to table 1. + tables.transaction_infos_mut().put( + &tx.tx_hash, + &TransactionInfo { + fee: tx.fee, + weight: tx.tx_weight, + flags, + _padding: [0; 7], + }, + )?; + + // Add the cached verification state to table 2. + let cached_verification_state = (*tx.cached_verification_state.lock().unwrap()).into(); + tables + .cached_verification_state_mut() + .put(&tx.tx_hash, &cached_verification_state)?; + + // Add the tx key images to table 3. + let kis_table = tables.spent_key_images_mut(); + add_tx_key_images(&tx.tx.prefix().inputs, &tx.tx_hash, kis_table)?; + + Ok(()) +} + +/// Removes a transaction from the transaction pool. +pub fn remove_transaction( + tx_hash: &TransactionHash, + tables: &mut impl TablesMut, +) -> Result<(), RuntimeError> { + // Remove the tx blob from table 0. + let tx_blob = tables.transaction_blobs_mut().take(tx_hash)?.0; + + // Remove the tx info from table 1. 
+ tables.transaction_infos_mut().delete(tx_hash)?; + + // Remove the cached verification state from table 2. + tables.cached_verification_state_mut().delete(tx_hash)?; + + // Remove the tx key images from table 3. + let tx = Transaction::::read(&mut tx_blob.as_slice()) + .expect("Tx in the tx-pool must be parseable"); + let kis_table = tables.spent_key_images_mut(); + remove_tx_key_images(&tx.prefix().inputs, kis_table)?; + + Ok(()) +} diff --git a/storage/txpool/src/service.rs b/storage/txpool/src/service.rs new file mode 100644 index 00000000..d87adcea --- /dev/null +++ b/storage/txpool/src/service.rs @@ -0,0 +1,136 @@ +//! [`tower::Service`] integeration + thread-pool. +//! +//! ## `service` +//! The `service` module implements the [`tower`] integration, +//! along with the reader/writer thread-pool system. +//! +//! The thread-pool allows outside crates to communicate with it by +//! sending database [`Request`][req_r]s and receiving [`Response`][resp]s `async`hronously - +//! without having to actually worry and handle the database themselves. +//! +//! The system is managed by this crate, and only requires [`init`] by the user. +//! +//! This module must be enabled with the `service` feature. +//! +//! ## Handles +//! The 2 handles to the database are: +//! - [`TxpoolReadHandle`] +//! - [`TxpoolWriteHandle`] +//! +//! The 1st allows any caller to send [`ReadRequest`][req_r]s. +//! +//! The 2nd allows any caller to send [`WriteRequest`][req_w]s. +//! +//! The `DatabaseReadHandle` can be shared as it is cheaply [`Clone`]able, however, +//! the `DatabaseWriteHandle` cannot be cloned. There is only 1 place in Cuprate that +//! writes, so it is passed there and used. +//! +//! ## Initialization +//! The database & thread-pool system can be initialized with [`init()`]. +//! +//! This causes the underlying database/threads to be setup +//! and returns a read/write handle to that database. +//! +//! ## Shutdown +//! 
Upon the above handles being dropped, the corresponding thread(s) will automatically exit, i.e: +//! - The last [`TxpoolReadHandle`] is dropped => reader thread-pool exits +//! - The last [`TxpoolWriteHandle`] is dropped => writer thread exits +//! +//! Upon dropping the [`cuprate_database::Env`]: +//! - All un-processed database transactions are completed +//! - All data gets flushed to disk (caused by [`Drop::drop`] impl on `Env`) +//! +//! ## Request and Response +//! To interact with the database (whether reading or writing data), +//! a `Request` can be sent using one of the above handles. +//! +//! Both the handles implement `tower::Service`, so they can be [`tower::Service::call`]ed. +//! +//! An `async`hronous channel will be returned from the call. +//! This channel can be `.await`ed upon to (eventually) receive +//! the corresponding `Response` to your `Request`. +//! +//! [req_r]: interface::TxpoolReadRequest +//! +//! [req_w]: interface::TxpoolWriteRequest +//! +//! // TODO: we have 2 responses +//! +//! [resp]: interface::TxpoolWriteResponse +//! +//! # Example +//! Simple usage of `service`. +//! +//! ```rust +//! use std::sync::Arc; +//! +//! use hex_literal::hex; +//! use tower::{Service, ServiceExt}; +//! +//! use cuprate_test_utils::data::TX_V1_SIG2; +//! +//! use cuprate_txpool::{ +//! cuprate_database::Env, +//! config::ConfigBuilder, +//! service::interface::{ +//! TxpoolWriteRequest, +//! TxpoolWriteResponse, +//! TxpoolReadRequest, +//! TxpoolReadResponse +//! } +//! }; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! // Create a configuration for the database environment. +//! use cuprate_test_utils::data::TX_V1_SIG2; +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); +//! let config = ConfigBuilder::new() +//! .db_directory(db_dir.into()) +//! .build(); +//! +//! // Initialize the database thread-pool. +//! 
let (mut read_handle, mut write_handle, _) = cuprate_txpool::service::init(config)?; +//! +//! // Prepare a request to write a transaction. +//! let tx = TX_V1_SIG2.clone(); +//! let request = TxpoolWriteRequest::AddTransaction { +//! tx: Arc::new(tx.try_into().unwrap()), +//! state_stem: false, +//! }; +//! +//! // Send the request. +//! // We receive back an `async` channel that will +//! // eventually yield the result when `service` +//! // is done writing the tx. +//! let response_channel = write_handle.ready().await?.call(request); +//! +//! // Transaction write was OK. +//! let TxpoolWriteResponse::AddTransaction(double_spent) = response_channel.await? else { +//! panic!("tx-pool returned wrong response!"); +//! }; +//! assert!(double_spent.is_none()); +//! +//! // Now, let's try getting the blob +//! // of the transaction we just wrote. +//! let request = TxpoolReadRequest::TxBlob(TX_V1_SIG2.tx_hash); +//! let response_channel = read_handle.ready().await?.call(request); +//! let response = response_channel.await?; +//! +//! // This causes the writer thread on the +//! // other side of this handle to exit... +//! drop(write_handle); +//! // ...and this causes the reader thread-pool to exit. +//! drop(read_handle); +//! # Ok(()) } +//! ``` + +mod free; +pub mod interface; +mod read; +mod types; +mod write; + +pub use free::init; +pub use types::{TxpoolReadHandle, TxpoolWriteHandle}; diff --git a/storage/txpool/src/service/free.rs b/storage/txpool/src/service/free.rs new file mode 100644 index 00000000..003da552 --- /dev/null +++ b/storage/txpool/src/service/free.rs @@ -0,0 +1,37 @@ +use std::sync::Arc; + +use cuprate_database::{ConcreteEnv, InitError}; + +use crate::{ + service::{ + read::init_read_service, + types::{TxpoolReadHandle, TxpoolWriteHandle}, + write::init_write_service, + }, + Config, +}; + +//---------------------------------------------------------------------------------------------------- Init +#[cold] +#[inline(never)] // Only called once (?) 
+/// Initialize a database & thread-pool, and return a read/write handle to it. +/// +/// Once the returned handles are [`Drop::drop`]ed, the reader +/// thread-pool and writer thread will exit automatically. +/// +/// # Errors +/// This will forward the error if [`crate::open`] failed. +pub fn init( + config: Config, +) -> Result<(TxpoolReadHandle, TxpoolWriteHandle, Arc), InitError> { + let reader_threads = config.reader_threads; + + // Initialize the database itself. + let db = Arc::new(crate::open(config)?); + + // Spawn the Reader thread pool and Writer. + let readers = init_read_service(Arc::clone(&db), reader_threads); + let writer = init_write_service(Arc::clone(&db)); + + Ok((readers, writer, db)) +} diff --git a/storage/txpool/src/service/interface.rs b/storage/txpool/src/service/interface.rs new file mode 100644 index 00000000..93235c00 --- /dev/null +++ b/storage/txpool/src/service/interface.rs @@ -0,0 +1,59 @@ +//! Tx-pool [`service`](super) interface. +//! +//! This module contains `cuprate_txpool`'s [`tower::Service`] request and response enums. +use std::sync::Arc; + +use cuprate_types::TransactionVerificationData; + +use crate::types::TransactionHash; + +//---------------------------------------------------------------------------------------------------- TxpoolReadRequest +/// The transaction pool [`tower::Service`] read request type. +pub enum TxpoolReadRequest { + /// A request for the blob (raw bytes) of a transaction with the given hash. + TxBlob(TransactionHash), + /// A request for the [`TransactionVerificationData`] of a transaction in the tx pool. + TxVerificationData(TransactionHash), +} + +//---------------------------------------------------------------------------------------------------- TxpoolReadResponse +/// The transaction pool [`tower::Service`] read response type. +#[allow(clippy::large_enum_variant)] +pub enum TxpoolReadResponse { + /// A response containing the raw bytes of a transaction. + // TODO: use bytes::Bytes. 
+ TxBlob(Vec), + /// A response of [`TransactionVerificationData`]. + TxVerificationData(TransactionVerificationData), +} + +//---------------------------------------------------------------------------------------------------- TxpoolWriteRequest +/// The transaction pool [`tower::Service`] write request type. +pub enum TxpoolWriteRequest { + /// Add a transaction to the pool. + /// + /// Returns [`TxpoolWriteResponse::AddTransaction`]. + AddTransaction { + /// The tx to add. + tx: Arc, + /// A [`bool`] denoting the routing state of this tx. + /// + /// [`true`] if this tx is in the stem state. + state_stem: bool, + }, + /// Remove a transaction with the given hash from the pool. + /// + /// Returns [`TxpoolWriteResponse::Ok`]. + RemoveTransaction(TransactionHash), +} + +//---------------------------------------------------------------------------------------------------- TxpoolWriteResponse +/// The transaction pool [`tower::Service`] write response type. +#[derive(Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum TxpoolWriteResponse { + /// A [`TxpoolWriteRequest::AddTransaction`] response. + /// + /// If the inner value is [`Some`] the tx was not added to the pool as it double spends a tx with the given hash. 
+ AddTransaction(Option), + Ok, +} diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs new file mode 100644 index 00000000..56541641 --- /dev/null +++ b/storage/txpool/src/service/read.rs @@ -0,0 +1,103 @@ +use std::sync::Arc; + +use rayon::ThreadPool; + +use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; +use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; + +use crate::{ + ops::get_transaction_verification_data, + service::{ + interface::{TxpoolReadRequest, TxpoolReadResponse}, + types::{ReadResponseResult, TxpoolReadHandle}, + }, + tables::{OpenTables, TransactionBlobs}, + types::TransactionHash, +}; + +// TODO: update the docs here +//---------------------------------------------------------------------------------------------------- init_read_service +/// Initialize the [`TxpoolReadHandle`] thread-pool backed by `rayon`. +/// +/// This spawns `threads` amount of reader threads +/// attached to `env` and returns a handle to the pool. +/// +/// Should be called _once_ per actual database. +#[cold] +#[inline(never)] // Only called once. +pub(super) fn init_read_service(env: Arc, threads: ReaderThreads) -> TxpoolReadHandle { + init_read_service_with_pool(env, init_thread_pool(threads)) +} + +/// Initialize the [`TxpoolReadHandle`], with a specific rayon thread-pool instead of +/// creating a new one. +/// +/// Should be called _once_ per actual database. +#[cold] +#[inline(never)] // Only called once. +fn init_read_service_with_pool(env: Arc, pool: Arc) -> TxpoolReadHandle { + DatabaseReadService::new(env, pool, map_request) +} + +//---------------------------------------------------------------------------------------------------- Request Mapping +// This function maps [`Request`]s to function calls +// executed by the rayon DB reader threadpool. + +/// Map [`TxpoolReadRequest`]'s to specific database handler functions. 
+/// +/// This is the main entrance into all `Request` handler functions. +/// The basic structure is: +/// 1. `Request` is mapped to a handler function +/// 2. Handler function is called +/// 3. [`TxpoolReadResponse`] is returned +#[allow(clippy::needless_pass_by_value)] +fn map_request( + env: &ConcreteEnv, // Access to the database + request: TxpoolReadRequest, // The request we must fulfill +) -> ReadResponseResult { + match request { + TxpoolReadRequest::TxBlob(tx_hash) => tx_blob(env, &tx_hash), + TxpoolReadRequest::TxVerificationData(tx_hash) => tx_verification_data(env, &tx_hash), + } +} + +//---------------------------------------------------------------------------------------------------- Handler functions +// These are the actual functions that do stuff according to the incoming [`TxpoolReadRequest`]. +// +// Each function name is a 1-1 mapping (from CamelCase -> snake_case) to +// the enum variant name, e.g: `TxBlob` -> `tx_blob`. +// +// Each function will return the [`TxpoolReadResponse`] that we +// should send back to the caller in [`map_request()`]. +// +// INVARIANT: +// These functions are called above in `tower::Service::call()` +// using a custom threadpool which means any call to `par_*()` functions +// will be using the custom rayon DB reader thread-pool, not the global one. +// +// All functions below assume that this is the case, such that +// `par_*()` functions will not block the _global_ rayon thread-pool. + +/// [`TxpoolReadRequest::TxBlob`]. +#[inline] +fn tx_blob(env: &ConcreteEnv, tx_hash: &TransactionHash) -> ReadResponseResult { + let inner_env = env.env_inner(); + let tx_ro = inner_env.tx_ro()?; + + let tx_blobs_table = inner_env.open_db_ro::(&tx_ro)?; + + tx_blobs_table + .get(tx_hash) + .map(|blob| TxpoolReadResponse::TxBlob(blob.0)) +} + +/// [`TxpoolReadRequest::TxVerificationData`]. 
+#[inline] +fn tx_verification_data(env: &ConcreteEnv, tx_hash: &TransactionHash) -> ReadResponseResult { + let inner_env = env.env_inner(); + let tx_ro = inner_env.tx_ro()?; + + let tables = inner_env.open_tables(&tx_ro)?; + + get_transaction_verification_data(tx_hash, &tables).map(TxpoolReadResponse::TxVerificationData) +} diff --git a/storage/txpool/src/service/types.rs b/storage/txpool/src/service/types.rs new file mode 100644 index 00000000..5c6b97ce --- /dev/null +++ b/storage/txpool/src/service/types.rs @@ -0,0 +1,21 @@ +//! Database service type aliases. +//! +//! Only used internally for our [`tower::Service`] impls. + +use cuprate_database::RuntimeError; +use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle}; + +use crate::service::interface::{ + TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest, TxpoolWriteResponse, +}; + +/// The actual type of the response. +/// +/// Either our [`TxpoolReadResponse`], or a database error occurred. +pub(super) type ReadResponseResult = Result; + +/// The transaction pool database write service. +pub type TxpoolWriteHandle = DatabaseWriteHandle; + +/// The transaction pool database read service. 
+pub type TxpoolReadHandle = DatabaseReadService; diff --git a/storage/txpool/src/service/write.rs b/storage/txpool/src/service/write.rs new file mode 100644 index 00000000..8a3b1bf7 --- /dev/null +++ b/storage/txpool/src/service/write.rs @@ -0,0 +1,103 @@ +use std::sync::Arc; + +use cuprate_database::{ConcreteEnv, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database_service::DatabaseWriteHandle; +use cuprate_types::TransactionVerificationData; + +use crate::{ + ops::{self, TxPoolWriteError}, + service::{ + interface::{TxpoolWriteRequest, TxpoolWriteResponse}, + types::TxpoolWriteHandle, + }, + tables::OpenTables, + types::TransactionHash, +}; + +//---------------------------------------------------------------------------------------------------- init_write_service +/// Initialize the txpool write service from a [`ConcreteEnv`]. +pub(super) fn init_write_service(env: Arc) -> TxpoolWriteHandle { + DatabaseWriteHandle::init(env, handle_txpool_request) +} + +//---------------------------------------------------------------------------------------------------- handle_txpool_request +/// Handle an incoming [`TxpoolWriteRequest`], returning a [`TxpoolWriteResponse`]. +fn handle_txpool_request( + env: &ConcreteEnv, + req: &TxpoolWriteRequest, +) -> Result { + match req { + TxpoolWriteRequest::AddTransaction { tx, state_stem } => { + add_transaction(env, tx, *state_stem) + } + TxpoolWriteRequest::RemoveTransaction(tx_hash) => remove_transaction(env, tx_hash), + } +} + +//---------------------------------------------------------------------------------------------------- Handler functions +// These are the actual functions that do stuff according to the incoming [`TxpoolWriteRequest`]. +// +// Each function name is a 1-1 mapping (from CamelCase -> snake_case) to +// the enum variant name, e.g: `BlockExtendedHeader` -> `block_extended_header`. +// +// Each function will return the [`Response`] that we +// should send back to the caller in [`map_request()`]. 
+ +/// [`TxpoolWriteRequest::AddTransaction`] +fn add_transaction( + env: &ConcreteEnv, + tx: &TransactionVerificationData, + state_stem: bool, +) -> Result { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + + if let Err(e) = ops::add_transaction(tx, state_stem, &mut tables_mut) { + drop(tables_mut); + // error adding the tx, abort the DB transaction. + TxRw::abort(tx_rw) + .expect("could not maintain database atomicity by aborting write transaction"); + + return match e { + TxPoolWriteError::DoubleSpend(tx_hash) => { + // If we couldn't add the tx due to a double spend still return ok, but include the tx + // this double spent. + // TODO: mark the double spent tx? + Ok(TxpoolWriteResponse::AddTransaction(Some(tx_hash))) + } + TxPoolWriteError::Database(e) => Err(e), + }; + }; + + drop(tables_mut); + // The tx was added to the pool successfully. + TxRw::commit(tx_rw)?; + Ok(TxpoolWriteResponse::AddTransaction(None)) +} + +/// [`TxpoolWriteRequest::RemoveTransaction`] +fn remove_transaction( + env: &ConcreteEnv, + tx_hash: &TransactionHash, +) -> Result { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + + if let Err(e) = ops::remove_transaction(tx_hash, &mut tables_mut) { + drop(tables_mut); + // error removing the tx, abort the DB transaction. + TxRw::abort(tx_rw) + .expect("could not maintain database atomicity by aborting write transaction"); + + return Err(e); + } + + drop(tables_mut); + + TxRw::commit(tx_rw)?; + Ok(TxpoolWriteResponse::Ok) +} diff --git a/storage/txpool/src/tables.rs b/storage/txpool/src/tables.rs new file mode 100644 index 00000000..dbb686ae --- /dev/null +++ b/storage/txpool/src/tables.rs @@ -0,0 +1,45 @@ +//! Tx-pool Database tables. +//! +//! # Table marker structs +//! This module contains all the table definitions used by [`cuprate_txpool`](crate). +//! +//! 
The zero-sized structs here represent the table type; +//! they all are essentially marker types that implement [`cuprate_database::Table`]. +//! +//! Table structs are `CamelCase`, and their static string +//! names used by the actual database backend are `snake_case`. +//! +//! For example: [`TransactionBlobs`] -> `transaction_blobs`. +//! +//! # Traits +//! This module also contains a set of traits for +//! accessing _all_ tables defined here at once. +use cuprate_database::{define_tables, StorableVec}; + +use crate::types::{KeyImage, RawCachedVerificationState, TransactionHash, TransactionInfo}; + +define_tables! { + /// Serialized transaction blobs. + /// + /// This table contains the transaction blobs of all the transactions in the pool. + 0 => TransactionBlobs, + TransactionHash => StorableVec, + + /// Transaction information. + /// + /// This table contains information of all transactions currently in the pool. + 1 => TransactionInfos, + TransactionHash => TransactionInfo, + + /// Cached transaction verification state. + /// + /// This table contains the cached verification state of all transactions in the pool. + 2 => CachedVerificationState, + TransactionHash => RawCachedVerificationState, + + /// Spent key images. + /// + /// This table contains the spent key images from all transactions in the pool. + 3 => SpentKeyImages, + KeyImage => TransactionHash +} diff --git a/storage/txpool/src/types.rs b/storage/txpool/src/types.rs new file mode 100644 index 00000000..09b0ce0d --- /dev/null +++ b/storage/txpool/src/types.rs @@ -0,0 +1,126 @@ +//! Tx-pool [table](crate::tables) types. +//! +//! This module contains all types used by the database tables, +//! and aliases for common types that use the same underlying +//! primitive type. +//! +//! +use bytemuck::{Pod, Zeroable}; + +use monero_serai::transaction::Timelock; + +use cuprate_types::{CachedVerificationState, HardFork}; + +/// An input's key image. 
+pub type KeyImage = [u8; 32]; + +/// A transaction hash. +pub type TransactionHash = [u8; 32]; + +bitflags::bitflags! { + /// Flags representing the state of the transaction in the pool. + #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] + #[repr(transparent)] + pub struct TxStateFlags: u8 { + /// A flag for if the transaction is in the stem state. + const STATE_STEM = 0b0000_0001; + /// A flag for if we have seen another tx double spending this tx. + const DOUBLE_SPENT = 0b0000_0010; + } +} + +/// Information on a tx-pool transaction. +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct TransactionInfo { + /// The transaction's fee. + pub fee: u64, + /// The transaction's weight. + pub weight: usize, + /// [`TxStateFlags`] of this transaction. + pub flags: TxStateFlags, + #[allow(clippy::pub_underscore_fields)] + /// Explicit padding so that we have no implicit padding bytes in `repr(C)`. + /// + /// Allows potential future expansion of this type. + pub _padding: [u8; 7], +} + +/// [`CachedVerificationState`] in a format that can be stored into the database. +/// +/// This type impls [`Into`] & [`From`] [`CachedVerificationState`]. +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct RawCachedVerificationState { + /// The raw hash, will be all `0`s if there is no block hash that this is valid for. + raw_valid_at_hash: [u8; 32], + /// The raw hard-fork, will be `0` if there is no hf this was validated at. + raw_hf: u8, + /// The raw [`u64`] timestamp as little endian bytes ([`u64::to_le_bytes`]). + /// + /// This will be `0` if there is no timestamp that needs to be passed for this to + /// be valid. + /// + /// Not a [`u64`] as if it was this type would have an alignment requirement. 
+ raw_valid_past_timestamp: [u8; 8], +} + +impl From for CachedVerificationState { + fn from(value: RawCachedVerificationState) -> Self { + // if the hash is all `0`s then there is no hash this is valid at. + if value.raw_valid_at_hash == [0; 32] { + return Self::NotVerified; + } + + let raw_valid_past_timestamp = u64::from_le_bytes(value.raw_valid_past_timestamp); + + // if the timestamp is 0, there is no timestamp that needs to be passed. + if raw_valid_past_timestamp == 0 { + return Self::ValidAtHashAndHF { + block_hash: value.raw_valid_at_hash, + hf: HardFork::from_version(value.raw_hf) + .expect("hard-fork values stored in the DB should always be valid"), + }; + } + + Self::ValidAtHashAndHFWithTimeBasedLock { + block_hash: value.raw_valid_at_hash, + hf: HardFork::from_version(value.raw_hf) + .expect("hard-fork values stored in the DB should always be valid"), + time_lock: Timelock::Time(raw_valid_past_timestamp), + } + } +} + +#[allow(clippy::fallible_impl_from)] // only panics in invalid states +impl From for RawCachedVerificationState { + fn from(value: CachedVerificationState) -> Self { + match value { + CachedVerificationState::NotVerified => Self { + raw_valid_at_hash: [0; 32], + raw_hf: 0, + raw_valid_past_timestamp: [0; 8], + }, + CachedVerificationState::ValidAtHashAndHF { block_hash, hf } => Self { + raw_valid_at_hash: block_hash, + raw_hf: hf.as_u8(), + raw_valid_past_timestamp: [0; 8], + }, + CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock { + block_hash, + hf, + time_lock, + } => { + let Timelock::Time(time) = time_lock else { + panic!("ValidAtHashAndHFWithTimeBasedLock timelock was not time-based"); + }; + + Self { + raw_valid_at_hash: block_hash, + raw_hf: hf.as_u8(), + raw_valid_past_timestamp: time.to_le_bytes(), + } + } + } + } +} diff --git a/test-utils/src/data/mod.rs b/test-utils/src/data/mod.rs index 696c6865..b9d42fb8 100644 --- a/test-utils/src/data/mod.rs +++ b/test-utils/src/data/mod.rs @@ -15,14 +15,14 @@ //! 
let tx: Transaction = Transaction::read(&mut TX_E57440).unwrap(); //! ``` //! -//! ## Functions -//! The free functions provide access to typed data found in `cuprate_types`: +//! ## Statics +//! The statics provide access to typed data found in `cuprate_types`: //! ```rust //! # use cuprate_test_utils::data::*; //! use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; //! -//! let block: VerifiedBlockInformation = block_v16_tx0().clone(); -//! let tx: VerifiedTransactionInformation = tx_v1_sig0().clone(); +//! let block: VerifiedBlockInformation = BLOCK_V16_TX0.clone(); +//! let tx: VerifiedTransactionInformation = TX_V1_SIG0.clone(); //! ``` mod constants; @@ -31,7 +31,7 @@ pub use constants::{ TX_9E3F73, TX_B6B439, TX_D7FEBD, TX_E2D393, TX_E57440, }; -mod free; -pub use free::{ - block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_fee, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3, +mod statics; +pub use statics::{ + tx_fee, BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3, TX_V1_SIG0, TX_V1_SIG2, TX_V2_RCT3, }; diff --git a/test-utils/src/data/free.rs b/test-utils/src/data/statics.rs similarity index 82% rename from test-utils/src/data/free.rs rename to test-utils/src/data/statics.rs index d7f61ae5..8b98171a 100644 --- a/test-utils/src/data/free.rs +++ b/test-utils/src/data/statics.rs @@ -1,4 +1,4 @@ -//! Free functions to access data. +//! `static LazyLock`s to access data. 
#![allow( const_item_mutation, // `R: Read` needs `&mut self` @@ -6,7 +6,7 @@ )] //---------------------------------------------------------------------------------------------------- Import -use std::sync::OnceLock; +use std::sync::LazyLock; use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; @@ -141,8 +141,7 @@ pub fn tx_fee(tx: &Transaction) -> u64 { } //---------------------------------------------------------------------------------------------------- Blocks -/// Generate a block accessor function with this signature: -/// `fn() -> &'static VerifiedBlockInformation` +/// Generate a `static LazyLock`. /// /// This will use `VerifiedBlockMap` type above to do various /// checks on the input data and makes sure it seems correct. @@ -153,9 +152,9 @@ pub fn tx_fee(tx: &Transaction) -> u64 { /// - Monero RPC (see cuprate_test_utils::rpc for this) /// /// See below for actual usage. -macro_rules! verified_block_information_fn { +macro_rules! verified_block_information { ( - fn_name: $fn_name:ident, // Name of the function created + name: $name:ident, // Name of the `LazyLock` created block_blob: $block_blob:ident, // Block blob ([u8], found in `constants.rs`) tx_blobs: [$($tx_blob:ident),*], // Array of contained transaction blobs pow_hash: $pow_hash:literal, // PoW hash as a string literal @@ -183,7 +182,7 @@ macro_rules! verified_block_information_fn { #[doc = "# use hex_literal::hex;"] #[doc = "use cuprate_helper::map::combine_low_high_bits_to_u128;"] #[doc = ""] - #[doc = concat!("let block = ", stringify!($fn_name), "();")] + #[doc = concat!("let block = &*", stringify!($name), ";")] #[doc = concat!("assert_eq!(&block.block.serialize(), ", stringify!($block_blob), ");")] #[doc = concat!("assert_eq!(block.pow_hash, hex!(\"", $pow_hash, "\"));")] #[doc = concat!("assert_eq!(block.height, ", $height, ");")] @@ -201,28 +200,25 @@ macro_rules! 
verified_block_information_fn { "));" )] /// ``` - pub fn $fn_name() -> &'static VerifiedBlockInformation { - static BLOCK: OnceLock = OnceLock::new(); - BLOCK.get_or_init(|| { - VerifiedBlockMap { - block_blob: $block_blob, - pow_hash: hex!($pow_hash), - height: $height, - generated_coins: $generated_coins, - weight: $weight, - long_term_weight: $long_term_weight, - cumulative_difficulty_low: $cumulative_difficulty_low, - cumulative_difficulty_high: $cumulative_difficulty_high, - txs: &[$($tx_blob),*], - } - .into_verified() - }) - } + pub static $name: LazyLock = LazyLock::new(|| { + VerifiedBlockMap { + block_blob: $block_blob, + pow_hash: hex!($pow_hash), + height: $height, + generated_coins: $generated_coins, + weight: $weight, + long_term_weight: $long_term_weight, + cumulative_difficulty_low: $cumulative_difficulty_low, + cumulative_difficulty_high: $cumulative_difficulty_high, + txs: &[$($tx_blob),*], + } + .into_verified() + }); }; } -verified_block_information_fn! { - fn_name: block_v1_tx2, +verified_block_information! { + name: BLOCK_V1_TX2, block_blob: BLOCK_5ECB7E, tx_blobs: [TX_2180A8, TX_D7FEBD], pow_hash: "c960d540000459480560b7816de968c7470083e5874e10040bdd4cc501000000", @@ -235,8 +231,8 @@ verified_block_information_fn! { tx_len: 2, } -verified_block_information_fn! { - fn_name: block_v9_tx3, +verified_block_information! { + name: BLOCK_V9_TX3, block_blob: BLOCK_F91043, tx_blobs: [TX_E2D393, TX_E57440, TX_B6B439], pow_hash: "7c78b5b67a112a66ea69ea51477492057dba9cfeaa2942ee7372c61800000000", @@ -249,8 +245,8 @@ verified_block_information_fn! { tx_len: 3, } -verified_block_information_fn! { - fn_name: block_v16_tx0, +verified_block_information! { + name: BLOCK_V16_TX0, block_blob: BLOCK_43BD1F, tx_blobs: [], pow_hash: "10b473b5d097d6bfa0656616951840724dfe38c6fb9c4adf8158800300000000", @@ -264,13 +260,12 @@ verified_block_information_fn! 
{ } //---------------------------------------------------------------------------------------------------- Transactions -/// Generate a transaction accessor function with this signature: -/// `fn() -> &'static VerifiedTransactionInformation` +/// Generate a `const LazyLock`. /// -/// Same as [`verified_block_information_fn`] but for transactions. -macro_rules! transaction_verification_data_fn { +/// Same as [`verified_block_information`] but for transactions. +macro_rules! transaction_verification_data { ( - fn_name: $fn_name:ident, // Name of the function created + name: $name:ident, // Name of the `LazyLock` created tx_blobs: $tx_blob:ident, // Transaction blob ([u8], found in `constants.rs`) weight: $weight:literal, // Transaction weight hash: $hash:literal, // Transaction hash as a string literal @@ -280,35 +275,34 @@ macro_rules! transaction_verification_data_fn { /// ```rust #[doc = "# use cuprate_test_utils::data::*;"] #[doc = "# use hex_literal::hex;"] - #[doc = concat!("let tx = ", stringify!($fn_name), "();")] + #[doc = concat!("let tx = &*", stringify!($name), ";")] #[doc = concat!("assert_eq!(&tx.tx.serialize(), ", stringify!($tx_blob), ");")] #[doc = concat!("assert_eq!(tx.tx_blob, ", stringify!($tx_blob), ");")] #[doc = concat!("assert_eq!(tx.tx_weight, ", $weight, ");")] #[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")] /// ``` - pub fn $fn_name() -> &'static VerifiedTransactionInformation { - static TX: OnceLock = OnceLock::new(); - TX.get_or_init(|| to_tx_verification_data($tx_blob)) - } + pub static $name: LazyLock = LazyLock::new(|| { + to_tx_verification_data($tx_blob) + }); }; } -transaction_verification_data_fn! { - fn_name: tx_v1_sig0, +transaction_verification_data! { + name: TX_V1_SIG0, tx_blobs: TX_3BC7FF, weight: 248, hash: "3bc7ff015b227e7313cc2e8668bfbb3f3acbee274a9c201d6211cf681b5f6bb1", } -transaction_verification_data_fn! { - fn_name: tx_v1_sig2, +transaction_verification_data! 
{ + name: TX_V1_SIG2, tx_blobs: TX_9E3F73, weight: 448, hash: "9e3f73e66d7c7293af59c59c1ff5d6aae047289f49e5884c66caaf4aea49fb34", } -transaction_verification_data_fn! { - fn_name: tx_v2_rct3, +transaction_verification_data! { + name: TX_V2_RCT3, tx_blobs: TX_84D48D, weight: 2743, hash: "84d48dc11ec91950f8b70a85af9db91fe0c8abef71ef5db08304f7344b99ea66", @@ -328,7 +322,7 @@ mod tests { #[tokio::test] async fn block_same_as_rpc() { let rpc = HttpRpcClient::new(None).await; - for block in [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()] { + for block in [&*BLOCK_V1_TX2, &*BLOCK_V9_TX3, &*BLOCK_V16_TX0] { println!("block_height: {}", block.height); let block_rpc = rpc.get_verified_block_information(block.height).await; assert_eq!(block, &block_rpc); @@ -342,16 +336,12 @@ mod tests { async fn tx_same_as_rpc() { let rpc = HttpRpcClient::new(None).await; - let mut txs = [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()] + let mut txs = [&*BLOCK_V1_TX2, &*BLOCK_V9_TX3, &*BLOCK_V16_TX0] .into_iter() .flat_map(|block| block.txs.iter().cloned()) .collect::>(); - txs.extend([ - tx_v1_sig0().clone(), - tx_v1_sig2().clone(), - tx_v2_rct3().clone(), - ]); + txs.extend([TX_V1_SIG0.clone(), TX_V1_SIG2.clone(), TX_V2_RCT3.clone()]); for tx in txs { println!("tx_hash: {:?}", tx.tx_hash); diff --git a/types/Cargo.toml b/types/Cargo.toml index 4c31cfc0..4b9204b9 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -23,10 +23,12 @@ bytes = { workspace = true } curve25519-dalek = { workspace = true } monero-serai = { workspace = true } serde = { workspace = true, features = ["derive"], optional = true } -borsh = { workspace = true, optional = true } thiserror = { workspace = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -[dev-dependencies] \ No newline at end of file +[dev-dependencies] + +[lints] +workspace = true \ No newline at end of file diff --git a/types/src/lib.rs b/types/src/lib.rs index d70f4c31..0b0dbe67 
100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,76 +1,6 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - single_use_lifetimes, - while_true, - clippy::missing_docs_in_private_items, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_doc_comments, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style -)] -#![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] +// `proptest` needs this internally. 
+#![cfg_attr(any(feature = "proptest"), allow(non_local_definitions))] // Allow some lints when running in debug mode. #![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))] @@ -97,4 +27,5 @@ pub use types::{ //---------------------------------------------------------------------------------------------------- Feature-gated #[cfg(feature = "blockchain")] pub mod blockchain; + //---------------------------------------------------------------------------------------------------- Private diff --git a/types/src/transaction_verification_data.rs b/types/src/transaction_verification_data.rs index 68e17b81..3dfe5fdf 100644 --- a/types/src/transaction_verification_data.rs +++ b/types/src/transaction_verification_data.rs @@ -4,7 +4,7 @@ use std::sync::Mutex; use monero_serai::transaction::{Timelock, Transaction}; -use crate::HardFork; +use crate::{HardFork, VerifiedTransactionInformation}; /// An enum representing all valid Monero transaction versions. #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] @@ -92,3 +92,23 @@ pub struct TransactionVerificationData { /// The verification state of this transaction. pub cached_verification_state: Mutex, } + +#[derive(Debug, Copy, Clone, thiserror::Error)] +#[error("Error converting a verified tx to a cached verification data tx.")] +pub struct TxConversionError; + +impl TryFrom for TransactionVerificationData { + type Error = TxConversionError; + + fn try_from(value: VerifiedTransactionInformation) -> Result { + Ok(Self { + version: TxVersion::from_raw(value.tx.version()).ok_or(TxConversionError)?, + tx: value.tx, + tx_blob: value.tx_blob, + tx_weight: value.tx_weight, + fee: value.fee, + tx_hash: value.tx_hash, + cached_verification_state: Mutex::new(CachedVerificationState::NotVerified), + }) + } +}