From 690c0660648147b04956f60d723ffb1843793c72 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 001/103] State resolution outline for /send --- Cargo.lock | 152 +++++++------------ Cargo.toml | 8 +- src/main.rs | 2 +- src/pdu.rs | 2 +- src/ruma_wrapper.rs | 14 +- src/server_server.rs | 347 +++++++++++++++++++++++++++++++++++++++---- 6 files changed, 384 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5be6aa..4734f80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -215,21 +215,11 @@ dependencies = [ "trust-dns-resolver", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if 0.1.10", - "wasm-bindgen", -] - [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" +checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" [[package]] name = "constant_time_eq" @@ -631,9 +621,9 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] @@ -660,9 +650,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" dependencies = [ "bytes", "fnv", @@ -758,9 +748,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", "hashbrown", @@ -1035,9 +1025,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.36" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cf75f38f16cb05ea017784dc6dbfd354f76c223dba37701734c4f5a9337d02" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1109,12 +1099,12 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.30" +version = "0.10.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" dependencies = [ "bitflags", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "foreign-types", "lazy_static", "libc", @@ -1129,18 +1119,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.12.0+1.1.1h" +version = "111.13.0+1.1.1i" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61" +checksum = "045e4dc48af57aad93d665885789b43222ae26f4886494da12d1ed58d309dcb6" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" dependencies = [ "autocfg", "cc", @@ -1163,9 +1153,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ "cfg-if 1.0.0", "instant", @@ -1177,9 +1167,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" @@ -1276,9 +1266,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "png" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe7f9f1c730833200b134370e1d5098964231af8450bce9b78ee3ab5278b970" +checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" dependencies = [ "bitflags", "crc32fast", @@ -1343,9 +1333,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" dependencies = [ "proc-macro2", ] @@ -1457,9 +1447,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", "bytes", @@ -1486,7 +1476,6 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] @@ -1586,7 +1575,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "js_int", @@ -1604,7 +1593,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "http", "percent-encoding", @@ -1619,7 +1608,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1630,7 +1619,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "ruma-api", "ruma-common", @@ -1644,7 +1633,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "http", @@ -1663,7 +1652,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "maplit", @@ -1676,7 +1665,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-common", @@ -1690,7 +1679,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1701,7 +1690,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-api", @@ -1716,7 +1705,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "paste", "rand", @@ -1730,7 +1719,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro2", "quote", @@ -1741,7 +1730,7 @@ 
dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "serde", ] @@ -1749,7 +1738,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "form_urlencoded", "itoa", @@ -1762,7 +1751,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1773,7 +1762,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "base64 0.12.3", "ring", @@ -1839,12 +1828,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -1962,9 +1945,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -1999,13 +1982,12 @@ checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" [[package]] name = "socket2" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2033,7 +2015,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#a1c15253f0777baad251da47c3f2c016cfed6f7e" +source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" dependencies = [ "itertools", "maplit", @@ -2116,9 +2098,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.54" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44" +checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" dependencies = [ 
"proc-macro2", "quote", @@ -2223,9 +2205,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.23" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" +checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ "bytes", "fnv", @@ -2293,9 +2275,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] @@ -2472,9 +2454,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" @@ -2566,30 +2548,6 @@ version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" -[[package]] -name = "wasm-bindgen-test" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" -dependencies = [ - "console_error_panic_hook", - "js-sys", - "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" -dependencies = [ - "proc-macro2", - "quote", -] - [[package]] name = "web-sys" version = "0.3.46" diff --git a/Cargo.toml b/Cargo.toml index 4b87199..0ed8fb0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "ee814aa84934530d76f5e4b275d739805b49bdef" } -# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = 
"https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = { version = "0.2.23" } diff --git a/src/main.rs b/src/main.rs index 9c0eab6..fe7ab0d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -178,6 +178,6 @@ async fn main() { } #[catch(404)] -fn not_found_catcher(_req: &'_ Request<'_>) -> String { +fn not_found_catcher(_: &Request<'_>) -> String { "404 Not Found".to_owned() } diff --git a/src/pdu.rs b/src/pdu.rs index 75ef492..f6ec415 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -17,7 +17,7 @@ use std::{ time::UNIX_EPOCH, }; -#[derive(Deserialize, Serialize, Debug)] +#[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: EventId, pub room_id: RoomId, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 0fdca74..ce0cc74 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ use crate::Error; use ruma::{ - api::{AuthScheme, OutgoingRequest}, + api::{AuthScheme, IncomingRequest, OutgoingRequest}, identifiers::{DeviceId, UserId}, Outgoing, }; @@ -29,7 +29,7 @@ use { /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { +pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, @@ -40,10 +40,7 @@ pub struct Ruma { #[cfg(feature = "conduit_bin")] impl<'a, T: Outgoing + OutgoingRequest> FromTransformedData<'a> for Ruma where - ::Incoming: TryFrom>> + std::fmt::Debug, - <::Incoming as std::convert::TryFrom< - http::request::Request>, - >>::Error: std::fmt::Debug, + T::Incoming: IncomingRequest, { type Error = (); // TODO: Better error handling type Owned = Data; @@ -149,8 +146,7 @@ where let http_request = http_request.body(body.clone()).unwrap(); debug!("{:?}", http_request); - - match ::Incoming::try_from(http_request) { + match ::try_from_http_request(http_request) { Ok(t) => Success(Ruma { body: t, sender_user, @@ -170,7 +166,7 @@ where } } -impl Deref for Ruma { +impl Deref for Ruma { type Target = T::Incoming; fn deref(&self) -> &Self::Target { diff --git a/src/server_server.rs b/src/server_server.rs index 7ff9e3f..d68e9fa 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{info, warn}; +use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -11,17 +11,18 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::get_missing_events, + event::{get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - EventId, RoomId, ServerName, ServerSigningKeyId, UserId, + EventId, 
RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; +use state_res::StateMap; use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, @@ -476,6 +477,34 @@ pub async fn get_public_rooms_route( .into()) } +#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum PrevEvents { + Sequential(T), + Fork(Vec), +} + +impl IntoIterator for PrevEvents { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + match self { + Self::Sequential(item) => vec![item].into_iter(), + Self::Fork(list) => list.into_iter(), + } + } +} + +impl PrevEvents { + pub fn new(id: &[T]) -> Self { + match id { + [] => panic!("All events must have previous event"), + [single_id] => Self::Sequential(single_id.clone()), + rest => Self::Fork(rest.to_vec()), + } + } +} + #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -532,53 +561,313 @@ pub async fn send_transaction_message_route<'a>( // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - // Ruma/PduEvent/StateEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 1. Is a valid event, otherwise it is dropped. + // Ruma/PduEvent/StateEvent satisfies this - // state-res checks signatures - 2. Passes signature checks, otherwise event is dropped. - - // 3. Passes hash checks, otherwise it is redacted before being processed further. - // TODO: redact event if hashing fails let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. + let keys = db.globals.keypair(); + let mut pub_key_set = BTreeMap::new(); + pub_key_set.insert( + "ed25519:1".to_string(), + String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), + ); + let mut pub_key_map = BTreeMap::new(); + pub_key_map.insert("domain".to_string(), pub_key_set); + + let value = + match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => { + resolved_map + .insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + } + } else { + value + } + } + Err(_e) => { + resolved_map.insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + }; + let pdu = serde_json::from_value::( serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), ) .expect("all ruma pdus are conduit pdus"); - let room_id = &pdu.room_id; // If we have no idea about this room skip the PDU - if !db.rooms.exists(room_id)? { + if !db.rooms.exists(&pdu.room_id)? 
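// Illustrative aside (not part of the patch): steps 2 and 3 above verify an incoming PDU's
// signatures and content hashes with ruma::signatures, keeping a redacted copy when only the
// signatures check out. A minimal sketch of that branching, assuming the same verify_event /
// redact API the hunk calls; the key map maps server name -> key id -> public key string and
// in practice would hold the origin server's published keys. `verify_or_redact` is a made-up
// helper name.
use std::collections::BTreeMap;
use ruma::{serde::CanonicalJsonObject, signatures, RoomVersionId};

fn verify_or_redact(
    pub_key_map: &BTreeMap<String, BTreeMap<String, String>>,
    value: CanonicalJsonObject,
) -> Result<CanonicalJsonObject, String> {
    match signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) {
        // Signatures are valid but the content hash is not: process a redacted copy (step 3).
        Ok(signatures::Verified::Signatures) => {
            signatures::redact(&value, &RoomVersionId::Version6)
                .map_err(|_| "redaction failed".to_owned())
        }
        // Fully verified: use the event unchanged.
        Ok(_) => Ok(value),
        // Bad signature: the caller drops the event (step 2).
        Err(_) => Err("signature verification failed".to_owned()),
    }
}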
{ resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); + // TODO: remove the need to convert to state_res + let event = pdu.convert_for_state_res(); + let previous = pdu + .prev_events + .first() + .map(|id| { + db.rooms + .get_pdu(id) + .expect("todo") + .map(|ev| ev.convert_for_state_res()) + }) + .flatten(); - db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &pdu, - value, - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + // 4. + let auth_events = db.rooms.get_auth_events( + &pdu.room_id, + &pdu.kind, + &pdu.sender, + pdu.state_key.as_deref(), + pdu.content.clone(), )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + auth_events + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with auth events".into()), + ); + continue; } - resolved_map.insert(event_id, Ok::<(), String>(())); + let mut previous_states = vec![]; + for id in &pdu.prev_events { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + previous_states.push(state); + } else { + // fetch the state + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. + let state_at_event = if previous_states.is_empty() { + // State is empty + Default::default() + } else if previous_states.len() == 1 { + previous_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &previous_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + state_at_event + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Event failed auth with state_at + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with state at the event".into()), + ); + continue; + } + + // The event could still be soft failed + append_state_soft(&db, &pdu)?; + + // Gather the forward extremities and resolve + let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; + let mut fork_states = vec![]; + for id in &forward_extrems { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? 
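// Illustrative aside (not part of the patch): when an event has more than one prev_event, the
// outline above resolves the per-prev-event states into one "state at the event" before the
// step-5 auth check, and later does the same across the forward extremities. A sketch of that
// empty / single / fork branching, assuming the StateResolution::resolve signature exactly as
// the hunk calls it and the imports already present in this file; like the outline, it simply
// panics if resolution fails. `reduce_fork_states` is a made-up helper name.
fn reduce_fork_states(
    room_id: &RoomId,
    fork_states: Vec<StateMap<EventId>>,
    db: &Database,
) -> StateMap<EventId> {
    match fork_states.len() {
        // No prior state known: start from an empty state map.
        0 => StateMap::new(),
        // Linear history: the single predecessor's state is the state at the event.
        1 => fork_states[0].clone(),
        // A fork: run state resolution across all branches, using the room tree as the store.
        _ => state_res::StateResolution::resolve(
            room_id,
            &RoomVersionId::Version6,
            &fork_states,
            None,
            &db.rooms,
        )
        .expect("state resolution failed"),
    }
}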
+ .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + fork_states.push(state); + } else { + // This is probably an error?? + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 6. + let state_at_forks = if fork_states.is_empty() { + // State is empty + Default::default() + } else if fork_states.len() == 1 { + fork_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous, + state_at_forks + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Soft fail + resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + } else { + append_state(&db, &pdu)?; + // Event has passed all auth/stateres checks + resolved_map.insert(event.event_id(), Ok(())); + } } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 9e83d2b2d570e1d0addc5a2cd64f34bf262a9fb1 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 31 Dec 2020 08:40:49 -0500 Subject: [PATCH 002/103] Update state-res, use the new Event trait This also bumps ruma to latest and removes js_int infavor of the ruma re-export --- Cargo.lock | 141 +++++++++++++++++++------------- Cargo.toml | 8 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 2 +- src/client_server/membership.rs | 22 
+++-- src/client_server/message.rs | 7 +- src/database/rooms.rs | 68 +++++++++------ src/database/rooms/edus.rs | 3 +- src/database/users.rs | 3 +- src/pdu.rs | 116 +++++++++++++------------- src/server_server.rs | 64 ++++++++------- 11 files changed, 252 insertions(+), 184 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4734f80..609226a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -196,10 +196,9 @@ dependencies = [ "directories", "http", "image", - "js_int", "log", - "rand", "regex", + "rand 0.7.3", "reqwest", "ring", "rocket", @@ -571,6 +570,17 @@ dependencies = [ "wasi", ] +[[package]] +name = "getrandom" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "wasi", +] + [[package]] name = "gif" version = "0.11.1" @@ -833,9 +843,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.1.9" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96797f53235a1d6dc985f244a69de54b04c45b7e0e357a35c85a45a847d92f2" +checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" dependencies = [ "serde", ] @@ -1346,11 +1356,23 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.0", + "rand_hc 0.3.0", ] [[package]] @@ -1360,7 +1382,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.0", ] [[package]] @@ -1369,7 +1401,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", +] + +[[package]] +name = "rand_core" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +dependencies = [ + "getrandom 0.2.0", ] [[package]] @@ -1378,7 +1419,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.0", ] [[package]] @@ -1393,7 +1443,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] @@ -1521,7 +1571,7 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand", + "rand 0.7.3", "ref-cast", "rocket_codegen", "rocket_http", @@ -1575,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "js_int", @@ -1593,7 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "http", "percent-encoding", @@ -1608,7 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1619,7 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "ruma-api", "ruma-common", @@ -1633,7 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "http", @@ -1652,7 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "maplit", @@ -1665,7 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-common", @@ -1679,7 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1690,7 +1740,7 @@ 
dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-api", @@ -1705,21 +1755,21 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand", + "rand 0.8.0", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", + "ruma-serde-macros", "serde", - "strum", ] [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro2", "quote", @@ -1730,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "serde", ] @@ -1738,7 +1788,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "form_urlencoded", "itoa", @@ -1750,8 +1800,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +version = "0.2.3" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1762,9 +1812,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -2015,7 +2065,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" +source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" dependencies = [ "itertools", "maplit", @@ -2075,27 +2125,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" 
-[[package]] -name = "strum" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b89a286a7e3b5720b9a477b23253bc50debac207c8d21505f8e70b36792f11b5" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.19.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "syn" version = "1.0.55" @@ -2115,7 +2144,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2344,7 +2373,7 @@ dependencies = [ "idna", "lazy_static", "log", - "rand", + "rand 0.7.3", "smallvec", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 0ed8fb0..44df254 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,13 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# TODO: remove the gen-eventid feature +state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio @@ -37,8 +38,7 @@ log = "0.4.11" http = "0.2.1" # Used to find data directory for default db path directories = "3.0.1" -# Used for number types for ruma -js_int = "0.1.9" + # Used for ruma wrapper serde_json = { version = "1.0.60", features = ["raw_value"] } # Used for appservice registration files diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index fa5db3a..2bff20c 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -124,7 +124,7 @@ pub async fn get_room_visibility_route( pub async fn get_public_rooms_filtered_helper( db: &Database, server: Option<&ServerName>, - limit: Option, + limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, diff --git a/src/client_server/media.rs 
b/src/client_server/media.rs index 156040b..f792062 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -131,7 +131,7 @@ pub async fn get_content_thumbnail_route( allow_remote: false, height: body.height, width: body.width, - method: body.method, + method: body.method.clone(), server_name: &body.server_name, media_id: &body.media_id, }, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b459d37..eb44085 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,7 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::StateEvent; +use state_res::Event; use std::{ collections::{BTreeMap, HashMap, HashSet}, convert::TryFrom, @@ -594,19 +594,19 @@ async fn join_room_by_id_helper( .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - state_res::StateEvent::from_id_canon_obj(event_id.clone(), value.clone()) + PduEvent::from_id_val(&event_id, value.clone()) .map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") }) }) - .collect::>>>()?; + .collect::>>>()?; let control_events = event_map .values() - .filter(|pdu| pdu.is_power_event()) - .map(|pdu| pdu.event_id()) + .filter(|pdu| state_res::is_power_event(pdu)) + .map(|pdu| pdu.event_id.clone()) .collect::>(); // These events are not guaranteed to be sorted but they are resolved according to spec @@ -646,7 +646,8 @@ async fn join_room_by_id_helper( .cloned() .collect::>(); - let power_level = resolved_control_events.get(&(EventType::RoomPowerLevels, "".into())); + let power_level = + resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string()))); // Sort the remaining non control events let sorted_event_ids = state_res::StateResolution::mainline_sort( room_id, @@ -685,8 +686,13 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( +<<<<<<< HEAD &PduEvent::from(&**pdu), utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +======= + &pdu, + &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +>>>>>>> 6232d1f (Update state-res, use the new Event trait) count, pdu_id.clone().into(), &db.globals, @@ -695,7 +701,9 @@ async fn join_room_by_id_helper( )?; if state_events.contains(ev_id) { - state.insert((pdu.kind(), pdu.state_key()), pdu_id); + if let Some(key) = &pdu.state_key { + state.insert((pdu.kind(), key.to_string()), pdu_id); + } } } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3640730..c56cc94 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -8,7 +8,10 @@ use ruma::{ events::EventContent, EventId, }; -use std::convert::{TryFrom, TryInto}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -46,7 +49,7 @@ pub async fn send_message_event_route( return Ok(send_message_event::Response { event_id }.into()); } - let mut unsigned = serde_json::Map::new(); + let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); let event_id = db.rooms.build_and_append_pdu( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4081944..87829a3 100644 --- a/src/database/rooms.rs +++ 
b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; +use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; use std::{ collections::{BTreeMap, HashMap}, @@ -67,12 +67,8 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> state_res::Result> { +impl StateStore for Rooms { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { let pid = self .get_pdu_id(event_id) .map_err(StateError::custom)? @@ -91,7 +87,7 @@ impl StateStore for Rooms { .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, ) .map_err(Into::into) - .and_then(|pdu: StateEvent| { + .and_then(|pdu: PduEvent| { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option if pdu.room_id() == room_id { @@ -112,7 +108,7 @@ impl Rooms { &self, room_id: &RoomId, state_hash: &StateHashId, - ) -> Result> { + ) -> Result> { self.stateid_pduid .scan_prefix(&state_hash) .values() @@ -141,7 +137,7 @@ impl Rooms { pdu, )) }) - .collect::>>() + .collect() } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -181,7 +177,7 @@ impl Rooms { ))) }) } else { - return Ok(None); + Ok(None) } } @@ -205,7 +201,7 @@ impl Rooms { content: serde_json::Value, ) -> Result> { let auth_events = state_res::auth_types_for_event( - kind.clone(), + kind, sender, state_key.map(|s| s.to_string()), content, @@ -213,7 +209,13 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some((_, pdu)) = self.room_state_get(room_id, &event_type, &state_key)? { + if let Some((_, pdu)) = self.room_state_get( + room_id, + &event_type, + &state_key + .as_deref() + .expect("found a non state event in auth events"), + )? { events.insert((event_type, state_key), pdu); } } @@ -290,7 +292,10 @@ impl Rooms { } /// Returns the full room state. - pub fn room_state_full(&self, room_id: &RoomId) -> Result> { + pub fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { self.state_full(&room_id, ¤t_state_hash) } else { @@ -795,23 +800,40 @@ impl Rooms { ErrorKind::Unknown, "Membership can't be the first event", ))?)? 
- .map(|pdu| pdu.convert_for_state_res()); + .map(Arc::new); event_auth::valid_membership_change( // TODO this is a bit of a hack but not sure how to have a type // declared in `state_res` crate easily convert to/from conduit::PduEvent - Requester { - prev_event_ids: prev_events.to_owned(), - room_id: &room_id, - content: &content, - state_key: Some(state_key.to_owned()), - sender: &sender, - }, + &Arc::new(PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater"), + room_id: room_id.clone(), + sender: sender.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: event_type, + content, + state_key: Some(state_key.clone()), + prev_events, + depth: (prev_events.len() as u32).into(), + auth_events: auth_events + .into_iter() + .map(|(_, pdu)| pdu.event_id) + .collect(), + redacts, + unsigned: unsigned + .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), + hashes: ruma::events::pdu::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: BTreeMap::new(), + }), prev_event, None, // TODO: third party invite &auth_events .iter() .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res())) + Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) .collect::>>()?, ) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 29edc2a..2b1b03d 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, @@ -7,7 +6,7 @@ use ruma::{ }, presence::PresenceState, serde::Raw, - RoomId, UserId, + RoomId, UInt, UserId, }; use std::{ collections::HashMap, diff --git a/src/database/users.rs b/src/database/users.rs index 0421ae2..05fd6d6 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,7 @@ use ruma::{ encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; diff --git a/src/pdu.rs b/src/pdu.rs index f6ec415..c764700 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,11 @@ use crate::Error; -use js_int::UInt; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -33,8 +32,8 @@ pub struct PduEvent { pub auth_events: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub redacts: Option, - #[serde(default, skip_serializing_if = "serde_json::Map::is_empty")] - pub unsigned: serde_json::Map, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub unsigned: BTreeMap, pub hashes: EventHash, pub signatures: BTreeMap, BTreeMap>, } @@ -227,61 +226,66 @@ impl PduEvent { ) .expect("Raw::from_value always works") } -} -impl From<&state_res::StateEvent> for PduEvent { - fn from(pdu: &state_res::StateEvent) -> Self { - Self { - event_id: pdu.event_id(), - room_id: 
pdu.room_id().clone(), - sender: pdu.sender().clone(), - origin_server_ts: (pdu - .origin_server_ts() - .duration_since(UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64) - .try_into() - .expect("time is valid"), - kind: pdu.kind(), - content: pdu.content().clone(), - state_key: Some(pdu.state_key()), - prev_events: pdu.prev_event_ids(), - depth: *pdu.depth(), - auth_events: pdu.auth_events(), - redacts: pdu.redacts().cloned(), - unsigned: pdu.unsigned().clone().into_iter().collect(), - hashes: pdu.hashes().clone(), - signatures: pdu.signatures(), - } + pub fn from_id_val( + event_id: &EventId, + json: CanonicalJsonObject, + ) -> Result { + json.insert( + "event_id".to_string(), + ruma::serde::to_canonical_value(event_id).expect("event_id is a valid Value"), + ); + + serde_json::from_value(serde_json::to_value(json).expect("valid JSON")) } } -impl PduEvent { - pub fn convert_for_state_res(&self) -> Arc { - Arc::new( - // For consistency of eventId (just in case) we use the one - // generated by conduit for everything. - state_res::StateEvent::from_id_value( - self.event_id.clone(), - json!({ - "event_id": self.event_id, - "room_id": self.room_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "type": self.kind, - "content": self.content, - "state_key": self.state_key, - "prev_events": self.prev_events, - "depth": self.depth, - "auth_events": self.auth_events, - "redacts": self.redacts, - "unsigned": self.unsigned, - "hashes": self.hashes, - "signatures": self.signatures, - }), - ) - .expect("all conduit PDUs are state events"), - ) +impl state_res::Event for PduEvent { + fn event_id(&self) -> &EventId { + &self.event_id + } + + fn room_id(&self) -> &RoomId { + &self.room_id + } + + fn sender(&self) -> &UserId { + &self.sender + } + fn kind(&self) -> EventType { + self.kind.clone() + } + + fn content(&self) -> serde_json::Value { + self.content.clone() + } + fn origin_server_ts(&self) -> std::time::SystemTime { + UNIX_EPOCH + std::time::Duration::from_millis(self.origin_server_ts.into()) + } + + fn state_key(&self) -> Option { + self.state_key.clone() + } + fn prev_events(&self) -> Vec { + self.prev_events.to_vec() + } + fn depth(&self) -> &UInt { + &self.depth + } + fn auth_events(&self) -> Vec { + self.auth_events.to_vec() + } + fn redacts(&self) -> Option<&EventId> { + self.redacts.as_ref() + } + fn hashes(&self) -> &EventHash { + &self.hashes + } + fn signatures(&self) -> BTreeMap, BTreeMap> { + self.signatures.clone() + } + fn unsigned(&self) -> &BTreeMap { + &self.unsigned } } @@ -315,7 +319,7 @@ pub struct PduBuilder { #[serde(rename = "type")] pub event_type: EventType, pub content: serde_json::Value, - pub unsigned: Option>, + pub unsigned: Option>, pub state_key: Option, pub redacts: Option, } diff --git a/src/server_server.rs b/src/server_server.rs index d68e9fa..58d85b1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,12 +20,13 @@ use ruma::{ directory::{IncomingFilter, IncomingRoomNetwork}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::StateMap; +use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, + sync::Arc, time::{Duration, SystemTime}, }; @@ -610,17 +611,12 @@ pub async fn send_transaction_message_route<'a>( continue; } - // TODO: remove the need to convert to state_res - let event = pdu.convert_for_state_res(); + let event = Arc::new(pdu.clone()); + let previous = pdu .prev_events .first() - 
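// Illustrative aside (not part of the patch): with the state_res::Event impl above, PduEvent
// can be handed to anything generic over that trait. A minimal sketch using only methods shown
// in the impl; both helper names are made up.
use state_res::Event;

fn is_state_event<E: Event>(event: &E) -> bool {
    // State events are exactly those carrying a state_key.
    event.state_key().is_some()
}

fn describe<E: Event>(event: &E) -> String {
    format!(
        "{} in {} from {} at depth {}",
        event.event_id(),
        event.room_id(),
        event.sender(),
        event.depth()
    )
}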
.map(|id| { - db.rooms - .get_pdu(id) - .expect("todo") - .map(|ev| ev.convert_for_state_res()) - }) + .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); // 4. @@ -637,27 +633,32 @@ pub async fn send_transaction_message_route<'a>( previous.clone(), auth_events .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) + .map(|(k, v)| (k, Arc::new(v))) .collect(), None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with auth events".into()), ); continue; } - let mut previous_states = vec![]; + let mut previous_states: Vec>> = vec![]; for id in &pdu.prev_events { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .collect(); previous_states.push(state); } else { // fetch the state @@ -693,7 +694,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -702,7 +703,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -712,17 +713,14 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_event, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { // Event failed auth with state_at resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with state at the event".into()), ); continue; @@ -733,14 +731,20 @@ pub async fn send_transaction_message_route<'a>( // Gather the forward extremities and resolve let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states = vec![]; + let mut fork_states: Vec>> = vec![]; for id in &forward_extrems { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + fork_states.push(state); } else { // This is probably an error?? @@ -776,7 +780,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -785,7 +789,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -795,20 +799,20 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
{ // Soft fail - resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + resolved_map.insert( + event.event_id().clone(), + Err("Event has been soft failed".into()), + ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id(), Ok(())); + resolved_map.insert(event.event_id().clone(), Ok(())); } } From 0ee239c9d78a4b02fa58d018db28cecdd8f9bd78 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 3 Jan 2021 17:26:17 -0500 Subject: [PATCH 003/103] Step 5 in /send just fetches state from incoming server --- Cargo.lock | 78 ++++++------- src/database/rooms.rs | 29 +---- src/pdu.rs | 2 +- src/server_server.rs | 264 +++++++++++++++++++++++------------------- 4 files changed, 186 insertions(+), 187 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 609226a..033c15b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] @@ -44,9 +44,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assign" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" +checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-trait" @@ -354,9 +354,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "either" @@ -561,11 +561,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi", ] @@ -819,9 +819,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" @@ -1017,9 +1017,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", @@ -1109,9 +1109,9 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.31" +version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -1138,9 +1138,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.59" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" dependencies = [ "autocfg", "cc", @@ -1356,7 +1356,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1401,7 +1401,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] @@ -1443,25 +1443,25 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "redox_syscall", "rust-argon2", ] [[package]] name = "ref-cast" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -1954,9 +1954,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" dependencies = [ "itoa", "ryu", @@ -2026,9 +2026,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" +checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" [[package]] name = "socket2" @@ -2049,9 +2049,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" dependencies = [ "version_check", ] @@ -2065,7 +2065,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" +source = 
"git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" dependencies = [ "itertools", "maplit", @@ -2127,9 +2127,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.55" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" +checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" dependencies = [ "proc-macro2", "quote", @@ -2152,18 +2152,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" dependencies = [ "proc-macro2", "quote", @@ -2673,9 +2673,9 @@ dependencies = [ [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 87829a3..48e7c14 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -802,32 +802,9 @@ impl Rooms { ))?)? 
.map(Arc::new); event_auth::valid_membership_change( - // TODO this is a bit of a hack but not sure how to have a type - // declared in `state_res` crate easily convert to/from conduit::PduEvent - &Arc::new(PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key: Some(state_key.clone()), - prev_events, - depth: (prev_events.len() as u32).into(), - auth_events: auth_events - .into_iter() - .map(|(_, pdu)| pdu.event_id) - .collect(), - redacts, - unsigned: unsigned - .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), - hashes: ruma::events::pdu::EventHash { - sha256: "aaa".to_owned(), - }, - signatures: BTreeMap::new(), - }), + Some(state_key.as_str()), + &sender, + content.clone(), prev_event, None, // TODO: third party invite &auth_events diff --git a/src/pdu.rs b/src/pdu.rs index c764700..2997317 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -229,7 +229,7 @@ impl PduEvent { pub fn from_id_val( event_id: &EventId, - json: CanonicalJsonObject, + mut json: CanonicalJsonObject, ) -> Result { json.insert( "event_id".to_string(), diff --git a/src/server_server.rs b/src/server_server.rs index 58d85b1..3c4308c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -11,13 +11,15 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + serde::Raw, + signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; @@ -578,32 +580,13 @@ pub async fn send_transaction_message_route<'a>( let mut pub_key_map = BTreeMap::new(); pub_key_map.insert("domain".to_string(), pub_key_set); - let value = - match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => { - resolved_map - .insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - } - } else { - value - } - } - Err(_e) => { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - }; - - let pdu = serde_json::from_value::( - serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("all ruma pdus are conduit pdus"); + let pdu = match signature_and_hash_check(&pub_key_map, value) { + Ok(pdu) => pdu, + Err(e) => { + resolved_map.insert(event_id, Err(e)); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? 
{ @@ -619,7 +602,10 @@ pub async fn send_transaction_message_route<'a>( .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); - // 4. + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // the auth events that would be correct for this pdu. Put another way we should use the auth events + // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( &pdu.room_id, &pdu.kind, @@ -627,6 +613,12 @@ pub async fn send_transaction_message_route<'a>( pdu.state_key.as_deref(), pdu.content.clone(), )?; + + let mut event_map: state_res::EventMap> = auth_events + .iter() + .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .collect(); + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, @@ -635,7 +627,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), - None, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -646,66 +638,38 @@ pub async fn send_transaction_message_route<'a>( continue; } - let mut previous_states: Vec>> = vec![]; - for id in &pdu.prev_events { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? + let server_name = body.body.origin.clone(); + let (state_at_event, incoming_auth_events): (StateMap>, _) = match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) + .await? .into_iter() - .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); - previous_states.push(state); - } else { - // fetch the state - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } - } - } - // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. 
- let state_at_event = if previous_states.is_empty() { - // State is empty - Default::default() - } else if previous_states.len() == 1 { - previous_states[0].clone() - } else { - match state_res::StateResolution::resolve( - &pdu.room_id, - &RoomVersionId::Version6, - &previous_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(), - None, - &db.rooms, - ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), - Err(e) => panic!("{:?}", e), + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await?, + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; } }; @@ -713,8 +677,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event, - None, + state_at_event.clone(), // TODO: could this be &state avoid .clone + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -747,22 +711,7 @@ pub async fn send_transaction_message_route<'a>( fork_states.push(state); } else { - // This is probably an error?? - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } + todo!("we don't know of a pdu that is part of our known forks OOPS") } } @@ -773,6 +722,18 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // Add as much as we can to the `event_map` (less DB hits) + event_map.extend( + incoming_auth_events + .into_iter() + .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + ); + event_map.extend( + state_at_event + .into_iter() + .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), + ); + match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, @@ -784,7 +745,7 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - None, + &mut event_map, &db.rooms, ) { Ok(res) => res @@ -819,8 +780,74 @@ pub async fn send_transaction_message_route<'a>( Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn signature_and_hash_check( + pub_key_map: &ruma::signatures::PublicKeyMap, + value: CanonicalJsonObject, +) -> std::result::Result { + let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".into()), + } + } else { + value + } + } + Err(_e) => return Err("Signature verification failed".into()), + }; + + serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// TODO: this needs to add events to the DB in a way that does not +/// effect the state of the room +async fn fetch_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + events: &[EventId], +) -> Result> { + let mut pdus = vec![]; + for id in events { + match db.rooms.get_pdu(id)? 
{ + Some(pdu) => pdus.push(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: id }, + ) + .await + { + Ok(res) => { + let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(pdu) => { + // TODO: add to our DB somehow? + pdus.push(pdu); + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + } + } + Ok(pdus) +} + fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() + db.rooms.get_pdu_leaves(room_id) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -854,20 +881,15 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } + // db.rooms.append_pdu( + // pdu, + // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + // count, + // pdu_id.clone().into(), + // &db.globals, + // &db.account_data, + // &db.admin, + // )?; Ok(()) } From 7c4d74bf9ba04c70be602f9a8d34259024a34e6c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 5 Jan 2021 09:21:41 -0500 Subject: [PATCH 004/103] Fix clippy warnings remove unused imports --- Cargo.lock | 2 +- src/appservice_server.rs | 2 +- src/client_server/membership.rs | 1 - src/database/globals.rs | 4 ++- src/database/sending.rs | 12 ++++----- src/pdu.rs | 7 +---- src/server_server.rs | 45 ++++++++++++++++++++------------- 7 files changed, 39 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 033c15b..7ef5efb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2065,7 +2065,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" +source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" dependencies = [ "itertools", "maplit", diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..04f14c0 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -25,7 +25,7 @@ where let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains("?") { + let symbol = if old_path_and_query.contains('?') { "&" } else { "?" 
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index eb44085..4e093c2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -675,7 +675,6 @@ async fn join_room_by_id_helper( .iter() .filter(|id| resolved_events.values().any(|rid| rid == *id)) { - // this is a `state_res::StateEvent` that holds a `ruma::Pdu` let pdu = event_map .get(ev_id) .expect("Found event_id in sorted events that is not in resolved state"); diff --git a/src/database/globals.rs b/src/database/globals.rs index 3e24d82..c8e3b23 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -9,13 +9,15 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; +pub type DestinationCache = Arc, (String, Option)>>>; + #[derive(Clone)] pub struct Globals { pub(super) globals: sled::Tree, config: Config, keypair: Arc, reqwest_client: reqwest::Client, - pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host + pub actual_destination_cache: DestinationCache, // actual_destination, host dns_resolver: TokioAsyncResolver, } diff --git a/src/database/sending.rs b/src/database/sending.rs index 74aad32..709fa53 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -79,7 +79,7 @@ impl Sending { match response { Ok((server, is_appservice)) => { let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -125,7 +125,7 @@ impl Sending { Err((server, is_appservice, e)) => { info!("Couldn't send transaction to {}\n{}", server, e); let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -156,7 +156,7 @@ impl Sending { .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) .map(|server_str| { // Appservices start with a plus - if server_str.starts_with("+") { + if server_str.starts_with('+') { (server_str[1..].to_owned(), true) } else { (server_str, false) @@ -185,7 +185,7 @@ impl Sending { } let mut prefix = if *is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -219,7 +219,7 @@ impl Sending { } pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = "+".as_bytes().to_vec(); + let mut key = b"+".to_vec(); key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); @@ -329,7 +329,7 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with("+") { + let (server, is_appservice) = if server.starts_with('+') { (&server[1..], true) } else { (&*server, false) diff --git a/src/pdu.rs b/src/pdu.rs index 2997317..86fbc9f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,12 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, - time::UNIX_EPOCH, -}; +use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { diff --git a/src/server_server.rs b/src/server_server.rs index 3c4308c..3de3636 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use 
log::{error, info, warn}; @@ -11,14 +11,13 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - serde::Raw, signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; @@ -220,7 +219,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination( globals: &crate::database::globals::Globals, - destination: &Box, + destination: &ServerName, ) -> (String, Option) { let mut host = None; @@ -594,13 +593,14 @@ pub async fn send_transaction_message_route<'a>( continue; } + let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - - let previous = pdu - .prev_events - .first() - .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) - .flatten(); + // Fetch any unknown events or retrieve them from the DB + let previous = + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { + mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not @@ -616,14 +616,14 @@ pub async fn send_transaction_message_route<'a>( let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - auth_events + &auth_events .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), @@ -638,7 +638,6 @@ pub async fn send_transaction_message_route<'a>( continue; } - let server_name = body.body.origin.clone(); let (state_at_event, incoming_auth_events): (StateMap>, _) = match db .sending .send_federation_request( @@ -652,8 +651,18 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) - .await? + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); @@ -677,8 +686,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event.clone(), // TODO: could this be &state avoid .clone - None, // TODO: third party invite + &state_at_event, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -760,7 +769,7 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks, + &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
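[Editor's note between patches 004 and 005] The commits so far all iterate on the same incoming-PDU pipeline for the federation /send endpoint: check signatures and content hashes (redacting on a failed hash), check the event against its claimed auth events (step 4), against the state at the event (step 5), and against the resolved state of the current forward extremities (step 6), soft-failing the event when only the last check fails. The outline below is an illustrative summary of that control flow only; the struct, enum, and helper functions are hypothetical stand-ins and are not Conduit or state-res APIs.

    // Illustrative sketch of the /send pipeline these patches build up.
    // Every type and helper here is a simplified placeholder, not Conduit code.

    struct Pdu; // stand-in for Conduit's PduEvent

    // Placeholder checks; the real logic lives in server_server.rs and state-res.
    fn signatures_and_hashes_ok(_pdu: &Pdu) -> bool { true }          // steps 2-3
    fn passes_auth_against_auth_events(_pdu: &Pdu) -> bool { true }   // step 4
    fn passes_auth_against_state_at_event(_pdu: &Pdu) -> bool { true } // step 5
    fn passes_auth_against_current_forks(_pdu: &Pdu) -> bool { true }  // step 6

    enum Outcome {
        Accepted,
        SoftFailed,
        Rejected(&'static str),
    }

    fn handle_incoming_pdu(pdu: &Pdu) -> Outcome {
        if !signatures_and_hashes_ok(pdu) {
            return Outcome::Rejected("signature or content hash check failed");
        }
        if !passes_auth_against_auth_events(pdu) {
            return Outcome::Rejected("failed auth check with auth events");
        }
        if !passes_auth_against_state_at_event(pdu) {
            return Outcome::Rejected("failed auth check with state at the event");
        }
        if !passes_auth_against_current_forks(pdu) {
            // The event is still stored, but must not affect the room state.
            return Outcome::SoftFailed;
        }
        Outcome::Accepted
    }

    fn main() {
        match handle_incoming_pdu(&Pdu) {
            Outcome::Accepted => println!("append event and update room state"),
            Outcome::SoftFailed => println!("store event without updating state"),
            Outcome::Rejected(why) => println!("rejected: {}", why),
        }
    }

The distinction between the last two arms mirrors append_state versus append_state_soft in the patches: a soft-failed event is persisted for completeness of the DAG but is kept out of the current room state.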
From 8a035880f097d885baed6e9ee179ccbe3db16881 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 08:52:30 -0500 Subject: [PATCH 005/103] Remove StateStore trait from state-res collect events needed --- Cargo.lock | 70 +++++++++++--------- Cargo.toml | 2 +- src/client_server/membership.rs | 4 -- src/database/rooms.rs | 100 ++++++++++++++++++---------- src/server_server.rs | 111 +++++++++++++++++++------------- 5 files changed, 170 insertions(+), 117 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ef5efb..f439e51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -216,9 +216,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -543,7 +543,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.2", + "pin-project 1.0.3", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -567,18 +567,18 @@ checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] @@ -707,7 +707,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.2", + "pin-project 1.0.3", "socket2", "tokio", "tower-service", @@ -1221,11 +1221,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" +checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" dependencies = [ - "pin-project-internal 1.0.2", + "pin-project-internal 1.0.3", ] [[package]] @@ -1241,9 +1241,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" dependencies = [ "proc-macro2", "quote", @@ -1258,9 +1258,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" [[package]] name = "pin-utils" @@ -1365,13 +1365,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" dependencies = [ "libc", "rand_chacha 0.3.0", - "rand_core 0.6.0", + "rand_core 0.6.1", 
"rand_hc 0.3.0", ] @@ -1392,7 +1392,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1406,11 +1406,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.0", + "getrandom 0.2.1", ] [[package]] @@ -1428,7 +1428,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1518,7 +1518,7 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "serde", "serde_urlencoded", "tokio", @@ -1758,7 +1758,7 @@ version = "0.17.4" source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand 0.8.0", + "rand 0.8.1", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1977,9 +1977,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +checksum = "971be8f6e4d4a47163b405a3df70d14359186f9ab0f3a3ec37df144ca1ce089f" dependencies = [ "dtoa", "linked-hash-map", @@ -2065,7 +2065,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" +source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" dependencies = [ "itertools", "maplit", @@ -2127,9 +2127,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" +checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" dependencies = [ "proc-macro2", "quote", @@ -2325,7 +2325,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "tracing-attributes", "tracing-core", ] @@ -2509,6 +2509,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.69" diff --git a/Cargo.toml b/Cargo.toml index 44df254..004cbfd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", # Used when 
doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 4e093c2..ea14268 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -618,7 +618,6 @@ async fn join_room_by_id_helper( &room_id, &control_events, &mut event_map, - &db.rooms, &event_ids, ); @@ -629,7 +628,6 @@ async fn join_room_by_id_helper( &sorted_control_events, &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); @@ -654,7 +652,6 @@ async fn join_room_by_id_helper( &events_to_sort, power_level, &mut event_map, - &db.rooms, ); let resolved_events = state_res::StateResolution::iterative_auth_check( @@ -663,7 +660,6 @@ async fn join_room_by_id_helper( &sorted_event_ids, &resolved_control_events, &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 48e7c14..b84d1f9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -67,40 +67,6 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { - let pid = self - .get_pdu_id(event_id) - .map_err(StateError::custom)? - .ok_or_else(|| { - StateError::NotFound(format!( - "PDU via room_id and event_id not found in the db: {}", - event_id.as_str() - )) - })?; - - serde_json::from_slice( - &self - .pduid_pdu - .get(pid) - .map_err(StateError::custom)? - .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, - ) - .map_err(Into::into) - .and_then(|pdu: PduEvent| { - // conduit's PDU's always contain a room_id but some - // of ruma's do not so this must be an Option - if pdu.room_id() == room_id { - Ok(Arc::new(pdu)) - } else { - Err(StateError::NotFound( - "Found PDU for incorrect room in db.".into(), - )) - } - }) - } -} - impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. @@ -222,6 +188,72 @@ impl Rooms { Ok(events) } + /// Returns a Vec of the related auth events to the given `event`. + /// + /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. + pub fn auth_events_full( + &self, + room_id: &RoomId, + event_ids: &[EventId], + ) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + if let Some(ev) = self.get_pdu(&ev_id)? 
{ + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) + } + + /// Returns a Vec representing the difference in auth chains of the given `events`. + /// + /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). + pub fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + use std::collections::BTreeSet; + + let mut chains = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_events_full(room_id, &ids)? + .into_iter() + .map(|pdu| pdu.event_id) + .collect::>(); + chains.push(chain); + } + + if let Some(chain) = chains.first() { + let rest = chains.iter().skip(1).flatten().cloned().collect(); + let common = chain.intersection(&rest).collect::>(); + + Ok(chains + .iter() + .flatten() + .filter(|id| !common.contains(&id)) + .cloned() + .collect::>() + .into_iter() + .collect()) + } else { + Ok(vec![]) + } + } + /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. diff --git a/src/server_server.rs b/src/server_server.rs index 3de3636..f68475c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -603,7 +603,7 @@ pub async fn send_transaction_message_route<'a>( }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not // the auth events that would be correct for this pdu. Put another way we should use the auth events // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( @@ -637,50 +637,56 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 4. - let (state_at_event, incoming_auth_events): (StateMap>, _) = match db - .sending - .send_federation_request( - &db.globals, - server_name.clone(), - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) - .collect(); - - ( - state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) - .await?, + // Step 5. 
event passes auth based on state at the event + let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = + match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, ) - } - Err(_) => { - resolved_map.insert( - event.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await + { + Ok(res) => { + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .collect(); + + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await? + .into_iter() + .map(Arc::new) + .collect(), + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; if !state_res::event_auth::auth_check( &RoomVersionId::Version6, @@ -698,6 +704,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 5. // The event could still be soft failed append_state_soft(&db, &pdu)?; @@ -724,18 +731,30 @@ pub async fn send_transaction_message_route<'a>( } } - // 6. + // Step 6. event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() } else if fork_states.len() == 1 { fork_states[0].clone() } else { + let auth_events = fork_states + .iter() + .map(|map| { + db.rooms.auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + }) + .collect(); + // Add as much as we can to the `event_map` (less DB hits) event_map.extend( incoming_auth_events .into_iter() - .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + .map(|pdu| (pdu.event_id().clone(), pdu)), ); event_map.extend( state_at_event @@ -754,8 +773,8 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), + &auth_events, &mut event_map, - &db.rooms, ) { Ok(res) => res .into_iter() From 88f3ee489b23536698fef8b97f79d4489dd9d547 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 15:05:09 -0500 Subject: [PATCH 006/103] Fill event_map with all events that will be needed for resolution --- Cargo.lock | 2 +- src/server_server.rs | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f439e51..d08de95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -197,8 +197,8 @@ dependencies = [ "http", "image", "log", - "regex", "rand 0.7.3", + "regex", "reqwest", "ring", "rocket", diff --git a/src/server_server.rs b/src/server_server.rs index f68475c..e87c05c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -565,7 +565,7 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this - + // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); // 2. Passes signature checks, otherwise event is dropped. 
@@ -741,16 +741,24 @@ pub async fn send_transaction_message_route<'a>( let auth_events = fork_states .iter() .map(|map| { - db.rooms.auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), - ) + db.rooms + .auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) }) - .collect(); + .collect::>>()?; - // Add as much as we can to the `event_map` (less DB hits) + // Add everything we will need to event_map + event_map.extend( + auth_events + .iter() + .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) + .flatten(), + ); event_map.extend( incoming_auth_events .into_iter() @@ -773,7 +781,10 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - &auth_events, + auth_events + .into_iter() + .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) + .collect(), &mut event_map, ) { Ok(res) => res From 27c4e9d9d5d362c174c6ca14df5cd1fe412a23a2 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 12 Jan 2021 08:26:52 -0500 Subject: [PATCH 007/103] Fix signature/hash checks, fetch recursive auth events --- src/client_server/membership.rs | 7 +- src/database/rooms.rs | 4 +- src/pdu.rs | 12 +- src/server_server.rs | 240 +++++++++++++++++++++++++------- 4 files changed, 193 insertions(+), 70 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ea14268..29b6c14 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -681,13 +681,8 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( -<<<<<<< HEAD - &PduEvent::from(&**pdu), - utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), -======= &pdu, - &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), ->>>>>>> 6232d1f (Update state-res, use the new Event trait) + utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b84d1f9..6b51d58 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; +use state_res::{event_auth, Event, StateMap}; use std::{ collections::{BTreeMap, HashMap}, @@ -193,7 +193,7 @@ impl Rooms { /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. pub fn auth_events_full( &self, - room_id: &RoomId, + _room_id: &RoomId, event_ids: &[EventId], ) -> Result> { let mut result = BTreeMap::new(); diff --git a/src/pdu.rs b/src/pdu.rs index 86fbc9f..750f9cf 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -4,7 +4,7 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; @@ -286,12 +286,11 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. 
/// -/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. +/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn process_incoming_pdu( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { - let mut value = - serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); + let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); let event_id = EventId::try_from(&*format!( "${}", @@ -300,11 +299,6 @@ pub(crate) fn process_incoming_pdu( )) .expect("ruma's reference hashes are valid event ids"); - value.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - (event_id, value) } diff --git a/src/server_server.rs b/src/server_server.rs index e87c05c..141d5bb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,5 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -6,6 +7,7 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ + device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -18,13 +20,14 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - signatures::{CanonicalJsonObject, PublicKeyMap}, + serde::to_canonical_value, + signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, net::{IpAddr, SocketAddr}, sync::Arc, @@ -519,6 +522,8 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } + dbg!(&*body); + for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { Ok(edu) => match edu.edu_type.as_str() { @@ -546,6 +551,7 @@ pub async fn send_transaction_message_route<'a>( } "m.presence" => {} "m.receipt" => {} + "m.device_list_update" => {} _ => {} }, Err(_err) => { @@ -565,21 +571,52 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this + // We do not add the event_id field to the pdu here because of signature and hashes checks // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. // 3. Passes hash checks, otherwise it is redacted before being processed further. - let keys = db.globals.keypair(); - let mut pub_key_set = BTreeMap::new(); - pub_key_set.insert( - "ed25519:1".to_string(), - String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), - ); + let server_name = body.body.origin.clone(); let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert("domain".to_string(), pub_key_set); + if let Some(sig) = value.get("signatures") { + match sig { + CanonicalJsonValue::Object(entity) => { + for key in entity.keys() { + // TODO: save this in a DB maybe... 
+ // fetch the public signing key + let res = db + .sending + .send_federation_request( + &db.globals, + Box::::try_from(key.to_string()).unwrap(), + get_server_keys::v2::Request::new(), + ) + .await?; - let pdu = match signature_and_hash_check(&pub_key_map, value) { + pub_key_map.insert( + res.server_key.server_name.to_string(), + res.server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); + } + } + _ => { + resolved_map.insert( + event_id, + Err("`signatures` is not a JSON object".to_string()), + ); + continue; + } + } + } else { + resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); + continue; + } + + let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -587,50 +624,75 @@ pub async fn send_transaction_message_route<'a>( } }; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - // Fetch any unknown events or retrieve them from the DB + dbg!(&*event); + // Fetch any unknown prev_events or retrieve them from the DB let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { - mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), _ => None, }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not - // the auth events that would be correct for this pdu. Put another way we should use the auth events - // the pdu claims are its auth events - let auth_events = db.rooms.get_auth_events( - &pdu.room_id, - &pdu.kind, - &pdu.sender, - pdu.state_key.as_deref(), - pdu.content.clone(), - )?; + // Recursively gather all auth events checking that the previous auth events are valid. 
+ let auth_events: Vec = + match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) + .await + { + Ok(events) => events, + Err(_) => { + resolved_map.insert( + pdu.event_id, + Err("Failed to recursively gather auth events".into()), + ); + continue; + } + }; let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); - if !state_res::event_auth::auth_check( + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - &auth_events - .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect(), + &pdu.auth_events + .iter() + .map(|id| { + event_map + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + Error::Conflict( + "Auth event not found, event failed recursive auth checks.", + ) + }) + }) + .collect::>>()?, None, // TODO: third party invite ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { + .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( pdu.event_id, Err("Event has failed auth check with auth events".into()), @@ -816,31 +878,92 @@ pub async fn send_transaction_message_route<'a>( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, -) -> std::result::Result { - let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".into()), +) -> std::result::Result { + Ok( + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + error!("CONTENT HASH FAILED"); + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value } - } else { - value } - } - Err(_e) => return Err("Signature verification failed".into()), - }; - - serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }, ) - .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have missing events it fails. +async fn fetch_check_auth_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + event_ids: &[EventId], +) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + let ev = match db.rooms.get_pdu(&ev_id)? 
{ + Some(pdu) => pdu, + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: &ev_id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(mut val) => { + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map") + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }; + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) } /// TODO: this needs to add events to the DB in a way that does not @@ -865,10 +988,21 @@ async fn fetch_events( .await { Ok(res) => { - let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); match signature_and_hash_check(key_map, value) { - Ok(pdu) => { + Ok(mut val) => { // TODO: add to our DB somehow? + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + pdus.push(pdu); } Err(e) => { @@ -898,7 +1032,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; db.rooms.append_pdu( pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, From 7c4e116caab10db9613455f755072a9b83835117 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 008/103] State resolution outline for /send --- src/server_server.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/server_server.rs b/src/server_server.rs index 141d5bb..31d6467 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1067,6 +1067,59 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { Ok(()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + 
pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 2ac3ffbb2405cdaddb308cdc6e8be87a9c635c61 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 14:39:56 -0500 Subject: [PATCH 009/103] Convert uses of Box to a ref --- src/client_server/alias.rs | 2 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 4 +- src/client_server/membership.rs | 4 +- src/database/sending.rs | 4 +- src/pdu.rs | 2 +- src/server_server.rs | 117 ++++++++++++++++++++++---------- 7 files changed, 92 insertions(+), 43 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 498e882..0dc40a9 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -70,7 +70,7 @@ pub async fn get_alias_helper( .sending .send_federation_request( &db.globals, - room_alias.server_name().to_owned(), + room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) .await?; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 2bff20c..87d5fc8 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -137,7 +137,7 @@ pub async fn get_public_rooms_filtered_helper( .sending .send_federation_request( &db.globals, - other_server.to_owned(), + other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, since: since.as_deref(), diff --git a/src/client_server/media.rs b/src/client_server/media.rs index f792062..275038a 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -77,7 +77,7 @@ pub async fn get_content_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content::Request { allow_remote: false, server_name: &body.server_name, @@ -126,7 +126,7 @@ pub async fn get_content_thumbnail_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content_thumbnail::Request { allow_remote: false, height: body.height, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 29b6c14..40e4183 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -468,7 +468,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, @@ -547,7 +547,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, diff --git a/src/database/sending.rs b/src/database/sending.rs index 709fa53..e827dad 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -302,7 +302,7 @@ impl Sending { server_server::send_request( &globals, - server.clone(), + &*server, send_transaction_message::v1::Request { origin: globals.server_name(), pdus: &pdu_jsons, @@ -347,7 +347,7 @@ impl Sending { pub 
async fn send_federation_request( &self, globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where diff --git a/src/pdu.rs b/src/pdu.rs index 750f9cf..340ddee 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -287,7 +287,7 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. -pub(crate) fn process_incoming_pdu( +pub(crate) fn gen_event_id_canonical_json( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); diff --git a/src/server_server.rs b/src/server_server.rs index 31d6467..64e0a05 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,11 +20,12 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::{Event, StateMap}; +use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::{TryFrom, TryInto}, @@ -36,7 +37,7 @@ use std::{ pub async fn send_request( globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where @@ -50,7 +51,7 @@ where .actual_destination_cache .read() .unwrap() - .get(&destination) + .get(destination) .cloned(); let (actual_destination, host) = if let Some(result) = maybe_result { @@ -61,7 +62,7 @@ where .actual_destination_cache .write() .unwrap() - .insert(destination.clone(), result.clone()); + .insert(Box::::from(destination), result.clone()); result }; @@ -278,9 +279,9 @@ async fn find_actual_destination( (actual_destination, host) } -async fn query_srv_record<'a>( +async fn query_srv_record( globals: &crate::database::globals::Globals, - hostname: &'a str, + hostname: &str, ) -> Option { if let Ok(Some(host_port)) = globals .dns_resolver() @@ -572,11 +573,9 @@ pub async fn send_transaction_message_route<'a>( // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks - // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then - let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let server_name = body.body.origin.clone(); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { match sig { @@ -588,7 +587,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - Box::::try_from(key.to_string()).unwrap(), + <&ServerName>::try_from(key.as_str()).unwrap(), get_server_keys::v2::Request::new(), ) .await?; @@ -616,6 +615,9 @@ pub async fn send_transaction_message_route<'a>( continue; } + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
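Earlier in this hunk, `send_request` first consults `actual_destination_cache` and only resolves and stores the destination on a miss. Reduced to plain std types (the names here are placeholders, not Conduit's), the read-then-populate pattern looks roughly like this:

use std::collections::HashMap;
use std::sync::RwLock;

fn cached_destination(
    cache: &RwLock<HashMap<String, String>>,
    server: &str,
    resolve: impl FnOnce(&str) -> String,
) -> String {
    // Fast path: another request already resolved this server.
    if let Some(hit) = cache.read().unwrap().get(server).cloned() {
        return hit;
    }
    // Slow path: resolve once, remember the result for later senders.
    let found = resolve(server);
    cache.write().unwrap().insert(server.to_owned(), found.clone());
    found
}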
let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { @@ -625,15 +627,20 @@ pub async fn send_transaction_message_route<'a>( }; // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type + // to our PduEvent type also finally verifying the first step listed above val.insert( "event_id".to_owned(), to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); - let pdu = serde_json::from_value::( + let pdu = match serde_json::from_value::( serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); + ) { + Ok(pdu) => pdu, + Err(_) => { + resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { @@ -644,18 +651,15 @@ pub async fn send_transaction_message_route<'a>( let event = Arc::new(pdu.clone()); dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // Recursively gather all auth events checking that the previous auth events are valid. let auth_events: Vec = - match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) - .await - { + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { Ok(events) => events, Err(_) => { resolved_map.insert( @@ -707,7 +711,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - server_name.clone(), + server_name, get_room_state_ids::v1::Request { room_id: pdu.room_id(), event_id: pdu.event_id(), @@ -716,8 +720,7 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -734,7 +737,7 @@ pub async fn send_transaction_message_route<'a>( ( state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) .await? .into_iter() .map(Arc::new) @@ -881,6 +884,52 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +async fn auth_each_event( + db: &Database, + value: CanonicalJsonObject, + event_id: EventId, + pub_key_map: &PublicKeyMap, + server_name: &ServerName, + auth_cache: EventMap>, +) -> std::result::Result { + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
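The auth check above is fed the event's claimed auth_events keyed the way state-res keys state: one `(event type, optional state key)` pair per event. A rough sketch of building such a map, with `FakeEvent` as a made-up stand-in for PduEvent:

use std::collections::BTreeMap;
use std::sync::Arc;

#[derive(Clone)]
struct FakeEvent {
    kind: String,
    state_key: Option<String>,
}

fn auth_state_map(
    events: &[Arc<FakeEvent>],
) -> BTreeMap<(String, Option<String>), Arc<FakeEvent>> {
    events
        .iter()
        .map(|ev| ((ev.kind.clone(), ev.state_key.clone()), Arc::clone(ev)))
        .collect()
}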
+ let mut val = signature_and_hash_check(&pub_key_map, value)?; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; + + // If we have no idea about this room skip the PDU + if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { + return Err("Room is unknown to this server".into()); + } + + // Fetch any unknown prev_events or retrieve them from the DB + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; + + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // Recursively gather all auth events checking that the previous auth events are valid. + let auth_events: Vec = + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(events) => events, + Err(_) => return Err("Failed to recursively gather auth events".into()), + }; + + Ok(pdu) +} + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -909,7 +958,7 @@ fn signature_and_hash_check( /// events `auth_events`. If the chain is found to have missing events it fails. async fn fetch_check_auth_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, event_ids: &[EventId], ) -> Result> { @@ -929,13 +978,13 @@ async fn fetch_check_auth_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: &ev_id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { val.insert( @@ -970,7 +1019,7 @@ async fn fetch_check_auth_events( /// effect the state of the room async fn fetch_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, events: &[EventId], ) -> Result> { @@ -982,13 +1031,13 @@ async fn fetch_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { // TODO: add to our DB somehow? From 851eb555b6bb656b7316515fdc865c163c9e1874 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 21:32:22 -0500 Subject: [PATCH 010/103] Abstract event validation/fetching, add outlier and signing key DB trees Fixed the miss named commented out keys in conduit-example.toml. 
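The patch below adds a sled tree for cached server signing keys. As a rough sketch (plain std code, no sled calls), its key packs the origin server name, a 0xff separator, and the key set's valid_until timestamp in big-endian milliseconds, so stale entries can be pruned by parsing the trailing timestamp back out:

use std::convert::TryInto;

fn signing_key_tree_key(origin: &str, valid_until_ms: u64) -> Vec<u8> {
    let mut key = origin.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&valid_until_ms.to_be_bytes());
    key
}

fn valid_until_from_key(key: &[u8]) -> Option<u64> {
    // Everything after the first 0xff is the big-endian timestamp.
    let ts = key.splitn(2, |&b| b == 0xff).nth(1)?;
    Some(u64::from_be_bytes(ts.try_into().ok()?))
}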
--- conduit-example.toml | 6 +- src/database.rs | 10 +- src/database/globals.rs | 77 ++++- src/database/rooms.rs | 97 ++---- src/error.rs | 7 +- src/main.rs | 1 + src/server_server.rs | 632 +++++++++++++++++++--------------------- 7 files changed, 416 insertions(+), 414 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 70d3ce4..bb3ae33 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -23,12 +23,12 @@ port = 6167 max_request_size = 20_000_000 # in bytes # Disable registration. No new users will be able to register on this server -#registration_disabled = false +#allow_registration = false # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work -#encryption_disabled = false -#federation_disabled = false +#allow_encryption = false +#allow_federation = false #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time diff --git a/src/database.rs b/src/database.rs index 607e1be..e94a517 100644 --- a/src/database.rs +++ b/src/database.rs @@ -22,7 +22,7 @@ use std::fs::remove_dir_all; use std::sync::{Arc, RwLock}; use tokio::sync::Semaphore; -#[derive(Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Config { server_name: Box, database_path: String, @@ -102,7 +102,12 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let db = Self { - globals: globals::Globals::load(db.open_tree("global")?, config).await?, + globals: globals::Globals::load( + db.open_tree("global")?, + db.open_tree("servertimeout_signingkey")?, + config, + ) + .await?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, @@ -155,6 +160,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index c8e3b23..4670068 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,10 @@ use crate::{database::Config, utils, Error, Result}; use log::error; -use ruma::ServerName; -use std::collections::HashMap; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + ServerName, ServerSigningKeyId, +}; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; @@ -19,10 +22,15 @@ pub struct Globals { reqwest_client: reqwest::Client, pub actual_destination_cache: DestinationCache, // actual_destination, host dns_resolver: TokioAsyncResolver, + pub(super) servertimeout_signingkey: sled::Tree, // ServerName -> algorithm:key + pubkey } impl Globals { - pub async fn load(globals: sled::Tree, config: Config) -> Result { + pub async fn load( + globals: sled::Tree, + server_keys: sled::Tree, + config: Config, + ) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
.expect("utils::generate_keypair always returns Some"); @@ -75,6 +83,7 @@ impl Globals { Error::bad_config("Failed to set up trust dns resolver with system config.") })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), + servertimeout_signingkey: server_keys, }) } @@ -128,4 +137,66 @@ impl Globals { pub fn dns_resolver(&self) -> &TokioAsyncResolver { &self.dns_resolver } + + /// TODO: the key valid until timestamp is only honored in room version > 4 + /// Remove the outdated keys and insert the new ones. + /// + /// This doesn't actually check that the keys provided are newer than the old set. + pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { + // Remove outdated keys + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, _) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + + if now > valid_until { + self.servertimeout_signingkey.remove(k)?; + } + } + + let mut key = origin.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice( + &(keys + .valid_until_ts + .duration_since(std::time::UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64) + .to_be_bytes(), + ); + + self.servertimeout_signingkey.insert( + key, + serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), + )?; + Ok(()) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + pub fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result> { + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, bytes) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + // If these keys are still valid use em! + if valid_until > now { + return serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys")); + } + } + Ok(BTreeMap::default()) + } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6b51d58..c37aa1a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -65,6 +65,9 @@ pub struct Rooms { /// The state for a given state hash. pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) + + /// Any pdu that has passed the steps up to auth with auth_events. + pub(super) eventid_outlierpdu: sled::Tree, } impl Rooms { @@ -188,72 +191,6 @@ impl Rooms { Ok(events) } - /// Returns a Vec of the related auth events to the given `event`. - /// - /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. - pub fn auth_events_full( - &self, - _room_id: &RoomId, - event_ids: &[EventId], - ) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - if let Some(ev) = self.get_pdu(&ev_id)? 
{ - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) - } - - /// Returns a Vec representing the difference in auth chains of the given `events`. - /// - /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). - pub fn auth_chain_diff( - &self, - room_id: &RoomId, - event_ids: Vec>, - ) -> Result> { - use std::collections::BTreeSet; - - let mut chains = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self - .auth_events_full(room_id, &ids)? - .into_iter() - .map(|pdu| pdu.event_id) - .collect::>(); - chains.push(chain); - } - - if let Some(chain) = chains.first() { - let rest = chains.iter().skip(1).flatten().cloned().collect(); - let common = chain.intersection(&rest).collect::>(); - - Ok(chains - .iter() - .flatten() - .filter(|id| !common.contains(&id)) - .cloned() - .collect::>() - .into_iter() - .collect()) - } else { - Ok(vec![]) - } - } - /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. @@ -475,6 +412,31 @@ impl Rooms { Ok(()) } + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + /// Returns true if the event_id was previously inserted. + pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + let res = self + .eventid_outlierpdu + .insert( + event_id.as_bytes(), + &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + ) + .map(|op| op.is_some())?; + Ok(res) + } + /// Creates a new persisted data unit and adds it to a room. /// /// By this point the incoming event should be fully authenticated, no auth happens @@ -516,6 +478,9 @@ impl Rooms { } } + // We no longer keep this pdu as an outlier + self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; // Mark as read first so the sending client doesn't get a notification even if appending diff --git a/src/error.rs b/src/error.rs index c57843c..fed545c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -122,10 +122,9 @@ impl log::Log for ConduitLogger { let output = format!("{} - {}", record.level(), record.args()); if self.enabled(record.metadata()) - && (record - .module_path() - .map_or(false, |path| path.starts_with("conduit::")) - || record + && (record.module_path().map_or(false, |path| { + path.starts_with("conduit::") || path.starts_with("state") + }) || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/main.rs b/src/main.rs index fe7ab0d..9817c56 100644 --- a/src/main.rs +++ b/src/main.rs @@ -155,6 +155,7 @@ fn setup_rocket() -> rocket::Rocket { .figment() .extract() .expect("It looks like your config is invalid. 
Please take a look at the error"); + let data = Database::load_or_create(config) .await .expect("config is valid"); diff --git a/src/server_server.rs b/src/server_server.rs index 64e0a05..6907e34 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,5 +1,4 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; -use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -7,7 +6,6 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ - device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -20,7 +18,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -28,9 +25,12 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, + future::Future, net::{IpAddr, SocketAddr}, + pin::Pin, + result::Result as StdResult, sync::Arc, time::{Duration, SystemTime}, }; @@ -575,6 +575,26 @@ pub async fn send_transaction_message_route<'a>( // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + // If we have no idea about this room skip the PDU + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); + continue; + } + }; + if !db.rooms.exists(&room_id)? { + resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); + continue; + } + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { @@ -583,20 +603,12 @@ pub async fn send_transaction_message_route<'a>( for key in entity.keys() { // TODO: save this in a DB maybe... // fetch the public signing key - let res = db - .sending - .send_federation_request( - &db.globals, - <&ServerName>::try_from(key.as_str()).unwrap(), - get_server_keys::v2::Request::new(), - ) - .await?; + let origin = <&ServerName>::try_from(key.as_str()).unwrap(); + let keys = fetch_signing_keys(&db, origin).await?; pub_key_map.insert( - res.server_key.server_name.to_string(), - res.server_key - .verify_keys - .into_iter() + origin.to_string(), + keys.into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(), ); @@ -615,10 +627,31 @@ pub async fn send_transaction_message_route<'a>( continue; } - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let mut val = match signature_and_hash_check(&pub_key_map, value) { + // TODO: make this persist but not a DB Tree... + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. 
This could potentially also be some sort of trie (suffix tree) + // like structure so that once an auth event is known it would know (using indexes maybe) all of + // the auth events that it references. + let mut auth_cache = EventMap::new(); + + // 1. check the server is in the room (optional) + // 2. check content hash, redact if doesn't match + // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events + // 6. persist this event as an outlier + // 7. if not timeline event: stop + let pdu = match validate_event( + &db, + value, + event_id.clone(), + &pub_key_map, + server_name, + // All the auth events gathered will be here + &mut auth_cache, + ) + .await + { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -626,59 +659,31 @@ pub async fn send_transaction_message_route<'a>( } }; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = match serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) { - Ok(pdu) => pdu, - Err(_) => { - resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); - continue; - } - }; + let pdu = Arc::new(pdu.clone()); - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id)? { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - - let event = Arc::new(pdu.clone()); - dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + let previous = match fetch_events( + &db, + server_name, + &pub_key_map, + &pdu.prev_events, + &mut auth_cache, + ) + .await + { + Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), _ => None, }; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. 
- let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => { - resolved_map.insert( - pdu.event_id, - Err("Failed to recursively gather auth events".into()), - ); - continue; - } - }; - - let mut event_map: state_res::EventMap> = auth_events + let mut event_map: state_res::EventMap> = auth_cache .iter() - .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(k, v)| (k.clone(), v.clone())) .collect(); // Check that the event passes auth based on the auth_events let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &pdu.auth_events .iter() @@ -696,9 +701,10 @@ pub async fn send_transaction_message_route<'a>( None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( - pdu.event_id, + pdu.event_id().clone(), Err("Event has failed auth check with auth events".into()), ); continue; @@ -720,7 +726,14 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events( + &db, + server_name, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, + ) + .await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -732,21 +745,26 @@ pub async fn send_transaction_message_route<'a>( let state = state .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); ( state, - fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) - .await? - .into_iter() - .map(Arc::new) - .collect(), + fetch_events( + &db, + server_name, + &pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await? + .into_iter() + .collect(), ) } Err(_) => { resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Fetching state for event failed".into()), ); continue; @@ -755,7 +773,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &state_at_event, None, // TODO: third party invite @@ -764,37 +782,21 @@ pub async fn send_transaction_message_route<'a>( { // Event failed auth with state_at resolved_map.insert( - pdu.event_id, + event_id, Err("Event has failed auth check with state at the event".into()), ); continue; } // End of step 5. - // The event could still be soft failed - append_state_soft(&db, &pdu)?; - // Gather the forward extremities and resolve - let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states: Vec>> = vec![]; - for id in &forward_extrems { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); - - fork_states.push(state); - } else { - todo!("we don't know of a pdu that is part of our known forks OOPS") + let fork_states = match forward_extremity_ids(&db, &pdu) { + Ok(states) => states, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; } - } + }; // Step 6. 
event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { @@ -803,19 +805,47 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { - let auth_events = fork_states - .iter() - .map(|map| { - db.rooms - .auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), + let mut auth_events = vec![]; + // this keeps track if we error so we can break out of these inner loops + // to continue on with the incoming PDU's + let mut failed = false; + for map in &fork_states { + let mut state_auth = vec![]; + for pdu in map.values() { + let event = match auth_cache.get(pdu.event_id()) { + Some(aev) => aev.clone(), + // We should know about every event at this point but just incase... + None => match fetch_events( + &db, + server_name, + &pub_key_map, + &[pdu.event_id().clone()], + &mut auth_cache, ) - .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) - }) - .collect::>>()?; + .await + .map(|mut vec| vec.remove(0)) + { + Ok(aev) => aev.clone(), + Err(_) => { + resolved_map.insert( + event_id.clone(), + Err("Event has been soft failed".into()), + ); + failed = true; + break; + } + }, + }; + state_auth.push(event); + } + if failed { + break; + } + auth_events.push(state_auth); + } + if failed { + continue; + } // Add everything we will need to event_map event_map.extend( @@ -862,74 +892,163 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous, &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { - // Soft fail + // Soft fail, we add the event as an outlier. resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Event has been soft failed".into()), ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id().clone(), Ok(())); + resolved_map.insert(pdu.event_id().clone(), Ok(())); } } Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } -async fn auth_each_event( - db: &Database, +/// Validate any event that is given to us by another server. +/// +/// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). +/// 2. Passes signature checks, otherwise event is dropped. +/// 3. Passes hash checks, otherwise it is redacted before being processed further. +/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). +/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +fn validate_event<'a>( + db: &'a Database, value: CanonicalJsonObject, event_id: EventId, - pub_key_map: &PublicKeyMap, - server_name: &ServerName, - auth_cache: EventMap>, -) -> std::result::Result { - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. 
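Step 6 above boils down to choosing which state the event is finally authed against: with no known fork states a fallback is used, a single fork is taken as-is, and anything more goes through state resolution. A stripped-down sketch under those assumptions (string-valued StateMap and a placeholder `resolve` closure, not the real state-res call):

use std::collections::BTreeMap;

type StateMap = BTreeMap<(String, Option<String>), String>;

fn state_at_forks(
    fallback: StateMap,
    mut forks: Vec<StateMap>,
    resolve: impl FnOnce(&[StateMap]) -> StateMap,
) -> StateMap {
    match forks.len() {
        0 => fallback, // placeholder behaviour; the real code handles this case separately
        1 => forks.remove(0),
        _ => resolve(&forks),
    }
}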
- let mut val = signature_and_hash_check(&pub_key_map, value)?; + pub_key_map: &'a PublicKeyMap, + origin: &'a ServerName, + auth_cache: &'a mut EventMap>, +) -> Pin> + 'a + Send>> { + Box::pin(async move { + let mut val = signature_and_hash_check(&pub_key_map, value)?; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU".to_string())?; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { - return Err("Room is unknown to this server".into()); - } + fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) + .await + .map_err(|_| "Event failed auth chain check".to_string())?; - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + db.rooms + .append_pdu_outlier(pdu.event_id(), &pdu) + .map_err(|e| e.to_string())?; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. - let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => return Err("Failed to recursively gather auth events".into()), - }; - - Ok(pdu) + Ok(pdu) + }) } +/// Find the event and auth it. +/// +/// 1. Look in the main timeline (pduid_pdu tree) +/// 2. Look at outlier pdu tree +/// 3. Ask origin server over federation +/// 4. TODO: Ask other servers over federation? +async fn fetch_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + events: &[EventId], + auth_cache: &mut EventMap>, +) -> Result>> { + let mut pdus = vec![]; + for id in events { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db.rooms.get_pdu_outlier(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; + + Arc::new(pdu) + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }, + }; + pdus.push(pdu); + } + Ok(pdus) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. 
If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + +/// Search the DB for the signing keys of the given server, if we don't have them +/// fetch them from the server and save to our DB. +async fn fetch_signing_keys( + db: &Database, + origin: &ServerName, +) -> Result> { + match db.globals.signing_keys_for(origin)? { + keys if !keys.is_empty() => Ok(keys), + _ => { + let keys = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?; + db.globals.add_signing_key(origin, &keys.server_key)?; + Ok(keys.server_key.verify_keys) + } + } +} fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -954,122 +1073,29 @@ fn signature_and_hash_check( ) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have missing events it fails. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], -) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); +fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { + let mut fork_states = vec![]; + for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - let ev = match db.rooms.get_pdu(&ev_id)? 
{ - Some(pdu) => pdu, - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &ev_id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map") - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, - }; - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) -} - -/// TODO: this needs to add events to the DB in a way that does not -/// effect the state of the room -async fn fetch_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - events: &[EventId], -) -> Result> { - let mut pdus = vec![]; - for id in events { - match db.rooms.get_pdu(id)? { - Some(pdu) => pdus.push(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - // TODO: add to our DB somehow? - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); - - pdus.push(pdu); - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + fork_states.push(state); + } else { + return Err(Error::Conflict( + "we don't know of a pdu that is part of our known forks OOPS", + )); } } - Ok(pdus) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - db.rooms.get_pdu_leaves(room_id) + Ok(fork_states) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -1078,9 +1104,12 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. 
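`append_state` builds the pdu_id as room id bytes, a 0xff separator, then the count in big-endian. Because big-endian integers compare the same way lexicographically, sled iterates a room's PDUs in insertion order. A small illustration, with an in-memory atomic standing in for `Globals::next_count()`:

use std::sync::atomic::{AtomicU64, Ordering};

static COUNTER: AtomicU64 = AtomicU64::new(0);

// Stand-in for Globals::next_count(): strictly increasing, never reused.
fn next_count() -> u64 {
    COUNTER.fetch_add(1, Ordering::SeqCst) + 1
}

fn pdu_id(room_id: &str, count: u64) -> Vec<u8> {
    let mut id = room_id.as_bytes().to_vec();
    id.push(0xff);
    id.extend_from_slice(&count.to_be_bytes());
    id
}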
+ let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + db.rooms.append_pdu( - pdu, + &pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), @@ -1089,78 +1118,9 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { &db.admin, )?; - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // db.rooms.append_pdu( - // pdu, - // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - // count, - // pdu_id.clone().into(), - // &db.globals, - // &db.account_data, - // &db.admin, - // )?; - - Ok(()) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() -} - -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + db.rooms.set_room_state(&pdu.room_id, &statehashid)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From 5a8041969dd50a205bb3634f8c5905d7c7717bf4 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 11:05:57 -0500 Subject: [PATCH 011/103] Fix ALL clippy warnings --- src/client_server/account.rs | 78 ++++------------------- src/client_server/membership.rs | 40 ++---------- src/client_server/message.rs | 6 +- src/client_server/profile.rs | 12 +--- src/client_server/redact.rs | 6 +- src/client_server/room.rs | 90 +++++--------------------- src/client_server/state.rs | 6 +- src/database/admin.rs | 6 +- src/database/rooms.rs | 109 ++++++++++++++------------------ src/database/sending.rs | 19 +++--- src/error.rs | 8 ++- src/server_server.rs | 4 +- 12 files changed, 106 insertions(+), 278 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 12c7f7e..76354b6 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -239,11 +239,7 @@ pub async fn 
register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Make conduit bot join @@ -264,11 +260,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -302,11 +294,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.1 Join Rules @@ -323,11 +311,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -346,11 +330,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -367,11 +347,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 6. Events implied by name and topic @@ -390,11 +366,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( @@ -410,11 +382,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Room alias @@ -436,11 +404,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; @@ -463,11 +427,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( PduBuilder { @@ -486,11 +446,7 @@ pub async fn register_route( }, &user_id, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Send welcome message @@ -515,11 +471,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -691,11 +643,7 @@ pub async fn deactivate_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 40e4183..70bb480 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -124,11 +124,7 @@ pub async fn leave_room_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -164,11 +160,7 @@ pub async fn invite_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -220,11 +212,7 @@ pub async fn kick_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -280,11 +268,7 @@ pub async fn ban_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -332,11 +316,7 @@ pub async fn unban_user_route( }, &sender_user, &body.room_id, - 
&db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -685,9 +665,7 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + &db, )?; if state_events.contains(ev_id) { @@ -717,11 +695,7 @@ async fn join_room_by_id_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index c56cc94..c64c390 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -68,11 +68,7 @@ pub async fn send_message_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.transaction_ids.add_txnid( diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 761443d..21759a8 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -63,11 +63,7 @@ pub async fn set_displayname_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update @@ -160,11 +156,7 @@ pub async fn set_avatar_url_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 212e751..282c35a 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -31,11 +31,7 @@ pub async fn redact_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 092e083..631d87b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -65,11 +65,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Let the room creator join @@ -90,11 +86,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -135,11 +127,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4. Events set by preset @@ -175,11 +163,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -196,11 +180,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -225,11 +205,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 5. 
Events listed in initial_state @@ -248,11 +224,7 @@ pub async fn create_room_route( pdu_builder, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -273,11 +245,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -295,11 +263,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -322,11 +286,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -413,11 +373,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Get the old room federations status @@ -457,11 +413,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Join the new room @@ -482,11 +434,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Recommended transferable state events list from the specs @@ -519,11 +467,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -566,11 +510,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index cecb79d..60e8363 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -284,11 +284,7 @@ pub async fn send_state_event_for_key_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; Ok(event_id) diff --git a/src/database/admin.rs b/src/database/admin.rs index 1fb1983..501722e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -60,11 +60,7 @@ impl Admin { }, &conduit_user, &conduit_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, ) .unwrap(); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c37aa1a..d62d4b0 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,7 +2,7 @@ mod edus; pub use edus::RoomEdus; -use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; +use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::error; use regex::Regex; use ring::digest; @@ -447,9 +447,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, - globals: &super::globals::Globals, - account_data: &super::account_data::AccountData, - admin: &super::admin::Admin, + db: &Database, ) -> Result<()> { // Make unsigned fields correct. 
This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -486,7 +484,7 @@ impl Rooms { // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count, &globals)?; + .private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; self.pduid_pdu.insert( &pdu_id, @@ -521,8 +519,8 @@ impl Rooms { ) })?, &pdu.sender, - account_data, - globals, + &db.account_data, + &db.globals, )?; } } @@ -540,10 +538,10 @@ impl Rooms { self.tokenids.insert(key, &[])?; } - if body.starts_with(&format!("@conduit:{}: ", globals.server_name())) + if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &format!("#admins:{}", globals.server_name()) + &format!("#admins:{}", db.globals.server_name()) .try_into() .expect("#admins:server_name is a valid room alias"), )? @@ -570,10 +568,11 @@ impl Rooms { ); match parsed_config { Ok(yaml) => { - admin.send(AdminCommand::RegisterAppservice(yaml)); + db.admin + .send(AdminCommand::RegisterAppservice(yaml)); } Err(e) => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( format!( "Could not parse appservice config: {}", @@ -584,7 +583,7 @@ impl Rooms { } } } else { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( "Expected code block in command body.", ), @@ -592,10 +591,10 @@ impl Rooms { } } "list_appservices" => { - admin.send(AdminCommand::ListAppservices); + db.admin.send(AdminCommand::ListAppservices); } _ => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain(format!( "Command: {}, Args: {:?}", command, args @@ -696,17 +695,12 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. 
- #[allow(clippy::too_many_arguments)] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - globals: &super::globals::Globals, - sending: &super::sending::Sending, - admin: &super::admin::Admin, - account_data: &super::account_data::AccountData, - appservice: &super::appservice::Appservice, + db: &Database, ) -> Result { let PduBuilder { event_type, @@ -789,7 +783,7 @@ impl Rooms { if !match event_type { EventType::RoomEncryption => { // Only allow encryption events if it's allowed in the config - globals.allow_encryption() + db.globals.allow_encryption() } EventType::RoomMember => { let prev_event = self @@ -895,13 +889,13 @@ impl Rooms { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(globals.server_name()) + to_canonical_value(db.globals.server_name()) .expect("server name is a valid CanonicalJsonValue"), ); ruma::signatures::hash_and_sign_event( - globals.server_name().as_str(), - globals.keypair(), + db.globals.server_name().as_str(), + db.globals.keypair(), &mut pdu_json, &RoomVersionId::Version6, ) @@ -922,24 +916,16 @@ impl Rooms { // Increment the last index and use that // This is also the next_batch/since value - let count = globals.next_count()?; + let count = db.globals.next_count()?; let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu_id, &pdu, &globals)?; + let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu( - &pdu, - pdu_json, - count, - pdu_id.clone().into(), - globals, - account_data, - admin, - )?; + self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist @@ -948,31 +934,28 @@ impl Rooms { for server in self .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != globals.server_name()) + .filter(|server| &**server != db.globals.server_name()) { - sending.send_pdu(&server, &pdu_id)?; + db.sending.send_pdu(&server, &pdu_id)?; } - for appservice in appservice.iter_all().filter_map(|r| r.ok()) { + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") .and_then(|users| users.as_sequence()) - .map_or_else( - || Vec::new(), - |users| { - users - .iter() - .map(|users| { - users - .get("regex") - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()) - }) - .filter_map(|o| o) - .collect::>() - }, - ); + .map_or_else(Vec::new, |users| { + users + .iter() + .map(|users| { + users + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); let aliases = namespaces .get("aliases") .and_then(|users| users.get("regex")) @@ -989,25 +972,31 @@ impl Rooms { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, globals.server_name()).ok() + UserId::parse_with_server_name(string, db.globals.server_name()).ok() }); - if bridge_user_id.map_or(false, |bridge_user_id| { - self.is_joined(&bridge_user_id, 
room_id).unwrap_or(false) - }) || users.iter().any(|users| { + let user_is_joined = + |bridge_user_id| self.is_joined(&bridge_user_id, room_id).unwrap_or(false); + let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) || pdu.kind == EventType::RoomMember && pdu .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - }) || aliases.map_or(false, |aliases| { + }; + let matching_aliases = |aliases: Regex| { room_aliases .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) - }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + }; + + if bridge_user_id.map_or(false, user_is_joined) + || users.iter().any(matching_users) + || aliases.map_or(false, matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) { - sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } } } diff --git a/src/database/sending.rs b/src/database/sending.rs index e827dad..4b0309f 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -148,6 +148,15 @@ impl Sending { let servernamepduid = key.clone(); let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); + let exponential_backoff = |(tries, instant): &(u32, Instant)| { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60*60*24) { + min_elapsed_duration = Duration::from_secs(60*60*24); + } + + instant.elapsed() < min_elapsed_duration + }; if let Some((server, is_appservice, pdu_id)) = utils::string_from_bytes( parts .next() @@ -172,15 +181,7 @@ impl Sending { .map(|pdu_id| (server, is_appservice, pdu_id)) ) .filter(|(server, is_appservice, _)| { - if last_failed_try.get(server).map_or(false, |(tries, instant)| { - // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(60) * *tries * *tries; - if min_elapsed_duration > Duration::from_secs(60*60*24) { - min_elapsed_duration = Duration::from_secs(60*60*24); - } - - instant.elapsed() < min_elapsed_duration - }) { + if last_failed_try.get(server).map_or(false, exponential_backoff) { return false; } diff --git a/src/error.rs b/src/error.rs index fed545c..13efce6 100644 --- a/src/error.rs +++ b/src/error.rs @@ -121,10 +121,12 @@ impl log::Log for ConduitLogger { fn log(&self, record: &log::Record<'_>) { let output = format!("{} - {}", record.level(), record.args()); + let match_mod_path = + |path: &str| path.starts_with("conduit::") || path.starts_with("state"); + if self.enabled(record.metadata()) - && (record.module_path().map_or(false, |path| { - path.starts_with("conduit::") || path.starts_with("state") - }) || record + && (record.module_path().map_or(false, match_mod_path) + || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/server_server.rs b/src/server_server.rs index 6907e34..ae59583 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1113,9 +1113,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + &db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time From 
96dc6be14b8065f8833b8372cfe1e3655d358f77 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 15:46:47 -0500 Subject: [PATCH 012/103] Use the auth_events for step 6, WIP forward_extremity_ids fn --- src/server_server.rs | 159 ++++++++++++++++++++++++++++--------------- 1 file changed, 104 insertions(+), 55 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ae59583..77f0fa8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -569,7 +569,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); - for pdu in &body.pdus { + 'main_pdu_loop: for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks @@ -660,7 +660,6 @@ pub async fn send_transaction_message_route<'a>( }; let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB let previous = match fetch_events( &db, @@ -675,6 +674,7 @@ pub async fn send_transaction_message_route<'a>( _ => None, }; + // [auth_cache] At this point we have the auth chain of the incoming event. let mut event_map: state_res::EventMap> = auth_cache .iter() .map(|(k, v)| (k.clone(), v.clone())) @@ -688,7 +688,7 @@ pub async fn send_transaction_message_route<'a>( &pdu.auth_events .iter() .map(|id| { - event_map + auth_cache .get(id) .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) .ok_or_else(|| { @@ -790,7 +790,15 @@ pub async fn send_transaction_message_route<'a>( // End of step 5. // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids(&db, &pdu) { + let fork_states = match forward_extremity_ids( + &db, + &pdu, + server_name, + &pub_key_map, + &mut auth_cache, + ) + .await + { Ok(states) => states, Err(_) => { resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); @@ -805,47 +813,44 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // TODO: remove this is for current debugging Jan, 15 2021 + let mut number_fetches = 0_u32; let mut auth_events = vec![]; // this keeps track if we error so we can break out of these inner loops // to continue on with the incoming PDU's - let mut failed = false; for map in &fork_states { let mut state_auth = vec![]; - for pdu in map.values() { - let event = match auth_cache.get(pdu.event_id()) { + for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { + let event = match auth_cache.get(auth_id) { Some(aev) => aev.clone(), // We should know about every event at this point but just incase... 
None => match fetch_events( &db, server_name, &pub_key_map, - &[pdu.event_id().clone()], + &[auth_id.clone()], &mut auth_cache, ) .await - .map(|mut vec| vec.remove(0)) - { - Ok(aev) => aev.clone(), + .map(|mut vec| { + number_fetches += 1; + vec.remove(0) + }) { + Ok(aev) => aev, Err(_) => { resolved_map.insert( event_id.clone(), Err("Event has been soft failed".into()), ); - failed = true; - break; + continue 'main_pdu_loop; } }, }; state_auth.push(event); } - if failed { - break; - } auth_events.push(state_auth); } - if failed { - continue; - } + info!("{} event's were not in the auth_cache", number_fetches); // Add everything we will need to event_map event_map.extend( @@ -886,7 +891,13 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), - Err(e) => panic!("{:?}", e), + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("State resolution failed, either an event could not be found or deserialization".into()), + ); + continue 'main_pdu_loop; + } } }; @@ -914,6 +925,7 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). @@ -955,6 +967,37 @@ fn validate_event<'a>( }) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + /// Find the event and auth it. /// /// 1. Look in the main timeline (pduid_pdu tree) @@ -1000,36 +1043,6 @@ async fn fetch_events( Ok(pdus) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have any missing events it fails. -/// -/// The `auth_cache` is filled instead of returning a `Vec`. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], - auth_cache: &mut EventMap>, -) -> Result<()> { - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if auth_cache.contains_key(&ev_id) { - continue; - } - - let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) - .await - .map(|mut vec| vec.remove(0))?; - - stack.extend(ev.auth_events()); - auth_cache.insert(ev.event_id().clone(), ev); - } - Ok(()) -} - /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. 
async fn fetch_signing_keys( @@ -1049,6 +1062,7 @@ async fn fetch_signing_keys( } } } + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -1073,9 +1087,23 @@ fn signature_and_hash_check( ) } -fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { +async fn forward_extremity_ids( + db: &Database, + pdu: &PduEvent, + origin: &ServerName, + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + + for incoming_leaf in &pdu.prev_events { + if !current_leaves.contains(incoming_leaf) { + current_leaves.push(incoming_leaf.clone()); + } + } + let mut fork_states = vec![]; - for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1090,11 +1118,32 @@ fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result Date: Sat, 16 Jan 2021 16:37:20 -0500 Subject: [PATCH 013/103] Fixing the incoming events algorithm (review with time) --- src/database/rooms.rs | 2 +- src/server_server.rs | 237 ++++++++++++++++++++++++------------------ 2 files changed, 138 insertions(+), 101 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d62d4b0..325a2e2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -150,7 +150,7 @@ impl Rooms { } } - /// Returns the last state hash key added to the db. + /// Returns the state hash for this pdu. pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { Ok(self.pduid_statehash.get(pdu_id)?) } diff --git a/src/server_server.rs b/src/server_server.rs index 77f0fa8..0eb7d6f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,6 +5,7 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ + client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -590,6 +591,8 @@ pub async fn send_transaction_message_route<'a>( continue; } }; + + // 1. check the server is in the room (optional) if !db.rooms.exists(&room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); continue; @@ -634,14 +637,13 @@ pub async fn send_transaction_message_route<'a>( // the auth events that it references. let mut auth_cache = EventMap::new(); - // 1. check the server is in the room (optional) // 2. check content hash, redact if doesn't match // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events - // 6. persist this event as an outlier // 7. if not timeline event: stop - let pdu = match validate_event( + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let (pdu, previous) = match validate_event( &db, value, event_id.clone(), @@ -659,59 +661,16 @@ pub async fn send_transaction_message_route<'a>( } }; - let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events( - &db, - server_name, - &pub_key_map, - &pdu.prev_events, - &mut auth_cache, - ) - .await - { - Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), - _ => None, + let single_prev = if previous.len() == 1 { + previous.first().cloned() + } else { + None }; - // [auth_cache] At this point we have the auth chain of the incoming event. - let mut event_map: state_res::EventMap> = auth_cache - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(); + // 6. persist the event as an outlier. + db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; - // Check that the event passes auth based on the auth_events - let is_authed = state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous.clone(), - &pdu.auth_events - .iter() - .map(|id| { - auth_cache - .get(id) - .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) - .ok_or_else(|| { - Error::Conflict( - "Auth event not found, event failed recursive auth checks.", - ) - }) - }) - .collect::>>()?, - None, // TODO: third party invite - ) - .map_err(|_e| Error::Conflict("Auth check failed"))?; - - if !is_authed { - resolved_map.insert( - pdu.event_id().clone(), - Err("Event has failed auth check with auth events".into()), - ); - continue; - } - // End of step 4. - - // Step 5. event passes auth based on state at the event + // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -757,9 +716,7 @@ pub async fn send_transaction_message_route<'a>( &res.auth_chain_ids, &mut auth_cache, ) - .await? - .into_iter() - .collect(), + .await?, ) } Err(_) => { @@ -771,10 +728,11 @@ pub async fn send_transaction_message_route<'a>( } }; + // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous.clone(), + single_prev.clone(), &state_at_event, None, // TODO: third party invite ) @@ -787,10 +745,34 @@ pub async fn send_transaction_message_route<'a>( ); continue; } - // End of step 5. + // End of step 10. + + // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let current_state = db + .rooms + .room_state_full(pdu.room_id())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + single_prev.clone(), + ¤t_state, + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Soft fail, we add the event as an outlier. + resolved_map.insert( + pdu.event_id().clone(), + Err("Event has been soft failed".into()), + ); + }; // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids( + let fork_states = match forward_extremities( &db, &pdu, server_name, @@ -806,7 +788,9 @@ pub async fn send_transaction_message_route<'a>( } }; - // Step 6. event passes auth based on state of all forks and current room state + // 13. 
start state-res with all previous forward extremities minus the ones that are in + // the prev_events of this event plus the new one created by this event and use + // the result as the new room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() @@ -852,6 +836,7 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); + let mut event_map = EventMap::new(); // Add everything we will need to event_map event_map.extend( auth_events @@ -904,7 +889,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous, + single_prev, &state_at_forks, None, ) @@ -925,14 +910,19 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// An async function that can recursively calls itself. +type AsyncRecursiveResult<'a, T> = Pin> + 'a + Send>>; + /// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). -/// 2. Passes signature checks, otherwise event is dropped. -/// 3. Passes hash checks, otherwise it is redacted before being processed further. -/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). -/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +/// 2. check content hash, redact if doesn't match +/// 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events +/// 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" +/// 5. reject "due to auth events" if the event doesn't pass auth based on the auth events +/// 7. if not timeline event: stop +/// 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events fn validate_event<'a>( db: &'a Database, value: CanonicalJsonObject, @@ -940,9 +930,24 @@ fn validate_event<'a>( pub_key_map: &'a PublicKeyMap, origin: &'a ServerName, auth_cache: &'a mut EventMap>, -) -> Pin> + 'a + Send>> { +) -> AsyncRecursiveResult<'a, (Arc, Vec>)> { Box::pin(async move { - let mut val = signature_and_hash_check(&pub_key_map, value)?; + let mut val = + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value + } + } + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -959,11 +964,42 @@ fn validate_event<'a>( .await .map_err(|_| "Event failed auth chain check".to_string())?; - db.rooms - .append_pdu_outlier(pdu.event_id(), &pdu) + let pdu = Arc::new(pdu.clone()); + + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let previous = fetch_events(&db, origin, &pub_key_map, &pdu.prev_events, auth_cache) + .await .map_err(|e| e.to_string())?; - Ok(pdu) + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + if previous.len() == 1 { + previous.first().cloned() + } else { + None + }, + &pdu.auth_events + .iter() + .map(|id| { + auth_cache + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + "Auth event not found, event failed recursive auth checks.".to_string() + }) + }) + .collect::, _>>()?, + None, // TODO: third party invite + ) + .map_err(|_e| "Auth check failed".to_string())?; + + if !is_authed { + return Err("Event has failed auth check with auth events".to_string()); + } + + Ok((pdu, previous)) }) } @@ -990,7 +1026,10 @@ async fn fetch_check_auth_events( let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await - .map(|mut vec| vec.remove(0))?; + .map(|mut vec| { + vec.pop() + .ok_or_else(|| Error::Conflict("Event was not found in fetch_events")) + })??; stack.extend(ev.auth_events()); auth_cache.insert(ev.event_id().clone(), ev); @@ -1028,11 +1067,12 @@ async fn fetch_events( { Ok(res) => { let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; - Arc::new(pdu) + pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, @@ -1063,31 +1103,11 @@ async fn fetch_signing_keys( } } -fn signature_and_hash_check( - pub_key_map: &ruma::signatures::PublicKeyMap, - value: CanonicalJsonObject, -) -> std::result::Result { - Ok( - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - error!("CONTENT HASH FAILED"); - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value - } - } - Err(_e) => { - return Err("Signature verification failed".to_string()); - } - }, - ) -} - -async fn forward_extremity_ids( +/// Gather all state snapshots needed to resolve the current state of the room. +/// +/// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) +async fn forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, @@ -1102,6 +1122,8 @@ async fn forward_extremity_ids( } } + let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; let mut fork_states = vec![]; for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { @@ -1109,6 +1131,10 @@ async fn forward_extremity_ids( .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); + + if current_hash.as_ref() == Some(&state_hash) { + includes_current_state = true; + } let state = db .rooms .state_full(&pdu.room_id, &state_hash)? 
@@ -1144,6 +1170,17 @@ async fn forward_extremity_ids( } } + // This guarantees that our current room state is included + if !includes_current_state && current_hash.is_some() { + fork_states.push( + db.rooms + .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(), + ) + } + Ok(fork_states) } From c65bde41e01d388a7601b9af860c0950838f65ce Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:08:59 -0500 Subject: [PATCH 014/103] WIP gather and update forward extremities --- src/database/rooms.rs | 18 ++++++ src/pdu.rs | 21 ++++++- src/server_server.rs | 138 ++++++++++++++++++++++++++++-------------- 3 files changed, 132 insertions(+), 45 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 325a2e2..665e328 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,6 +397,24 @@ impl Rooms { Ok(events) } + /// Force an update to the leaves of a room. + pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { + self.roomid_pduleaves.remove(key?)?; + } + + for event_id in event_ids.iter() { + let mut key = prefix.to_owned(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } + /// Replace the leaves of a room with a new event. pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/pdu.rs b/src/pdu.rs index 340ddee..e38410f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,7 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { @@ -284,6 +284,25 @@ impl state_res::Event for PduEvent { } } +// These impl's allow us to dedup state snapshots when resolving state +// for incoming events (federation/send/{txn}). +impl Eq for PduEvent {} +impl PartialEq for PduEvent { + fn eq(&self, other: &Self) -> bool { + self.event_id == other.event_id + } +} +impl PartialOrd for PduEvent { + fn partial_cmp(&self, other: &Self) -> Option { + self.event_id.partial_cmp(&other.event_id) + } +} +impl Ord for PduEvent { + fn cmp(&self, other: &Self) -> Ordering { + self.event_id.cmp(&other.event_id) + } +} + /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. 
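Note on the Eq/PartialOrd/Ord impls added to PduEvent just above: they are what lets the /send code dedup whole state snapshots, since a snapshot is a BTreeMap keyed by (event type, state key) with Arc<PduEvent> values, and a BTreeSet can only collapse identical snapshots if those values are Ord. A minimal sketch of the idea, using a simplified stand-in type rather than Conduit's actual PduEvent/EventType:

    use std::collections::{BTreeMap, BTreeSet};
    use std::sync::Arc;

    // Stand-in for PduEvent: compared and ordered by event_id only, mirroring the impls above.
    #[derive(PartialEq, Eq, PartialOrd, Ord)]
    struct Event {
        event_id: String,
    }

    fn main() {
        let a = Arc::new(Event { event_id: "$a:example.org".to_owned() });
        let b = Arc::new(Event { event_id: "$b:example.org".to_owned() });

        // Two forks whose state resolved to the same snapshot, plus one that differs.
        let mut snap1: BTreeMap<(String, String), Arc<Event>> = BTreeMap::new();
        snap1.insert(("m.room.member".to_owned(), "@x:example.org".to_owned()), Arc::clone(&a));
        let snap2 = snap1.clone();
        let mut snap3 = snap1.clone();
        snap3.insert(("m.room.member".to_owned(), "@x:example.org".to_owned()), Arc::clone(&b));

        // Arc<Event> is Ord because Event is, so BTreeMap and BTreeSet compose and the
        // duplicate snapshot collapses away - this is what the fork_states set relies on.
        let fork_states: BTreeSet<_> = vec![snap1, snap2, snap3].into_iter().collect();
        assert_eq!(fork_states.len(), 2);
    }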
diff --git a/src/server_server.rs b/src/server_server.rs index 0eb7d6f..16a1a8e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,7 +5,6 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ - client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -25,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -600,31 +599,21 @@ pub async fn send_transaction_message_route<'a>( let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); - if let Some(sig) = value.get("signatures") { - match sig { - CanonicalJsonValue::Object(entity) => { - for key in entity.keys() { - // TODO: save this in a DB maybe... - // fetch the public signing key - let origin = <&ServerName>::try_from(key.as_str()).unwrap(); - let keys = fetch_signing_keys(&db, origin).await?; - pub_key_map.insert( - origin.to_string(), - keys.into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(), - ); - } - } - _ => { - resolved_map.insert( - event_id, - Err("`signatures` is not a JSON object".to_string()), - ); - continue; - } - } + if let Some(CanonicalJsonValue::String(sender)) = value.get("sender") { + let sender = + UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); + let origin = sender.server_name(); + + // TODO: this could fail or the server not respond... + let keys = fetch_signing_keys(&db, origin).await?; + + pub_key_map.insert( + origin.to_string(), + keys.into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); } else { resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); continue; @@ -642,8 +631,9 @@ pub async fn send_transaction_message_route<'a>( // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events // 7. if not timeline event: stop - // 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let (pdu, previous) = match validate_event( + // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + // the events found in step 8 can be authed/resolved and appended to the DB + let (pdu, previous): (_, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -670,6 +660,9 @@ pub async fn send_transaction_message_route<'a>( // 6. persist the event as an outlier. db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all + // the checks in this list starting at 1. These are not timeline events. + // // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db @@ -771,8 +764,12 @@ pub async fn send_transaction_message_route<'a>( ); }; - // Gather the forward extremities and resolve - let fork_states = match forward_extremities( + // Step 11. Ensure that the state is derived from the previous current state (i.e. 
we calculated by doing state res + // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) + // + // calculate_forward_extremities takes care of adding the current state if not already in the state sets + // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. + let (mut fork_states, fork_ids) = match calculate_forward_extremities( &db, &pdu, server_name, @@ -788,6 +785,12 @@ pub async fn send_transaction_message_route<'a>( } }; + // add the incoming events to the mix of state snapshots + // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets + fork_states.insert(state_at_event.clone()); + + let fork_states = fork_states.into_iter().collect::>(); + // 13. start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -901,7 +904,9 @@ pub async fn send_transaction_message_route<'a>( Err("Event has been soft failed".into()), ); } else { - append_state(&db, &pdu)?; + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_state(&db, &pdu, &fork_ids)?; + // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } @@ -1106,25 +1111,52 @@ async fn fetch_signing_keys( /// Gather all state snapshots needed to resolve the current state of the room. /// /// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res -/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) -async fn forward_extremities( +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). +/// +/// The state snapshot of the incoming event __needs__ to be added to the resulting list. +async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, -) -> Result>>> { +) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + let mut is_incoming_leaf = true; + // Make sure the incoming event is not already a forward extremity + // FIXME: I think this could happen if different servers send us the same event?? + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + + // If the incoming event is already referenced by an existing event + // then do nothing - it's not a candidate to be a new extremity if + // it has been referenced. + if already_referenced(db, pdu)? { + is_incoming_leaf = false; + // This event has been dealt with already?? + } + + // TODO: + // [dendrite] Checks if any other leaves have been referenced and removes them + // but as long as we update the pdu leaves here and for events on our server this + // should not be possible. 
+ + // Remove any forward extremities that are referenced by this incoming events prev_events for incoming_leaf in &pdu.prev_events { - if !current_leaves.contains(incoming_leaf) { - current_leaves.push(incoming_leaf.clone()); + if current_leaves.contains(incoming_leaf) { + if let Some(pos) = current_leaves.iter().position(|x| *x == *incoming_leaf) { + current_leaves.remove(pos); + } } } let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; - let mut fork_states = vec![]; + let mut fork_states = BTreeSet::new(); for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db @@ -1142,8 +1174,10 @@ async fn forward_extremities( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); - fork_states.push(state); + fork_states.insert(state); } else { + error!("Forward extremity not found... {}", id); + let res = db .sending .send_federation_request( @@ -1166,25 +1200,37 @@ async fn forward_extremities( .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); - fork_states.push(state); + fork_states.insert(state); } } + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.push( + fork_states.insert( db.rooms .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(), - ) + ); } - Ok(fork_states) + Ok((fork_states, dbg!(current_leaves))) } -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { +/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) +fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { + Ok(false) +} + +fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1195,13 +1241,17 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( - &pdu, + pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db, )?; + // If we update the room leaves after calling append_pdu it will stick since append_pdu + // calls replace_pdu_leaves with only the given event. + db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; + // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From 00436a32b0e6a85e6b77e530fb90df5fa4d1a958 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:36:44 -0500 Subject: [PATCH 015/103] Update ruma and rocket to latest git rev and tokio to 1.0 Ruma updated the event signing validation code and there was a dep resolving failure with serde rocket and tokio so I updated rocket latest and tokio 1.0 to fix. 
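For context on the tokio part of this bump: one rename the 0.2 -> 1.0 migration typically forces is tokio::time::delay_for becoming tokio::time::sleep. A minimal sketch on tokio 1.x follows; it illustrates the migration in general, not necessarily the exact change made to main.rs/sync.rs in this commit:

    use std::time::Duration;

    // Requires tokio 1.x with the `macros`, `rt-multi-thread` and `time` features.
    #[tokio::main]
    async fn main() {
        // tokio 0.2 spelled this `tokio::time::delay_for(...)`; it was removed in 1.0.
        tokio::time::sleep(Duration::from_secs(1)).await;
    }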
--- Cargo.lock | 454 ++++++++++++++++++++++++-------------- Cargo.toml | 6 +- src/client_server/sync.rs | 3 +- src/main.rs | 5 + 4 files changed, 297 insertions(+), 171 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d08de95..f172072 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -105,12 +105,6 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -148,15 +142,15 @@ checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "bytemuck" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aa2ec95ca3b5c54cf73c91acf06d24f4495d5f1b1c12506ae3483d646177ac" +checksum = "5a4bad0c5981acc24bc09e532f35160f952e35422603f0563cd7a73c2c2e65a0" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -164,6 +158,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + [[package]] name = "cc" version = "1.0.66" @@ -192,7 +192,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.13.0", + "base64", "directories", "http", "image", @@ -210,7 +210,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio", + "tokio 1.0.2", "trust-dns-resolver", ] @@ -387,9 +387,9 @@ dependencies = [ [[package]] name = "figment" -version = "0.9.4" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13e2d266426f89e45fc544117ade84fad2a58ff676f34cc34e123fe4391b856" +checksum = "a3add2ec7727c9584a0ce75ee3c0f54f0ab692c7934450cc3a0287251e3a4f06" dependencies = [ "pear", "serde", @@ -457,9 +457,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" dependencies = [ "futures-channel", "futures-core", @@ -472,9 +472,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" dependencies = [ "futures-core", "futures-sink", @@ -482,15 +482,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" dependencies = [ "futures-core", "futures-task", @@ -499,15 +499,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -517,24 +517,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" dependencies = [ "futures-channel", "futures-core", @@ -543,7 +543,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.3", + "pin-project-lite 0.2.4", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -578,7 +578,7 @@ checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.1+wasi-snapshot-preview1", ] [[package]] @@ -609,7 +609,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "futures-sink", @@ -617,8 +617,28 @@ dependencies = [ "http", "indexmap", "slab", - "tokio", - "tokio-util", + "tokio 0.2.24", + "tokio-util 0.3.1", + "tracing", + "tracing-futures", +] + +[[package]] +name = "h2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.0.2", + "tokio-util 0.6.1", "tracing", "tracing-futures", ] @@ -640,9 +660,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -660,11 +680,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -675,7 +695,17 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes", + "bytes 0.5.6", + "http", +] + +[[package]] +name = "http-body" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +dependencies = [ + "bytes 1.0.1", "http", ] @@ -697,19 +727,43 @@ version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.2.7", "http", - "http-body", + "http-body 0.3.1", "httparse", "httpdate", "itoa", - "pin-project 1.0.3", + "pin-project 1.0.4", "socket2", - "tokio", + "tokio 0.2.24", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.0", + "http", + "http-body 0.4.0", + "httparse", + "httpdate", + "itoa", + "pin-project 1.0.4", + "socket2", + "tokio 1.0.2", "tower-service", "tracing", "want", @@ -721,10 +775,10 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ - "bytes", - "hyper", + "bytes 0.5.6", + "hyper 0.13.9", "native-tls", - "tokio", + "tokio 0.2.24", "tokio-tls", ] @@ -868,15 +922,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -889,9 +943,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" dependencies = [ "cfg-if 0.1.10", ] @@ -986,21 +1040,23 @@ dependencies = [ "kernel32-sys", "libc", "log", - 
"miow", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ - "iovec", "libc", - "mio", + "log", + "miow 0.3.6", + "ntapi", + "winapi 0.3.9", ] [[package]] @@ -1015,6 +1071,16 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "miow" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2", + "winapi 0.3.9", +] + [[package]] name = "native-tls" version = "0.2.7" @@ -1044,6 +1110,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -1170,7 +1245,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.1.57", "smallvec", "winapi 0.3.9", ] @@ -1221,11 +1296,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" +checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" dependencies = [ - "pin-project-internal 1.0.3", + "pin-project-internal 1.0.4", ] [[package]] @@ -1241,9 +1316,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" +checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" dependencies = [ "proc-macro2", "quote", @@ -1258,9 +1333,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -1309,9 +1384,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -1365,9 +1440,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" +checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1437,6 +1512,15 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" 
+[[package]] +name = "redox_syscall" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" @@ -1444,7 +1528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom 0.1.16", - "redox_syscall", + "redox_syscall 0.1.57", "rust-argon2", ] @@ -1470,9 +1554,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -1482,9 +1566,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -1501,14 +1585,14 @@ version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ - "base64 0.13.0", - "bytes", + "base64", + "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body", - "hyper", + "http-body 0.3.1", + "hyper 0.13.9", "hyper-tls", "ipnet", "js-sys", @@ -1518,10 +1602,10 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "serde", "serde_urlencoded", - "tokio", + "tokio 0.2.24", "tokio-tls", "url", "wasm-bindgen", @@ -1558,7 +1642,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "async-trait", "atomic", @@ -1578,7 +1662,7 @@ dependencies = [ "serde", "state", "time", - "tokio", + "tokio 1.0.2", "ubyte", "version_check", "yansi", @@ -1587,7 +1671,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "devise", "glob", @@ -1599,23 +1683,24 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "cookie", "either", "http", - "hyper", + "hyper 0.14.2", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", + "pin-project-lite 0.2.4", "ref-cast", "smallvec", "state", "time", - "tokio", + "tokio 1.0.2", 
"tokio-rustls", "uncased", "unicode-xid", @@ -1624,8 +1709,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "js_int", @@ -1642,8 +1727,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "http", "percent-encoding", @@ -1657,8 +1742,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1668,8 +1753,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "ruma-api", "ruma-common", @@ -1682,8 +1767,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.10.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "http", @@ -1701,8 +1786,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "maplit", @@ -1714,8 +1799,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-common", @@ -1728,8 +1813,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1739,8 +1824,8 @@ dependencies = [ [[package]] name = 
"ruma-federation-api" -version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-api", @@ -1754,11 +1839,11 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "paste", - "rand 0.8.1", + "rand 0.8.2", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1768,8 +1853,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro2", "quote", @@ -1779,16 +1864,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" -dependencies = [ - "serde", -] +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" [[package]] name = "ruma-serde" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "form_urlencoded", "itoa", @@ -1800,8 +1882,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1811,10 +1893,10 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ - "base64 0.13.0", + "base64", "ring", "ruma-identifiers", "ruma-serde", @@ -1828,7 +1910,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1851,11 +1933,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64", "log", "ring", "sct", @@ -1934,18 +2016,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" dependencies = [ "proc-macro2", "quote", @@ -2026,9 +2108,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" @@ -2065,7 +2147,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" +source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" dependencies = [ "itertools", "maplit", @@ -2138,14 +2220,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.2", + "redox_syscall 0.2.4", "remove_dir_all", "winapi 0.3.9", ] @@ -2172,18 +2254,18 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" dependencies = [ "lazy_static", ] [[package]] name = "time" -version = "0.2.23" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" dependencies = [ "const_fn", "libc", @@ -2238,28 +2320,41 @@ version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "iovec", "lazy_static", + "memchr", + "mio 0.6.23", + "pin-project-lite 0.1.11", + "slab", +] + +[[package]] +name = "tokio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" +dependencies = [ + "autocfg", + "bytes 1.0.1", "libc", "memchr", - 
"mio", - "mio-uds", + "mio 0.7.7", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite 0.2.4", "signal-hook-registry", - "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" dependencies = [ "proc-macro2", "quote", @@ -2268,14 +2363,24 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio 1.0.2", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "rustls", - "tokio", - "webpki", + "pin-project-lite 0.2.4", + "tokio 1.0.2", ] [[package]] @@ -2285,7 +2390,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio", + "tokio 0.2.24", ] [[package]] @@ -2294,12 +2399,27 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-core", "futures-sink", "log", "pin-project-lite 0.1.11", - "tokio", + "tokio 0.2.24", +] + +[[package]] +name = "tokio-util" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" +dependencies = [ + "bytes 1.0.1", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.2.4", + "tokio 1.0.2", + "tokio-stream", ] [[package]] @@ -2325,7 +2445,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "tracing-attributes", "tracing-core", ] @@ -2376,7 +2496,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "url", ] @@ -2396,7 +2516,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "trust-dns-proto", ] @@ -2511,9 +2631,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" diff --git a/Cargo.toml b/Cargo.toml index 004cbfd..eb594a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,11 +14,11 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f336e5a172361fc1860461bb03667b1ed2", features = ["tls"] } # Used to handle requests +rocket = { 
git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } @@ -29,7 +29,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", featu # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.23" } +tokio = { version = "1.0.2", features = ["macros", "time"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 3136116..be51aeb 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -674,7 +674,8 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! { _ = &mut delay => {} _ = watcher => {} diff --git a/src/main.rs b/src/main.rs index 9817c56..7d5bc71 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,6 +28,11 @@ fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger std::env::set_var("CONDUIT_LOG_LEVEL", "off"); + std::env::set_var( + "CONDUIT_CONFIG", + "/home/devinr/aprog/rust/__forks__/conduit/conduit.toml", + ); + let config = Figment::from(rocket::Config::release_default()) .merge( From 3a6f2644508db5f2e2e0a471a2b568d7a12b2d81 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:41:38 -0500 Subject: [PATCH 016/103] Add ability to update room leaves with multiple eventIds Tokio seems a bit broken with Rocket... --- src/client_server/membership.rs | 2 ++ src/database/rooms.rs | 37 ++++++++++++++++----------------- src/server_server.rs | 7 ++----- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 70bb480..1159185 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -665,6 +665,8 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + // TODO: can we simplify the DAG or should we copy it exactly?? 
+ &pdu.prev_events, &db, )?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 665e328..a3f3aab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,8 +397,11 @@ impl Rooms { Ok(events) } - /// Force an update to the leaves of a room. - pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + /// Replace the leaves of a room. + /// + /// The provided `event_ids` become the new leaves, this enables an event having multiple + /// `prev_events`. + pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -415,21 +418,6 @@ impl Rooms { Ok(()) } - /// Replace the leaves of a room with a new event. - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { - self.roomid_pduleaves.remove(key?)?; - } - - prefix.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&prefix, event_id.as_bytes())?; - - Ok(()) - } - /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -465,6 +453,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, + leaves: &[EventId], db: &Database, ) -> Result<()> { // Make unsigned fields correct. This is not properly documented in the spec, but state @@ -497,7 +486,7 @@ impl Rooms { // We no longer keep this pdu as an outlier self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; - self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; + self.replace_pdu_leaves(&pdu.room_id, leaves)?; // Mark as read first so the sending client doesn't get a notification even if appending // fails @@ -943,7 +932,17 @@ impl Rooms { // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; + // remove the + self.append_pdu( + &pdu, + pdu_json, + count, + pdu_id.clone().into(), + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + &[pdu.event_id.clone()], + db, + )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist diff --git a/src/server_server.rs b/src/server_server.rs index 16a1a8e..f782ad5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -24,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -1245,13 +1245,10 @@ fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> R utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + &new_room_leaves, &db, )?; - // If we update the room leaves after calling append_pdu it will stick since append_pdu - // calls replace_pdu_leaves with only the given event. 
- db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; - // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From a897608025a3a973f69eeeb43c233fc466375b20 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:59:08 -0500 Subject: [PATCH 017/103] Roll back tokio and rocket update since ruma's request is at 0.2 tokio --- Cargo.lock | 210 +++++++++----------------------------- Cargo.toml | 4 +- src/client_server/sync.rs | 3 +- src/main.rs | 5 - 4 files changed, 54 insertions(+), 168 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f172072..ce17095 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -105,6 +105,12 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + [[package]] name = "base64" version = "0.13.0" @@ -192,7 +198,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64", + "base64 0.13.0", "directories", "http", "image", @@ -210,7 +216,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio 1.0.2", + "tokio", "trust-dns-resolver", ] @@ -617,28 +623,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 0.2.24", - "tokio-util 0.3.1", - "tracing", - "tracing-futures", -] - -[[package]] -name = "h2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" -dependencies = [ - "bytes 1.0.1", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 1.0.2", - "tokio-util 0.6.1", + "tokio", + "tokio-util", "tracing", "tracing-futures", ] @@ -699,16 +685,6 @@ dependencies = [ "http", ] -[[package]] -name = "http-body" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" -dependencies = [ - "bytes 1.0.1", - "http", -] - [[package]] name = "httparse" version = "1.3.4" @@ -731,39 +707,15 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.2.7", + "h2", "http", - "http-body 0.3.1", + "http-body", "httparse", "httpdate", "itoa", "pin-project 1.0.4", "socket2", - "tokio 0.2.24", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" -dependencies = [ - "bytes 1.0.1", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.0", - "http", - "http-body 0.4.0", - "httparse", - "httpdate", - "itoa", - "pin-project 1.0.4", - "socket2", - "tokio 1.0.2", + "tokio", "tower-service", "tracing", "want", @@ -776,9 +728,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ "bytes 0.5.6", - "hyper 0.13.9", + "hyper", "native-tls", - "tokio 0.2.24", + "tokio", "tokio-tls", ] @@ -1040,23 +992,21 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow 
0.2.2", + "miow", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio" -version = "0.7.7" +name = "mio-uds" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" dependencies = [ + "iovec", "libc", - "log", - "miow 0.3.6", - "ntapi", - "winapi 0.3.9", + "mio", ] [[package]] @@ -1071,16 +1021,6 @@ dependencies = [ "ws2_32-sys", ] -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2", - "winapi 0.3.9", -] - [[package]] name = "native-tls" version = "0.2.7" @@ -1110,15 +1050,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "num-integer" version = "0.1.44" @@ -1585,14 +1516,14 @@ version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ - "base64", + "base64 0.13.0", "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body 0.3.1", - "hyper 0.13.9", + "http-body", + "hyper", "hyper-tls", "ipnet", "js-sys", @@ -1605,7 +1536,7 @@ dependencies = [ "pin-project-lite 0.2.4", "serde", "serde_urlencoded", - "tokio 0.2.24", + "tokio", "tokio-tls", "url", "wasm-bindgen", @@ -1642,7 +1573,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" dependencies = [ "async-trait", "atomic", @@ -1662,7 +1593,7 @@ dependencies = [ "serde", "state", "time", - "tokio 1.0.2", + "tokio", "ubyte", "version_check", "yansi", @@ -1671,7 +1602,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" dependencies = [ "devise", "glob", @@ -1683,24 +1614,23 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" dependencies = [ "cookie", "either", "http", - "hyper 0.14.2", + "hyper", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", - "pin-project-lite 0.2.4", "ref-cast", "smallvec", "state", "time", - "tokio 1.0.2", + "tokio", "tokio-rustls", "uncased", "unicode-xid", @@ -1896,7 +1826,7 @@ name = "ruma-signatures" version = "0.6.0-alpha.1" source = 
"git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ - "base64", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -1910,7 +1840,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1933,11 +1863,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.19.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" dependencies = [ - "base64", + "base64 0.12.3", "log", "ring", "sct", @@ -2325,36 +2255,23 @@ dependencies = [ "futures-core", "iovec", "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.11", - "slab", -] - -[[package]] -name = "tokio" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" -dependencies = [ - "autocfg", - "bytes 1.0.1", "libc", "memchr", - "mio 0.7.7", + "mio", + "mio-uds", "num_cpus", - "once_cell", - "pin-project-lite 0.2.4", + "pin-project-lite 0.1.11", "signal-hook-registry", + "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "1.0.0" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" +checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2", "quote", @@ -2363,24 +2280,14 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.22.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls", - "tokio 1.0.2", - "webpki", -] - -[[package]] -name = "tokio-stream" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" +checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" dependencies = [ "futures-core", - "pin-project-lite 0.2.4", - "tokio 1.0.2", + "rustls", + "tokio", + "webpki", ] [[package]] @@ -2390,7 +2297,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio 0.2.24", + "tokio", ] [[package]] @@ -2404,22 +2311,7 @@ dependencies = [ "futures-sink", "log", "pin-project-lite 0.1.11", - "tokio 0.2.24", -] - -[[package]] -name = "tokio-util" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" -dependencies = [ - "bytes 1.0.1", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.4", - "tokio 1.0.2", - "tokio-stream", + "tokio", ] [[package]] @@ -2496,7 +2388,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "url", ] @@ -2516,7 +2408,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "trust-dns-proto", ] diff --git a/Cargo.toml 
b/Cargo.toml index eb594a0..405c89f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "031948c1daaa146128d8a435be116476f2adde00", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers @@ -29,7 +29,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", featu # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "1.0.2", features = ["macros", "time"] } +tokio = { version = "0.2.24" } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index be51aeb..3136116 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -674,8 +674,7 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let delay = tokio::time::sleep(duration); - tokio::pin!(delay); + let mut delay = tokio::time::delay_for(duration); tokio::select! { _ = &mut delay => {} _ = watcher => {} diff --git a/src/main.rs b/src/main.rs index 7d5bc71..9817c56 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,11 +28,6 @@ fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger std::env::set_var("CONDUIT_LOG_LEVEL", "off"); - std::env::set_var( - "CONDUIT_CONFIG", - "/home/devinr/aprog/rust/__forks__/conduit/conduit.toml", - ); - let config = Figment::from(rocket::Config::release_default()) .merge( From a119d858f368d2f714efe8104895727117e02a90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 24 Jan 2021 16:05:52 +0100 Subject: [PATCH 018/103] feature: push rule settings --- src/client_server/push.rs | 624 +++++++++++++++++++++++++++++++++++++- src/client_server/room.rs | 8 +- src/main.rs | 5 + 3 files changed, 618 insertions(+), 19 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 05ba8d0..667d667 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -1,16 +1,22 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; -use log::warn; use ruma::{ api::client::{ error::ErrorKind, - r0::push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled}, + r0::push::{ + delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, + get_pushrules_all, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, + }, }, events::EventType, + push::{ + ConditionalPushRuleInit, ContentPushRule, OverridePushRule, PatternedPushRuleInit, + RoomPushRule, SenderPushRule, SimplePushRuleInit, UnderridePushRule, + }, }; #[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; +use rocket::{delete, get, post, put}; #[cfg_attr( feature = "conduit_bin", @@ -36,16 +42,201 @@ pub async fn get_pushrules_all_route( .into()) } -#[cfg_attr(feature = "conduit_bin", put( - 
"/_matrix/client/r0/pushrules/<_>/<_>/<_>", - //data = "" -))] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] +pub async fn get_pushrule_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = event.content.global; + let rule = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::_Custom(_) => None, + }; + + if let Some(rule) = rule { + Ok(get_pushrule::Response { rule }.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + } +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] pub async fn set_pushrule_route( db: State<'_, Database>, - //body: Ruma, + body: Ruma>, ) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_route"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + } + + global.override_.insert(OverridePushRule( + ConditionalPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + conditions: body.conditions.clone(), + } + .into(), + )); + } + RuleKind::Underride => { + if let Some(rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + } + + global.underride.insert(UnderridePushRule( + ConditionalPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + conditions: body.conditions.clone(), + } + .into(), + )); + } + RuleKind::Sender => { + if let Some(rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + } + + global.sender.insert(SenderPushRule( + SimplePushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + } + .into(), + )); + } + RuleKind::Room => { + if let Some(rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + } + + global.room.insert(RoomPushRule( + SimplePushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + } + .into(), + )); + } + RuleKind::Content => { + if let Some(rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + } + + global.content.insert(ContentPushRule( + PatternedPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + pattern: body.pattern.clone().unwrap_or_default(), + } + .into(), + )); + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; db.flush().await?; @@ -54,19 +245,426 @@ pub async fn set_pushrule_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") +)] +pub async fn get_pushrule_actions_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + let actions = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::_Custom(_) => None, + }; + + db.flush().await?; + + Ok(get_pushrule_actions::Response { + actions: actions.unwrap_or_default(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") +)] +pub async fn set_pushrule_actions_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + rule.0.actions = body.actions.clone(); + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + rule.0.actions = body.actions.clone(); + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + rule.0.actions = body.actions.clone(); + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + rule.0.actions = body.actions.clone(); + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + rule.0.actions = body.actions.clone(); + global.content.insert(rule); + } + } + RuleKind::_Custom(_) => {} + }; + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; + + db.flush().await?; + + Ok(set_pushrule_actions::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") +)] +pub async fn get_pushrule_enabled_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + 
ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + let enabled = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::_Custom(_) => false, + }; + + db.flush().await?; + + Ok(get_pushrule_enabled::Response { enabled }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") )] pub async fn set_pushrule_enabled_route( db: State<'_, Database>, + body: Ruma>, ) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_enabled_route"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + rule.0.enabled = body.enabled; + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + rule.0.enabled = body.enabled; + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + rule.0.enabled = body.enabled; + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + rule.0.enabled = body.enabled; + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + rule.0.enabled = body.enabled; + global.content.insert(rule); + } + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; db.flush().await?; Ok(set_pushrule_enabled::Response.into()) } +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] +pub async fn delete_pushrule_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let 
sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + } + } + RuleKind::Underride => { + if let Some(rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + } + } + RuleKind::Sender => { + if let Some(rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + } + } + RuleKind::Room => { + if let Some(rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + } + } + RuleKind::Content => { + if let Some(rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + } + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; + + db.flush().await?; + + Ok(delete_pushrule::Response.into()) +} + #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] pub async fn get_pushers_route() -> ConduitResult { Ok(get_pushers::Response { diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 631d87b..4adc335 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -220,12 +220,8 @@ pub async fn create_room_route( continue; } - db.rooms.build_and_append_pdu( - pdu_builder, - &sender_user, - &room_id, - &db, - )?; + db.rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?; } // 6. 
Events implied by name and topic diff --git a/src/main.rs b/src/main.rs index 9817c56..054c859 100644 --- a/src/main.rs +++ b/src/main.rs @@ -55,7 +55,12 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_capabilities_route, client_server::get_pushrules_all_route, client_server::set_pushrule_route, + client_server::get_pushrule_route, client_server::set_pushrule_enabled_route, + client_server::get_pushrule_enabled_route, + client_server::get_pushrule_actions_route, + client_server::set_pushrule_actions_route, + client_server::delete_pushrule_route, client_server::get_room_event_route, client_server::get_filter_route, client_server::create_filter_route, From 35c1904b37812b08576e8da84d9e4effd2f71fc8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 24 Jan 2021 20:18:40 -0500 Subject: [PATCH 019/103] Finish forward extremity gathering, use resolved state as new snapshot --- src/server_server.rs | 147 +++++++++++++++++++++++-------------------- 1 file changed, 80 insertions(+), 67 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index f782ad5..e733d24 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,6 +18,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -483,34 +484,6 @@ pub async fn get_public_rooms_route( .into()) } -#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] -pub enum PrevEvents { - Sequential(T), - Fork(Vec), -} - -impl IntoIterator for PrevEvents { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - match self { - Self::Sequential(item) => vec![item].into_iter(), - Self::Fork(list) => list.into_iter(), - } - } -} - -impl PrevEvents { - pub fn new(id: &[T]) -> Self { - match id { - [] => panic!("All events must have previous event"), - [single_id] => Self::Sequential(single_id.clone()), - rest => Self::Fork(rest.to_vec()), - } - } -} - #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -605,8 +578,16 @@ pub async fn send_transaction_message_route<'a>( UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); let origin = sender.server_name(); - // TODO: this could fail or the server not respond... - let keys = fetch_signing_keys(&db, origin).await?; + let keys = match fetch_signing_keys(&db, origin).await { + Ok(keys) => keys, + Err(_) => { + resolved_map.insert( + event_id, + Err("Could not find signing keys for this server".to_string()), + ); + continue; + } + }; pub_key_map.insert( origin.to_string(), @@ -769,11 +750,12 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, fork_ids) = match calculate_forward_extremities( + let (mut fork_states, extremities) = match calculate_forward_extremities( &db, &pdu, server_name, &pub_key_map, + current_state, &mut auth_cache, ) .await @@ -791,6 +773,7 @@ pub async fn send_transaction_message_route<'a>( let fork_states = fork_states.into_iter().collect::>(); + let mut update_state = false; // 13. 
start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -800,11 +783,12 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // We do need to force an update to this rooms state + update_state = true; + // TODO: remove this is for current debugging Jan, 15 2021 let mut number_fetches = 0_u32; let mut auth_events = vec![]; - // this keeps track if we error so we can break out of these inner loops - // to continue on with the incoming PDU's for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { @@ -821,14 +805,12 @@ pub async fn send_transaction_message_route<'a>( .await .map(|mut vec| { number_fetches += 1; - vec.remove(0) + vec.pop() }) { - Ok(aev) => aev, - Err(_) => { - resolved_map.insert( - event_id.clone(), - Err("Event has been soft failed".into()), - ); + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); continue 'main_pdu_loop; } }, @@ -839,20 +821,19 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); - let mut event_map = EventMap::new(); // Add everything we will need to event_map - event_map.extend( + auth_cache.extend( auth_events .iter() .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) .flatten(), ); - event_map.extend( + auth_cache.extend( incoming_auth_events .into_iter() .map(|pdu| (pdu.event_id().clone(), pdu)), ); - event_map.extend( + auth_cache.extend( state_at_event .into_iter() .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), @@ -873,7 +854,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) .collect(), - &mut event_map, + &mut auth_cache, ) { Ok(res) => res .into_iter() @@ -905,14 +886,23 @@ pub async fn send_transaction_message_route<'a>( ); } else { // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_state(&db, &pdu, &fork_ids)?; + append_incoming_pdu( + &db, + &pdu, + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } } - Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } /// An async function that can recursively calls itself. @@ -1029,6 +1019,7 @@ async fn fetch_check_auth_events( continue; } + // TODO: Batch these async calls so we can wait on multiple at once let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await .map(|mut vec| { @@ -1119,6 +1110,7 @@ async fn calculate_forward_extremities( pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, + current_state: BTreeMap<(EventType, Option), Arc>, auth_cache: &mut EventMap>, ) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; @@ -1126,17 +1118,13 @@ async fn calculate_forward_extremities( let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
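    // Editorial sketch, not part of the patch: the extremity rule described in the
    // state-resolution comment above, using the `current_leaves`, `pdu` and
    // `is_incoming_leaf` names from this function (the surrounding code spreads the
    // same logic over several steps, and `pdu.prev_events` is assumed to be the
    // incoming event's prev_events list). The new forward extremities are the
    // current leaves minus everything this event references, plus the event itself
    // when nothing references it yet:
    //
    //     let mut leaves: HashSet<EventId> = current_leaves.iter().cloned().collect();
    //     for prev in &pdu.prev_events {
    //         // anything the incoming event references is no longer a leaf
    //         leaves.remove(prev);
    //     }
    //     if is_incoming_leaf {
    //         // the incoming event becomes a new leaf of the room DAG
    //         leaves.insert(pdu.event_id().clone());
    //     }
    //     let current_leaves: Vec<EventId> = leaves.into_iter().collect();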
- if current_leaves.contains(pdu.event_id()) { - is_incoming_leaf = false; - // Not sure what to do here - } - + // // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if already_referenced(db, pdu)? { + if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { is_incoming_leaf = false; - // This event has been dealt with already?? + // Not sure what to do here } // TODO: @@ -1213,29 +1201,54 @@ async fn calculate_forward_extremities( // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.insert( - db.rooms - .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(), - ); + fork_states.insert(current_state); } Ok((fork_states, dbg!(current_leaves))) } -/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) -fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { - Ok(false) -} - -fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { +/// Update the room state to be the resolved state and add the fully auth'ed event +/// to the DB. +/// +/// TODO: If we force the state we need to validate all events in that state +/// any events we fetched from another server need to be fully verified? +fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: Option>>, +) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let new = state + .into_iter() + .map(|((ev, k), pdu)| { + Ok(( + ( + ev, + k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + db.rooms + .get_pdu_id(pdu.event_id()) + .ok() + .flatten() + .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? + .to_vec(), + )) + }) + .collect::>()?; + + info!("Force update of state for {:?}", pdu); + + db.rooms.force_state(pdu.room_id(), new, &db.globals)?; + } + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 894b6ef0379946d26fa1f50f45daea50d739014f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:33:41 -0500 Subject: [PATCH 020/103] Resolved state is set as the current room state on incoming events --- src/server_server.rs | 43 +++++++++++++------------------------------ 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e733d24..14a1d0c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -870,36 +870,20 @@ pub async fn send_transaction_message_route<'a>( } }; - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_incoming_pdu( + &db, &pdu, - single_prev, - &state_at_forks, - None, - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Soft fail, we add the event as an outlier. 
- resolved_map.insert( - pdu.event_id().clone(), - Err("Event has been soft failed".into()), - ); - } else { - // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( - &db, - &pdu, - &extremities, - if update_state { - Some(state_at_forks) - } else { - None - }, - )?; + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; - // Event has passed all auth/stateres checks - resolved_map.insert(pdu.event_id().clone(), Ok(())); - } + // Event has passed all auth/stateres checks + resolved_map.insert(pdu.event_id().clone(), Ok(())); } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) @@ -1210,8 +1194,7 @@ async fn calculate_forward_extremities( /// Update the room state to be the resolved state and add the fully auth'ed event /// to the DB. /// -/// TODO: If we force the state we need to validate all events in that state -/// any events we fetched from another server need to be fully verified? +/// TODO: Since all these events passed state resolution can we trust them to add fn append_incoming_pdu( db: &Database, pdu: &PduEvent, From b8b40ce38b2b6ac14294293bbf7b50330f3b667d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:50:45 -0500 Subject: [PATCH 021/103] Cleanup dbg prints and error messages --- src/server_server.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 14a1d0c..20d76f1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -496,7 +496,7 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - dbg!(&*body); + // dbg!(&*body); for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { @@ -1148,8 +1148,6 @@ async fn calculate_forward_extremities( fork_states.insert(state); } else { - error!("Forward extremity not found... {}", id); - let res = db .sending .send_federation_request( @@ -1188,7 +1186,7 @@ async fn calculate_forward_extremities( fork_states.insert(current_state); } - Ok((fork_states, dbg!(current_leaves))) + Ok((fork_states, current_leaves)) } /// Update the room state to be the resolved state and add the fully auth'ed event From cd0c5c0566251f882b05f97f53266e251f11c4af Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 11:20:33 -0500 Subject: [PATCH 022/103] Append state event that pass resolution to DB, update to tokio 1.1 --- Cargo.lock | 607 ++++++++++++++------------------------ Cargo.toml | 14 +- src/client_server/sync.rs | 5 +- src/database.rs | 3 +- src/database/globals.rs | 14 +- src/server_server.rs | 113 +++++-- 6 files changed, 322 insertions(+), 434 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce17095..66f624c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,20 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" - [[package]] name = "adler32" version = "1.2.0" @@ -48,6 +33,27 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" +[[package]] +name = "async-stream" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.42" @@ -76,7 +82,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -85,32 +91,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "backtrace" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide 0.4.3", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -142,9 +128,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "f07aa6688c702439a1be0307b6a94dffe1168569e45b9500c1372bc580740d59" [[package]] name = "bytemuck" @@ -158,12 +144,6 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.0.1" @@ -176,12 +156,6 @@ version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ 
-198,7 +172,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.13.0", + "base64", "directories", "http", "image", @@ -264,7 +238,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -273,7 +247,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "const_fn", "crossbeam-utils", "lazy_static", @@ -288,10 +262,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "deflate" version = "0.8.6" @@ -349,7 +329,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -376,7 +356,7 @@ version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -442,25 +422,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.3.12" @@ -549,7 +513,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.4", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -571,20 +535,20 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.1+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -597,12 +561,6 @@ dependencies = [ "weezl", ] -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - [[package]] name = "glob" 
version = "0.3.0" @@ -611,11 +569,11 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" dependencies = [ - "bytes 0.5.6", + "bytes", "fnv", "futures-core", "futures-sink", @@ -661,7 +619,7 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -670,18 +628,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" dependencies = [ - "bytes 0.5.6", + "bytes", "http", ] @@ -699,11 +657,11 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.9" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" dependencies = [ - "bytes 0.5.6", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -723,15 +681,15 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", + "bytes", "hyper", "native-tls", "tokio", - "tokio-tls", + "tokio-native-tls", ] [[package]] @@ -784,16 +742,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", + "cfg-if", ] [[package]] @@ -804,7 +753,7 @@ checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ "socket2", "widestring", - "winapi 0.3.9", + "winapi", "winreg 0.6.2", ] @@ -816,9 +765,9 @@ checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" dependencies = [ "either", ] @@ -831,18 +780,15 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" -version = "0.1.20" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cc797adac5f083b8ff0ca6f6294a999393d76e197c36488e2ef732c4715f6fa3" -dependencies = [ - "byteorder", -] +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" dependencies = [ "wasm-bindgen", ] @@ -856,16 +802,6 @@ dependencies = [ "serde", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -874,9 +810,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.82" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" +checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" [[package]] name = "linked-hash-map" @@ -895,11 +831,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -950,16 +886,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.3.7" @@ -969,56 +895,27 @@ dependencies = [ "adler32", ] -[[package]] -name = "miniz_oxide" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" -dependencies = [ - "adler", - "autocfg", -] - [[package]] name = "mio" -version = "0.6.23" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", "libc", "log", "miow", - "net2", - "slab", - "winapi 0.2.8", -] - -[[package]] -name = "mio-uds" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio", + "ntapi", + "winapi", ] [[package]] name = "miow" -version = "0.2.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + 
"socket2", + "winapi", ] [[package]] @@ -1040,14 +937,12 @@ dependencies = [ ] [[package]] -name = "net2" -version = "0.2.37" +name = "ntapi" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1101,12 +996,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" - [[package]] name = "once_cell" version = "1.5.2" @@ -1120,7 +1009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "lazy_static", "libc", @@ -1173,12 +1062,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.1.57", "smallvec", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1256,12 +1145,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - [[package]] name = "pin-project-lite" version = "0.2.4" @@ -1289,7 +1172,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide 0.3.7", + "miniz_oxide", ] [[package]] @@ -1371,9 +1254,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1416,7 +1299,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.1", + "getrandom 0.2.2", ] [[package]] @@ -1507,17 +1390,17 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "fd281b1030aa675fb90aa994d07187645bb3c8fc756ca766e7c3070b439de9de" dependencies = [ - "base64 0.13.0", - "bytes 0.5.6", + "base64", + "bytes", "encoding_rs", "futures-core", "futures-util", @@ -1530,14 +1413,13 @@ dependencies = [ "lazy_static", "log", "mime", - "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.4", + "pin-project-lite", "serde", "serde_urlencoded", "tokio", - "tokio-tls", + "tokio-native-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1567,13 +1449,13 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.9", + "winapi", ] [[package]] name = "rocket" version = "0.5.0-dev" -source = 
"git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "async-trait", "atomic", @@ -1602,7 +1484,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "devise", "glob", @@ -1614,7 +1496,7 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=031948c1daaa146128d8a435be116476f2adde00#031948c1daaa146128d8a435be116476f2adde00" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "cookie", "either", @@ -1626,6 +1508,7 @@ dependencies = [ "parking_lot", "pear", "percent-encoding", + "pin-project-lite", "ref-cast", "smallvec", "state", @@ -1640,7 +1523,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "js_int", @@ -1658,7 +1541,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "http", "percent-encoding", @@ -1673,7 +1556,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1684,7 +1567,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "ruma-api", "ruma-common", @@ -1698,7 +1581,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "http", @@ -1717,7 +1600,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" 
+source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "maplit", @@ -1730,7 +1613,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-common", @@ -1744,7 +1627,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1755,7 +1638,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-api", @@ -1770,10 +1653,10 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "paste", - "rand 0.8.2", + "rand 0.8.3", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1784,7 +1667,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro2", "quote", @@ -1795,12 +1678,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "form_urlencoded", "itoa", @@ -1813,7 +1696,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1824,9 +1707,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" 
-source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ - "base64 0.13.0", + "base64", "ring", "ruma-identifiers", "ruma-serde", @@ -1840,18 +1723,12 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", ] -[[package]] -name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - [[package]] name = "rustc_version" version = "0.2.3" @@ -1863,11 +1740,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64", "log", "ring", "sct", @@ -1887,7 +1764,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1946,18 +1823,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -2048,9 +1925,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2077,15 +1954,15 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" +source = "git+https://github.com/ruma/state-res?rev=791c66d73cf064d09db0cdf767d5fef43a343425#791c66d73cf064d09db0cdf767d5fef43a343425" dependencies = [ "itertools", + "log", "maplit", "ruma", "serde", "serde_json", "thiserror", - "tracing", ] [[package]] @@ -2139,9 +2016,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.58" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", @@ -2154,12 
+2031,12 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "rand 0.8.2", + "rand 0.8.3", "redox_syscall 0.2.4", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2184,18 +2061,18 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" +checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "time" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -2203,7 +2080,7 @@ dependencies = [ "stdweb", "time-macros", "version_check", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2231,9 +2108,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -2246,32 +2123,28 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.24" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" +checksum = "8efab2086f17abcddb8f756117665c958feee6b2e39974c2f1600592ab3a4195" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", + "autocfg", + "bytes", "libc", "memchr", "mio", - "mio-uds", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite", "signal-hook-registry", - "slab", "tokio-macros", - "winapi 0.3.9", + "winapi", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" dependencies = [ "proc-macro2", "quote", @@ -2279,39 +2152,51 @@ dependencies = [ ] [[package]] -name = "tokio-rustls" -version = "0.14.1" +name = "tokio-native-tls" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" -dependencies = [ - "futures-core", - "rustls", - "tokio", - "webpki", -] - -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" dependencies = [ "native-tls", "tokio", ] [[package]] -name = "tokio-util" -version = "0.3.1" +name = "tokio-rustls" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = 
"bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "bytes 0.5.6", + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feb971a26599ffd28066d387f109746df178eff14d5ea1e235015c5601967a4b" +dependencies = [ + "async-stream", + "bytes", "futures-core", "futures-sink", "log", - "pin-project-lite 0.1.11", + "pin-project-lite", "tokio", + "tokio-stream", ] [[package]] @@ -2325,9 +2210,9 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" @@ -2335,24 +2220,11 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.4", - "tracing-attributes", + "cfg-if", + "pin-project-lite", "tracing-core", ] -[[package]] -name = "tracing-attributes" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tracing-core" version = "0.1.17" @@ -2374,18 +2246,22 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" +checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" dependencies = [ "async-trait", - "backtrace", + "cfg-if", + "data-encoding", "enum-as-inner", - "futures", + "futures-channel", + "futures-io", + "futures-util", "idna", + "ipnet", "lazy_static", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec", "thiserror", "tokio", @@ -2394,17 +2270,17 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" +checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" dependencies = [ - "backtrace", - "cfg-if 0.1.10", - "futures", + "cfg-if", + "futures-util", "ipconfig", "lazy_static", "log", "lru-cache", + "parking_lot", "resolv-conf", "smallvec", "thiserror", @@ -2436,15 +2312,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2523,17 +2390,17 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.1+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -2541,9 +2408,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" dependencies = [ "bumpalo", "lazy_static", @@ -2556,11 +2423,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2568,9 +2435,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2578,9 +2445,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2", "quote", @@ -2591,15 +2458,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" +checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", @@ -2617,9 +2484,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2bb9fc8309084dd7cd651336673844c1d47f8ef6d2091ec160b27f5c4aa277" +checksum = "4a32b378380f4e9869b22f0b5177c68a5519f03b3454fde0b291455ddbae266c" [[package]] name = "widestring" @@ -2627,12 +2494,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -2643,12 +2504,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -2667,7 +2522,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2676,17 +2531,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 405c89f..2c6c741 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,28 +14,28 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "031948c1daaa146128d8a435be116476f2adde00", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.24" } 
+tokio = { version = "1.1.0", features = ["macros", "time", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries log = "0.4.11" # Used for rocket<->ruma conversions -http = "0.2.1" +http = "0.2.3" # Used to find data directory for default db path directories = "3.0.1" @@ -50,7 +50,7 @@ rand = "0.7.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.10.9" +reqwest = "0.11.0" # Used for conduit::Error type thiserror = "1.0.22" # Used to generate thumbnails for images @@ -60,7 +60,7 @@ base64 = "0.13.0" # Used when hashing the state ring = "0.16.19" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.19.6" +trust-dns-resolver = "0.20.0" # Used to find matching events for appservices regex = "1.4.2" diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 3136116..3bfff45 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -674,9 +674,10 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! { - _ = &mut delay => {} + _ = &mut delay, if delay.is_elapsed() => {} _ = watcher => {} } } diff --git a/src/database.rs b/src/database.rs index e94a517..190f8be 100644 --- a/src/database.rs +++ b/src/database.rs @@ -106,8 +106,7 @@ impl Database { db.open_tree("global")?, db.open_tree("servertimeout_signingkey")?, config, - ) - .await?, + )?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 4670068..7156a75 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -26,11 +26,7 @@ pub struct Globals { } impl Globals { - pub async fn load( - globals: sled::Tree, - server_keys: sled::Tree, - config: Config, - ) -> Result { + pub fn load(globals: sled::Tree, server_keys: sled::Tree, config: Config) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
.expect("utils::generate_keypair always returns Some"); @@ -77,11 +73,9 @@ impl Globals { config, keypair: Arc::new(keypair), reqwest_client, - dns_resolver: TokioAsyncResolver::tokio_from_system_conf() - .await - .map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), servertimeout_signingkey: server_keys, }) diff --git a/src/server_server.rs b/src/server_server.rs index 20d76f1..adf3c58 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -25,7 +25,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashMap}, convert::TryFrom, fmt::Debug, future::Future, @@ -839,7 +839,7 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - match state_res::StateResolution::resolve( + let res = match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, &fork_states @@ -856,10 +856,7 @@ pub async fn send_transaction_message_route<'a>( .collect(), &mut auth_cache, ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), + Ok(res) => res, Err(_) => { resolved_map.insert( pdu.event_id().clone(), @@ -867,7 +864,29 @@ pub async fn send_transaction_message_route<'a>( ); continue 'main_pdu_loop; } + }; + let mut resolved = BTreeMap::new(); + for (k, id) in res { + // We should know of the event but just incase + let pdu = match auth_cache.get(&id) { + Some(pdu) => pdu.clone(), + None => { + match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache) + .await + .map(|mut vec| vec.pop()) + { + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); + continue 'main_pdu_loop; + } + } + } + }; + resolved.insert(k, pdu); } + resolved }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). @@ -1199,37 +1218,67 @@ fn append_incoming_pdu( new_room_leaves: &[EventId], state: Option>>, ) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pduid.to_vec(), + ); + } + None => { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + // TODO: can we use are current state if we just add this event to the end of our + // pduid_pdu tree?? + let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + + db.rooms.append_pdu( + &*pdu, + utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &new_room_leaves, + &db, + )?; + // TODO: is this ok... 
+ db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pdu_id.to_vec(), + ); + } + } + } + + info!("Force update of state for {:?}", pdu); + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + } + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - if let Some(state) = state { - let new = state - .into_iter() - .map(|((ev, k), pdu)| { - Ok(( - ( - ev, - k.ok_or_else(|| Error::Conflict("State contained non state event"))?, - ), - db.rooms - .get_pdu_id(pdu.event_id()) - .ok() - .flatten() - .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? - .to_vec(), - )) - }) - .collect::>()?; - - info!("Force update of state for {:?}", pdu); - - db.rooms.force_state(pdu.room_id(), new, &db.globals)?; - } - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 56b816a2be7a8286f8ec4e60e198e64e4a12227d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 21:45:33 -0500 Subject: [PATCH 023/103] Fix and integrate outlier tree, build forks after adding event to DB --- src/database.rs | 2 +- src/database/rooms.rs | 95 +++++++----- src/server_server.rs | 334 +++++++++++++++++++++++++----------------- 3 files changed, 263 insertions(+), 168 deletions(-) diff --git a/src/database.rs b/src/database.rs index 190f8be..7ad18cb 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, + pduid_outlierpdu: db.open_tree("pduid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a3f3aab..d459aee 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,9 +27,10 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, + time::Duration, }; -use super::admin::AdminCommand; +use super::{admin::AdminCommand, sending::Sending}; /// The unique identifier of each state group. /// @@ -67,7 +68,7 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) /// Any pdu that has passed the steps up to auth with auth_events. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) pduid_outlierpdu: sled::Tree, } impl Rooms { @@ -85,13 +86,20 @@ impl Rooms { let mut pduid = room_id.as_bytes().to_vec(); pduid.push(0xff); pduid.extend_from_slice(&pduid_short?); - self.pduid_pdu.get(&pduid)?.map_or_else( - || Err(Error::bad_database("Failed to find PDU in state snapshot.")), - |b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }, - ) + match self.pduid_pdu.get(&pduid)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")), + None => self + .pduid_outlierpdu + .get(pduid)? 
+ .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + } }) .filter_map(|r| r.ok()) .map(|pdu| { @@ -137,12 +145,20 @@ impl Rooms { Ok::<_, Error>(Some(( pdu_id.clone().into(), - serde_json::from_slice::( - &self.pduid_pdu.get(&pdu_id)?.ok_or_else(|| { - Error::bad_database("PDU in state not found in database.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, + match self.pduid_pdu.get(&pdu_id)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + None => self + .pduid_outlierpdu + .get(pdu_id)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })??, + }, ))) }) } else { @@ -307,9 +323,12 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) @@ -328,13 +347,17 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } + /// Returns the pdu. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { @@ -420,23 +443,27 @@ impl Rooms { /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + if let Some(id) = self.eventid_pduid.get(event_id.as_bytes())? { + self.pduid_outlierpdu.get(id)?.map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) + } else { + Ok(None) + } } /// Returns true if the event_id was previously inserted. 
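// A minimal, self-contained sketch of the lookup pattern the hunks above keep
// repeating: try the timeline tree (`pduid_pdu`) first, then fall back to the
// outlier tree (`pduid_outlierpdu`), both keyed by the same pdu id. The helper
// name and the plain-`sled` signature are illustrative, not part of the patch.
fn get_raw_pdu(
    pduid_pdu: &sled::Tree,
    pduid_outlierpdu: &sled::Tree,
    pdu_id: &[u8],
) -> sled::Result<Option<sled::IVec>> {
    // Timeline events win if the id happens to exist in both trees.
    if let Some(bytes) = pduid_pdu.get(pdu_id)? {
        return Ok(Some(bytes));
    }
    // Otherwise the event may only have passed the outlier checks so far.
    pduid_outlierpdu.get(pdu_id)
}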
- pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + pub fn append_pdu_outlier(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.pduid_outlierpdu.len()); + + // we need to be able to find it by event_id + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + let res = self - .eventid_outlierpdu + .pduid_outlierpdu .insert( - event_id.as_bytes(), + pdu_id, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), ) .map(|op| op.is_some())?; @@ -484,7 +511,9 @@ impl Rooms { } // We no longer keep this pdu as an outlier - self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + if let Some(id) = self.eventid_pduid.remove(pdu.event_id().as_bytes())? { + self.pduid_outlierpdu.remove(id)?; + } self.replace_pdu_leaves(&pdu.room_id, leaves)?; diff --git a/src/server_server.rs b/src/server_server.rs index adf3c58..ad0a1a4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -614,7 +614,7 @@ pub async fn send_transaction_message_route<'a>( // 7. if not timeline event: stop // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous): (_, Vec>) = match validate_event( + let (pdu, previous): (Arc, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -638,69 +638,75 @@ pub async fn send_transaction_message_route<'a>( None }; + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. // // Step 10. 
check the auth of the event passes based on the calculated state of the event - let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = - match db - .sending - .send_federation_request( - &db.globals, + let (mut state_at_event, incoming_auth_events): ( + StateMap>, + Vec>, + ) = match db + .sending + .send_federation_request( + &db.globals, + server_name, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events( + &db, server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, ) - .await - { - Ok(res) => { - let state = fetch_events( + .await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect(); + + ( + state, + fetch_events( &db, server_name, &pub_key_map, - &res.pdu_ids, + &res.auth_chain_ids, &mut auth_cache, ) - .await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - ( - state, - fetch_events( - &db, - server_name, - &pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await?, - ) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await?, + ) + } + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( @@ -750,12 +756,25 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, extremities) = match calculate_forward_extremities( + let extremities = match calculate_forward_extremities(&db, &pdu).await { + Ok(fork_ids) => fork_ids, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; + } + }; + + // Now that the event has passed all auth it is added into the timeline, we do have to + // find the leaves otherwise we would do this sooner + append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + + let mut fork_states = match build_forward_extremity_snapshots( &db, - &pdu, + pdu.room_id(), server_name, - &pub_key_map, current_state, + &extremities, + &pub_key_map, &mut auth_cache, ) .await @@ -767,6 +786,9 @@ pub async fn send_transaction_message_route<'a>( } }; + // Make this the state after (since we appended_incoming_pdu this should agree with our servers + // current state). 
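// "State after" here means the state map once the event itself has been folded
// in, which is what the `state_at_event.insert(...)` call just below does. A
// tiny self-contained model of that step, with simplified stand-ins for the
// crate's `StateMap<Arc<PduEvent>>` types:
use std::{collections::BTreeMap, sync::Arc};

type StateKey = (String, Option<String>);

fn state_after<E>(
    mut state_before: BTreeMap<StateKey, Arc<E>>,
    kind: String,
    state_key: Option<String>,
    event: Arc<E>,
) -> BTreeMap<StateKey, Arc<E>> {
    // The event overwrites (or creates) the entry for its own (kind, state_key).
    state_before.insert((kind, state_key), event);
    state_before
}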
+ state_at_event.insert((pdu.kind(), pdu.state_key()), pdu.clone()); // add the incoming events to the mix of state snapshots // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets fork_states.insert(state_at_event.clone()); @@ -840,7 +862,7 @@ pub async fn send_transaction_message_route<'a>( ); let res = match state_res::StateResolution::resolve( - &pdu.room_id, + pdu.room_id(), &RoomVersionId::Version6, &fork_states .into_iter() @@ -865,6 +887,7 @@ pub async fn send_transaction_message_route<'a>( continue 'main_pdu_loop; } }; + let mut resolved = BTreeMap::new(); for (k, id) in res { // We should know of the event but just incase @@ -890,10 +913,9 @@ pub async fn send_transaction_message_route<'a>( }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( + update_resolved_state( &db, - &pdu, - &extremities, + pdu.room_id(), if update_state { Some(state_at_forks) } else { @@ -905,7 +927,10 @@ pub async fn send_transaction_message_route<'a>( resolved_map.insert(pdu.event_id().clone(), Ok(())); } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(send_transaction_message::v1::Response { + pdus: dbg!(resolved_map), + } + .into()) } /// An async function that can recursively calls itself. @@ -1036,13 +1061,14 @@ async fn fetch_check_auth_events( Ok(()) } -/// Find the event and auth it. +/// Find the event and auth it. Once the event is validated (steps 1 - 8) +/// it is appended to the outliers Tree. /// /// 1. Look in the main timeline (pduid_pdu tree) /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation /// 4. TODO: Ask other servers over federation? -async fn fetch_events( +pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, key_map: &PublicKeyMap, @@ -1071,6 +1097,13 @@ async fn fetch_events( .await .map_err(|_| Error::Conflict("Authentication of event failed"))?; + // create the pduid for this event but stick it in the outliers DB + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), @@ -1084,7 +1117,7 @@ async fn fetch_events( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. -async fn fetch_signing_keys( +pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, ) -> Result> { @@ -1108,26 +1141,28 @@ async fn fetch_signing_keys( /// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). /// /// The state snapshot of the incoming event __needs__ to be added to the resulting list. -async fn calculate_forward_extremities( +pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, - origin: &ServerName, - pub_key_map: &PublicKeyMap, - current_state: BTreeMap<(EventType, Option), Arc>, - auth_cache: &mut EventMap>, -) -> Result<(BTreeSet>>, Vec)> { +) -> Result> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
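// What `calculate_forward_extremities` is maintaining, written out as a
// self-contained model (simplified types; `roomid_pduleaves` stores exactly
// this set per room): an event stops being a leaf once something references it
// as a prev_event, and the incoming event becomes a leaf only if nothing we
// already know of references it.
use std::collections::HashSet;

fn update_leaves(
    mut leaves: HashSet<String>,   // current forward extremities (event ids)
    incoming_id: &str,             // the new event
    prev_events: &[String],        // its prev_events
    already_referenced: bool,      // true if some known event points at it
) -> HashSet<String> {
    for prev in prev_events {
        leaves.remove(prev);
    }
    if !already_referenced {
        leaves.insert(incoming_id.to_owned());
    }
    leaves
}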
- // + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - is_incoming_leaf = false; - // Not sure what to do here + // + // We first check if know of the event and then don't include it as a forward + // extremity if it is a timeline event + if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { + is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); } // TODO: @@ -1144,11 +1179,34 @@ async fn calculate_forward_extremities( } } - let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + + Ok(current_leaves) +} + +/// This should always be called after the incoming event has been appended to the DB. +/// +/// This guarentees that the incoming event will be in the state sets (at least our servers +/// and the sending server). +pub(crate) async fn build_forward_extremity_snapshots( + db: &Database, + room_id: &RoomId, + origin: &ServerName, + current_state: StateMap>, + current_leaves: &[EventId], + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let current_hash = db.rooms.current_state_hash(room_id)?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); - for id in ¤t_leaves { + for id in current_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1158,14 +1216,21 @@ async fn calculate_forward_extremities( if current_hash.as_ref() == Some(&state_hash) { includes_current_state = true; } - let state = db + + let mut state_before = db .rooms - .state_full(&pdu.room_id, &state_hash)? + .state_full(room_id, &state_hash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); + .collect::>(); - fork_states.insert(state); + // Now it's the state after + if let Some(pdu) = db.rooms.get_pdu_from_id(&id)? { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, Arc::new(pdu)); + } + + fork_states.insert(state_before); } else { let res = db .sending @@ -1173,7 +1238,7 @@ async fn calculate_forward_extremities( &db.globals, origin, get_room_state_ids::v1::Request { - room_id: pdu.room_id(), + room_id, event_id: id, }, ) @@ -1181,41 +1246,38 @@ async fn calculate_forward_extremities( // TODO: This only adds events to the auth_cache, there is for sure a better way to // do this... - fetch_events(&db, origin, &pub_key_map, &res.auth_chain_ids, auth_cache).await?; + fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - let state = fetch_events(&db, origin, &pub_key_map, &res.pdu_ids, auth_cache) + let mut state_before = fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) .await? .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); + .collect::>(); - fork_states.insert(state); + if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) + .await? 
+ .pop() + { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, pdu); + } + + // Now it's the state after + fork_states.insert(state_before); } } - // Add the incoming event only if it is a leaf, we do this after fetching all the - // state since we know we have already fetched the state of the incoming event so lets - // not do it again! - if is_incoming_leaf { - current_leaves.push(pdu.event_id().clone()); - } - // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { fork_states.insert(current_state); } - Ok((fork_states, current_leaves)) + Ok(fork_states) } -/// Update the room state to be the resolved state and add the fully auth'ed event -/// to the DB. -/// -/// TODO: Since all these events passed state resolution can we trust them to add -fn append_incoming_pdu( +pub(crate) fn update_resolved_state( db: &Database, - pdu: &PduEvent, - new_room_leaves: &[EventId], + room_id: &RoomId, state: Option>>, ) -> Result<()> { // Update the state of the room if needed @@ -1236,44 +1298,50 @@ fn append_incoming_pdu( ); } None => { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // TODO: can we use are current state if we just add this event to the end of our - // pduid_pdu tree?? - let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &*pdu, - utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &new_room_leaves, - &db, - )?; - // TODO: is this ok... - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pdu_id.to_vec(), - ); + error!("We didn't append an event as an outlier\n{:?}", pdu); } } } - info!("Force update of state for {:?}", pdu); - - db.rooms - .force_state(pdu.room_id(), new_state, &db.globals)?; + db.rooms.force_state(room_id, new_state, &db.globals)?; } + Ok(()) +} + +/// Append the incoming event setting the state snapshot to the state from the +/// server that sent the event. +pub(crate) fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: &StateMap>, +) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type.clone(), + state_k + .clone() + .ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + pduid.to_vec(), + ); + } + None => { + error!("We didn't append an event as an outlier\n{:?}", pdu); + } + } + } + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1281,7 +1349,7 @@ fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
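// How an incoming timeline event is persisted, pulled out of the code below
// into a self-contained sketch (the key helper is illustrative; the write
// order is the invariant the comment above describes):
//
//   1. append_to_state(&pdu_id, &pdu)  -> store the state snapshot for this pdu id
//   2. append_pdu(...)                 -> store the event itself
//   3. set_room_state(room_id, hash)   -> only then advance the room's current state
//
// Storing the state first means there is never a persisted pdu without a state
// entry; advancing the current state last means it never refers to an event
// that is not yet persisted.
fn pdu_key(room_id: &str, count: u64) -> Vec<u8> {
    // Keys are `room_id bytes ++ 0xff ++ count as big-endian u64`; 0xff can
    // never occur inside a UTF-8 room id, and big-endian keeps sled's
    // lexicographic key order identical to the numeric order of `count`.
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&count.to_be_bytes());
    key
}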
- let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let state_hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( pdu, @@ -1292,9 +1360,7 @@ fn append_incoming_pdu( &db, )?; - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + db.rooms.set_room_state(pdu.room_id(), &state_hash)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From a0ecd76e210cc924884a5b1c1d2d81d3b608827a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 26 Jan 2021 21:53:03 -0500 Subject: [PATCH 024/103] Run cargo fmt using nightly --- src/client_server/push.rs | 21 +++++++++++---------- src/client_server/read_marker.rs | 3 ++- src/database/appservice.rs | 6 ++++-- src/database/globals.rs | 9 +++++---- src/error.rs | 6 +++++- src/main.rs | 16 ++++++++++------ 6 files changed, 37 insertions(+), 24 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 667d667..fd938c1 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -5,10 +5,11 @@ use ruma::{ error::ErrorKind, r0::push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, - get_pushrules_all, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, + get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, + set_pushrule_enabled, RuleKind, }, }, - events::EventType, + events::{push_rules, EventType}, push::{ ConditionalPushRuleInit, ContentPushRule, OverridePushRule, PatternedPushRuleInit, RoomPushRule, SenderPushRule, SimplePushRuleInit, UnderridePushRule, @@ -30,7 +31,7 @@ pub async fn get_pushrules_all_route( let event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -54,7 +55,7 @@ pub async fn get_pushrule_route( let event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -116,7 +117,7 @@ pub async fn set_pushrule_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -262,7 +263,7 @@ pub async fn get_pushrule_actions_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -325,7 +326,7 @@ pub async fn set_pushrule_actions_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -428,7 +429,7 @@ pub async fn get_pushrule_enabled_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -488,7 +489,7 @@ pub async fn set_pushrule_enabled_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -591,7 +592,7 @@ pub async fn delete_pushrule_route( let mut event = db .account_data - .get::(None, &sender_user, EventType::PushRules)? + .get::(None, &sender_user, EventType::PushRules)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 0c4ec1a..bb76a44 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -2,7 +2,8 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ - error::ErrorKind, r0::capabilities::get_capabilities, r0::read_marker::set_read_marker, + error::ErrorKind, + r0::{capabilities::get_capabilities, read_marker::set_read_marker}, }, events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, }; diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 26ea5b9..764291d 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -1,6 +1,8 @@ use crate::{utils, Error, Result}; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; #[derive(Clone)] pub struct Appservice { diff --git a/src/database/globals.rs b/src/database/globals.rs index 7156a75..fc4adc3 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,10 +4,11 @@ use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerName, ServerSigningKeyId, }; -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use std::sync::RwLock; -use std::time::Duration; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, RwLock}, + time::Duration, +}; use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; diff --git a/src/error.rs b/src/error.rs index 13efce6..d8f10f4 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,8 @@ -use std::{collections::HashMap, sync::RwLock, time::Duration, time::Instant}; +use std::{ + collections::HashMap, + sync::RwLock, + time::{Duration, Instant}, +}; use log::error; use ruma::{ diff --git a/src/main.rs b/src/main.rs index 054c859..5aa0a19 100644 --- a/src/main.rs +++ b/src/main.rs @@ -18,11 +18,15 @@ pub use rocket::State; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use log::LevelFilter; -use rocket::figment::{ - providers::{Env, Format, Toml}, - Figment, +use rocket::{ + catch, catchers, + fairing::AdHoc, + figment::{ + providers::{Env, Format, Toml}, + Figment, + }, + routes, Request, }; -use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; fn setup_rocket() -> rocket::Rocket { // Force log level off, so we can use our own logger @@ -143,6 +147,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_key_changes_route, client_server::get_pushers_route, client_server::set_pushers_route, + // client_server::third_party_route, client_server::upgrade_room_route, server_server::get_server_version_route, server_server::get_server_keys_route, @@ -165,8 +170,7 @@ fn setup_rocket() -> rocket::Rocket { .await .expect("config is valid"); - data.sending - .start_handler(&data.globals, &data.rooms, &data.appservice); + data.sending.start_handler(&data); 
log::set_boxed_logger(Box::new(ConduitLogger { db: data.clone(), last_logs: Default::default(), From 2d69e816997d9bf4f51b6e35c6a9c408fb1c144a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 26 Jan 2021 21:54:35 -0500 Subject: [PATCH 025/103] WIP: send out push notification, impl pusher routes It seems in order to test this I may also have to impl the email 3pid route? I need to call the set_pusher route somehow. --- src/client_server/account.rs | 14 ++ src/client_server/push.rs | 34 ++- src/database.rs | 11 +- src/database/pusher.rs | 148 +++++++++++ src/database/rooms.rs | 3 + src/database/sending.rs | 470 +++++++++++++++++++++++------------ 6 files changed, 514 insertions(+), 166 deletions(-) create mode 100644 src/database/pusher.rs diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 76354b6..9f6c576 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -659,3 +659,17 @@ pub async fn deactivate_route( } .into()) } + +/*/ +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/account/3pid", data = "") +)] +pub async fn third_party_route( + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + Ok(account::add_3pid::Response::default().into()) +} +*/ diff --git a/src/client_server/push.rs b/src/client_server/push.rs index fd938c1..3a81679 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -666,20 +666,36 @@ pub async fn delete_pushrule_route( Ok(delete_pushrule::Response.into()) } -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] -pub async fn get_pushers_route() -> ConduitResult { +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushers", data = "") +)] +pub async fn get_pushers_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender = body.sender_user.as_ref().expect("authenticated endpoint"); + Ok(get_pushers::Response { - pushers: Vec::new(), + pushers: db.pusher.get_pusher(sender)?, } .into()) } -#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/pushers/set"))] -pub async fn set_pushers_route(db: State<'_, Database>) -> ConduitResult { +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/pushers/set", data = "") +)] +pub async fn set_pushers_route( + db: State<'_, Database>, + body: Ruma, +) -> ConduitResult { + let sender = body.sender_user.as_ref().expect("authenticated endpoint"); + let pusher = body.pusher.clone(); + + db.pusher.set_pusher(sender, pusher)?; + db.flush().await?; - Ok(get_pushers::Response { - pushers: Vec::new(), - } - .into()) + Ok(set_pusher::Response::default().into()) } diff --git a/src/database.rs b/src/database.rs index 7ad18cb..b8dc524 100644 --- a/src/database.rs +++ b/src/database.rs @@ -4,6 +4,7 @@ pub mod appservice; pub mod globals; pub mod key_backups; pub mod media; +pub mod pusher; pub mod rooms; pub mod sending; pub mod transaction_ids; @@ -17,9 +18,11 @@ use log::info; use rocket::futures::{self, channel::mpsc}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; -use std::collections::HashMap; -use std::fs::remove_dir_all; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + fs::remove_dir_all, + sync::{Arc, RwLock}, +}; use tokio::sync::Semaphore; #[derive(Clone, Debug, Deserialize)] @@ -73,6 +76,7 @@ pub struct Database { pub sending: sending::Sending, pub admin: admin::Admin, pub appservice: appservice::Appservice, + pub pusher: pusher::PushData, pub _db: sled::Db, } 
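// The new `pusher` member wires one extra sled tree into the database. A
// self-contained sketch of how that tree is used by the routes added in this
// patch (plain `sled` types; helper names are illustrative): pushers are
// stored as JSON under `user id bytes ++ pushkey bytes`, so a prefix scan on
// the user id returns all of that user's pushers.
fn store_pusher(
    tree: &sled::Tree,
    user_id: &str,
    pushkey: &str,
    pusher_json: &str,
) -> sled::Result<()> {
    let mut key = user_id.as_bytes().to_vec();
    key.extend_from_slice(pushkey.as_bytes());
    tree.insert(key, pusher_json.as_bytes())?;
    Ok(())
}

fn pushers_for(tree: &sled::Tree, user_id: &str) -> sled::Result<Vec<String>> {
    tree.scan_prefix(user_id.as_bytes())
        .values()
        .map(|value| {
            let bytes = value?;
            Ok(String::from_utf8_lossy(&bytes).into_owned())
        })
        .collect()
}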
@@ -187,6 +191,7 @@ impl Database { cached_registrations: Arc::new(RwLock::new(HashMap::new())), id_appserviceregistrations: db.open_tree("id_appserviceregistrations")?, }, + pusher: pusher::PushData::new(&db)?, _db: db, }; diff --git a/src/database/pusher.rs b/src/database/pusher.rs new file mode 100644 index 0000000..041085d --- /dev/null +++ b/src/database/pusher.rs @@ -0,0 +1,148 @@ +use crate::{Error, PduEvent, Result}; +use ruma::{ + api::client::r0::push::{Pusher, PusherKind}, + events::{ + room::{ + member::MemberEventContent, + message::{MessageEventContent, TextMessageEventContent}, + }, + EventType, + }, + push::{PushCondition, Ruleset}, + UserId, +}; + +#[derive(Debug, Clone)] +pub struct PushData { + /// UserId + pushkey -> Pusher + pub(super) senderkey_pusher: sled::Tree, +} + +impl PushData { + pub fn new(db: &sled::Db) -> Result { + Ok(Self { + senderkey_pusher: db.open_tree("senderkey_pusher")?, + }) + } + + pub fn set_pusher(&self, sender: &UserId, pusher: Pusher) -> Result<()> { + let mut key = sender.as_bytes().to_vec(); + key.extend_from_slice(pusher.pushkey.as_bytes()); + + self.senderkey_pusher.insert( + key, + &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), + )?; + + Ok(()) + } + + pub fn get_pusher(&self, sender: &UserId) -> Result> { + self.senderkey_pusher + .scan_prefix(sender.as_bytes()) + .values() + .map(|push: std::result::Result| { + let push = push.map_err(|_| Error::bad_database("Invalid push bytes in db."))?; + Ok(serde_json::from_slice(&*push) + .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) + }) + .collect::>>() + } +} + +pub async fn send_push_notice( + user: &UserId, + pusher: &Pusher, + ruleset: Ruleset, + pdu: &PduEvent, +) -> Result<()> { + for rule in ruleset.into_iter() { + // TODO: can actions contain contradictory Actions + if rule + .actions + .iter() + .any(|act| matches!(act, ruma::push::Action::DontNotify)) + || !rule.enabled + { + continue; + } + + match rule.rule_id.as_str() { + ".m.rule.master" => {} + ".m.rule.suppress_notices" => {} + ".m.rule.invite_for_me" => {} + ".m.rule.member_event" => { + if let EventType::RoomMember = &pdu.kind { + // TODO use this? + let _member = serde_json::from_value::(pdu.content.clone()) + .map_err(|_| Error::bad_database("PDU contained bad message content"))?; + if let Some(conditions) = rule.conditions { + if conditions.iter().any(|cond| match cond { + PushCondition::EventMatch { key, pattern } => { + let mut json = + serde_json::to_value(pdu).expect("PDU is valid JSON"); + for key in key.split('.') { + json = json[key].clone(); + } + // TODO: this is baddddd + json.to_string().contains(pattern) + } + _ => false, + }) {} + } + } + } + ".m.rule.contains_display_name" => { + if let EventType::RoomMessage = &pdu.kind { + let msg_content = + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::bad_database("PDU contained bad message content") + })?; + if let MessageEventContent::Text(TextMessageEventContent { body, .. }) = + &msg_content + { + if body.contains(user.localpart()) { + send_notice(user, &pusher, &pdu).await?; + } + } + } + } + ".m.rule.tombstone" => {} + ".m.rule.roomnotif" => {} + ".m.rule.contains_user_name" => { + if let EventType::RoomMessage = &pdu.kind { + let msg_content = + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::bad_database("PDU contained bad message content") + })?; + if let MessageEventContent::Text(TextMessageEventContent { body, .. 
}) = + &msg_content + { + if body.contains(user.localpart()) { + send_notice(user, &pusher, &pdu).await?; + } + } + } + } + ".m.rule.call" => {} + ".m.rule.encrypted_room_one_to_one" => {} + ".m.rule.room_one_to_one" => {} + ".m.rule.message" => {} + ".m.rule.encrypted" => {} + _ => {} + } + } + Ok(()) +} + +async fn send_notice(_sender: &UserId, pusher: &Pusher, _event: &PduEvent) -> Result<()> { + if let Some(PusherKind::Http) = pusher.kind { + log::error!("YAHOOO"); + } else { + // EMAIL + todo!("send an email") + } + Ok(()) +} diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d459aee..19554f6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -531,6 +531,9 @@ impl Rooms { self.eventid_pduid .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + // See if the event matches any known pushers + db.sending.send_push_pdu(&*pdu_id)?; + match pdu.kind { EventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { diff --git a/src/database/sending.rs b/src/database/sending.rs index 4b0309f..a478501 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,43 +1,62 @@ use std::{ collections::HashMap, convert::TryFrom, - fmt::Debug, + fmt::{Debug, Display, Formatter}, sync::Arc, time::{Duration, Instant, SystemTime}, }; -use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; +use crate::{appservice_server, server_server, utils, Database, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; use log::info; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, + events::{push_rules, EventType}, ServerName, }; use sled::IVec; -use tokio::select; -use tokio::sync::Semaphore; +use tokio::{select, sync::Semaphore}; + +use super::{ + account_data::AccountData, appservice::Appservice, globals::Globals, pusher::PushData, + rooms::Rooms, +}; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum OutgoingKind { + Appservice(Box), + Push(Vec), + Normal(Box), +} + +impl Display for OutgoingKind { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + OutgoingKind::Appservice(name) => f.write_str(name.as_str()), + OutgoingKind::Normal(name) => f.write_str(name.as_str()), + OutgoingKind::Push(_) => f.write_str("Push notification TODO"), + } + } +} #[derive(Clone)] pub struct Sending { /// The state for a given state hash. 
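// The queue keys in `servernamepduids` / `servercurrentpdus` now encode who a
// pdu is destined for. A self-contained sketch of the scheme used throughout
// the handler below (the enum and helper are illustrative mirrors of
// `OutgoingKind`):
//   appservice destination -> b"+" ++ appservice id
//   push notification      -> b"$" ++ key
//   federation destination -> server name as-is
// each followed by 0xff and the pdu id.
enum Destination {
    Appservice(String),
    Push(Vec<u8>),
    Normal(String),
}

fn queue_key(dest: &Destination, pdu_id: &[u8]) -> Vec<u8> {
    let mut key = match dest {
        Destination::Appservice(id) => {
            let mut k = b"+".to_vec();
            k.extend_from_slice(id.as_bytes());
            k
        }
        Destination::Push(id) => {
            let mut k = b"$".to_vec();
            k.extend_from_slice(id);
            k
        }
        Destination::Normal(server) => server.as_bytes().to_vec(),
    };
    key.push(0xff);
    key.extend_from_slice(pdu_id);
    key
}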
- pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+)ServerName + PduId - pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+)ServerName + PduId (pduid can be empty for reservation) + pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)ServerName / UserId + PduId + pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+ / $)ServerName / UserId + PduId (pduid can be empty for reservation) pub(super) maximum_requests: Arc, } impl Sending { - pub fn start_handler( - &self, - globals: &super::globals::Globals, - rooms: &super::rooms::Rooms, - appservice: &super::appservice::Appservice, - ) { + pub fn start_handler(&self, db: &Database) { let servernamepduids = self.servernamepduids.clone(); let servercurrentpdus = self.servercurrentpdus.clone(); - let rooms = rooms.clone(); - let globals = globals.clone(); - let appservice = appservice.clone(); + let rooms = db.rooms.clone(); + let globals = db.globals.clone(); + let appservice = db.appservice.clone(); + let pusher = db.pusher.clone(); + let account_data = db.account_data.clone(); tokio::spawn(async move { let mut futures = FuturesUnordered::new(); @@ -45,45 +64,57 @@ impl Sending { // Retry requests we could not finish yet let mut current_transactions = HashMap::new(); - for (server, pdu, is_appservice) in servercurrentpdus + for (outgoing_kind, pdu) in servercurrentpdus .iter() .filter_map(|r| r.ok()) .filter_map(|(key, _)| Self::parse_servercurrentpdus(key).ok()) - .filter(|(_, pdu, _)| !pdu.is_empty()) // Skip reservation key + .filter(|(_, pdu)| !pdu.is_empty()) // Skip reservation key .take(50) // This should not contain more than 50 anyway { current_transactions - .entry((server, is_appservice)) + .entry(outgoing_kind) .or_insert_with(Vec::new) .push(pdu); } - for ((server, is_appservice), pdus) in current_transactions { + for (outgoing_kind, pdus) in current_transactions { futures.push(Self::handle_event( - server, - is_appservice, + outgoing_kind, pdus, - &globals, &rooms, + &globals, &appservice, + &pusher, + &account_data, )); } - let mut last_failed_try: HashMap, (u32, Instant)> = HashMap::new(); + let mut last_failed_try: HashMap = HashMap::new(); let mut subscriber = servernamepduids.watch_prefix(b""); loop { select! 
{ Some(response) = futures.next() => { match response { - Ok((server, is_appservice)) => { - let mut prefix = if is_appservice { - b"+".to_vec() - } else { - Vec::new() + Ok(outgoing_kind) => { + let mut prefix = match &outgoing_kind { + OutgoingKind::Appservice(server) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(server.as_bytes()); + p + } + OutgoingKind::Push(id) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(&id); + p + }, + OutgoingKind::Normal(server) => { + let mut p = vec![]; + p.extend_from_slice(server.as_bytes()); + p + }, }; - prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); for key in servercurrentpdus @@ -116,22 +147,45 @@ impl Sending { servernamepduids.remove(¤t_key).unwrap(); } - futures.push(Self::handle_event(server, is_appservice, new_pdus, &globals, &rooms, &appservice)); + futures.push( + Self::handle_event( + outgoing_kind.clone(), + new_pdus, + &rooms, + &globals, + &appservice, + &pusher, + &account_data + ) + ); } else { servercurrentpdus.remove(&prefix).unwrap(); // servercurrentpdus with the prefix should be empty now } } - Err((server, is_appservice, e)) => { - info!("Couldn't send transaction to {}\n{}", server, e); - let mut prefix = if is_appservice { - b"+".to_vec() - } else { - Vec::new() + Err((outgoing_kind, e)) => { + info!("Couldn't send transaction to {}\n{}", outgoing_kind, e); + let mut prefix = match &outgoing_kind { + OutgoingKind::Appservice(serv) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(serv.as_bytes()); + p + }, + OutgoingKind::Push(id) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(&id); + p + }, + OutgoingKind::Normal(serv) => { + let mut p = vec![]; + p.extend_from_slice(serv.as_bytes()); + p + }, }; - prefix.extend_from_slice(server.as_bytes()); + prefix.push(0xff); - last_failed_try.insert(server.clone(), match last_failed_try.get(&server) { + + last_failed_try.insert(outgoing_kind.clone(), match last_failed_try.get(&outgoing_kind) { Some(last_failed) => { (last_failed.0+1, Instant::now()) }, @@ -157,40 +211,56 @@ impl Sending { instant.elapsed() < min_elapsed_duration }; - if let Some((server, is_appservice, pdu_id)) = utils::string_from_bytes( + if let Some((outgoing_kind, pdu_id)) = utils::string_from_bytes( parts .next() .expect("splitn will always return 1 or more elements"), ) - .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) - .map(|server_str| { + .map_err(|_| Error::bad_database("[Utf8] ServerName in servernamepduid bytes are invalid.")) + .and_then(|ident_str| { // Appservices start with a plus - if server_str.starts_with('+') { - (server_str[1..].to_owned(), true) + Ok(if ident_str.starts_with('+') { + OutgoingKind::Appservice( + Box::::try_from(&ident_str[1..]) + .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))? + ) + } else if ident_str.starts_with('$') { + OutgoingKind::Push(ident_str[1..].as_bytes().to_vec()) } else { - (server_str, false) - } + OutgoingKind::Normal( + Box::::try_from(ident_str) + .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))? 
+ ) + }) }) - .and_then(|(server_str, is_appservice)| Box::::try_from(server_str) - .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid.")).map(|s| (s, is_appservice))) - .ok() - .and_then(|(server, is_appservice)| parts + .and_then(|outgoing_kind| parts .next() .ok_or_else(|| Error::bad_database("Invalid servernamepduid in db.")) - .ok() - .map(|pdu_id| (server, is_appservice, pdu_id)) + .map(|pdu_id| (outgoing_kind, pdu_id)) ) - .filter(|(server, is_appservice, _)| { - if last_failed_try.get(server).map_or(false, exponential_backoff) { + .ok() + .filter(|(outgoing_kind, _)| { + if last_failed_try.get(outgoing_kind).map_or(false, exponential_backoff) { return false; } - let mut prefix = if *is_appservice { - b"+".to_vec() - } else { - Vec::new() + let mut prefix = match outgoing_kind { + OutgoingKind::Appservice(serv) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(serv.as_bytes()); + p + }, + OutgoingKind::Push(id) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(&id); + p + }, + OutgoingKind::Normal(serv) => { + let mut p = vec![]; + p.extend_from_slice(serv.as_bytes()); + p + }, }; - prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); servercurrentpdus @@ -201,7 +271,17 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); - futures.push(Self::handle_event(server, is_appservice, vec![pdu_id.into()], &globals, &rooms, &appservice)); + futures.push( + Self::handle_event( + outgoing_kind, + vec![pdu_id.into()], + &rooms, + &globals, + &appservice, + &pusher, + &account_data + ) + ); } } } @@ -210,6 +290,22 @@ impl Sending { }); } + pub fn send_push_pdu(&self, pdu_id: &[u8]) -> Result<()> { + // Make sure we don't cause utf8 errors when parsing to a String... + let pduid = String::from_utf8_lossy(pdu_id).as_bytes().to_vec(); + + // these are valid ServerName chars + // (byte.is_ascii_alphanumeric() || byte == b'-' || byte == b'.') + let mut key = b"$".to_vec(); + // keep each pdu push unique + key.extend_from_slice(pduid.as_slice()); + key.push(0xff); + key.extend_from_slice(pdu_id); + self.servernamepduids.insert(key, b"")?; + + Ok(()) + } + pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); @@ -230,95 +326,154 @@ impl Sending { } async fn handle_event( - server: Box, - is_appservice: bool, + kind: OutgoingKind, pdu_ids: Vec, - globals: &super::globals::Globals, - rooms: &super::rooms::Rooms, - appservice: &super::appservice::Appservice, - ) -> std::result::Result<(Box, bool), (Box, bool, Error)> { - if is_appservice { - let pdu_jsons = pdu_ids - .iter() - .map(|pdu_id| { - Ok::<_, (Box, Error)>( - rooms - .get_pdu_from_id(pdu_id) - .map_err(|e| (server.clone(), e))? - .ok_or_else(|| { - ( - server.clone(), - Error::bad_database( - "Event in servernamepduids not found in db.", - ), - ) - })? 
- .to_any_event(), - ) - }) - .filter_map(|r| r.ok()) - .collect::>(); - appservice_server::send_request( - &globals, - appservice - .get_registration(server.as_str()) - .unwrap() - .unwrap(), // TODO: handle error - appservice::event::push_events::v1::Request { - events: &pdu_jsons, - txn_id: &utils::random_string(16), - }, - ) - .await - .map(|_response| (server.clone(), is_appservice)) - .map_err(|e| (server, is_appservice, e)) - } else { - let pdu_jsons = pdu_ids - .iter() - .map(|pdu_id| { - Ok::<_, (Box, Error)>( - // TODO: check room version and remove event_id if needed - serde_json::from_str( - PduEvent::convert_to_outgoing_federation_event( - rooms - .get_pdu_json_from_id(pdu_id) - .map_err(|e| (server.clone(), e))? - .ok_or_else(|| { - ( - server.clone(), - Error::bad_database( - "Event in servernamepduids not found in db.", - ), - ) - })?, - ) - .json() - .get(), + rooms: &Rooms, + globals: &Globals, + appservice: &Appservice, + pusher: &PushData, + account_data: &AccountData, + ) -> std::result::Result { + match kind { + OutgoingKind::Appservice(server) => { + let pdu_jsons = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (Box, Error)>( + rooms + .get_pdu_from_id(pdu_id) + .map_err(|e| (server.clone(), e))? + .ok_or_else(|| { + ( + server.clone(), + Error::bad_database( + "[Appservice] Event in servernamepduids not found in ", + ), + ) + })? + .to_any_event(), ) - .expect("Raw<..> is always valid"), - ) - }) - .filter_map(|r| r.ok()) - .collect::>(); + }) + .filter_map(|r| r.ok()) + .collect::>(); + appservice_server::send_request( + &globals, + appservice + .get_registration(server.as_str()) + .unwrap() + .unwrap(), // TODO: handle error + appservice::event::push_events::v1::Request { + events: &pdu_jsons, + txn_id: &utils::random_string(16), + }, + ) + .await + .map(|_response| OutgoingKind::Appservice(server.clone())) + .map_err(|e| (OutgoingKind::Appservice(server.clone()), e)) + } + OutgoingKind::Push(id) => { + let pdus = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (Vec, Error)>( + rooms + .get_pdu_from_id(pdu_id) + .map_err(|e| (id.clone(), e))? + .ok_or_else(|| { + ( + id.clone(), + Error::bad_database( + "[Push] Event in servernamepduids not found in db.", + ), + ) + })?, + ) + }) + .filter_map(|r| r.ok()) + .collect::>(); + dbg!(&pdus); + for pdu in &pdus { + for user in rooms.room_members(&pdu.room_id) { + dbg!(&user); + let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; + for pusher in pusher + .get_pusher(&user) + .map_err(|e| (OutgoingKind::Push(id.clone()), e))? + { + let rules_for_user = account_data + .get::( + None, + &user, + EventType::PushRules, + ) + .map_err(|e| (OutgoingKind::Push(id.clone()), e))? 
+ .map(|ev| ev.content.global) + .unwrap_or_else(|| crate::push_rules::default_pushrules(&user)); + dbg!(&pusher); + dbg!(&rules_for_user); - server_server::send_request( - &globals, - &*server, - send_transaction_message::v1::Request { - origin: globals.server_name(), - pdus: &pdu_jsons, - edus: &[], - origin_server_ts: SystemTime::now(), - transaction_id: &utils::random_string(16), - }, - ) - .await - .map(|_response| (server.clone(), is_appservice)) - .map_err(|e| (server, is_appservice, e)) + crate::database::pusher::send_push_notice( + &user, + &pusher, + rules_for_user, + pdu, + ) + .await + .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; + } + } + } + + Ok(OutgoingKind::Push(id)) + } + OutgoingKind::Normal(server) => { + let pdu_jsons = pdu_ids + .iter() + .map(|pdu_id| { + Ok::<_, (OutgoingKind, Error)>( + // TODO: check room version and remove event_id if needed + serde_json::from_str( + PduEvent::convert_to_outgoing_federation_event( + rooms + .get_pdu_json_from_id(pdu_id) + .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? + .ok_or_else(|| { + ( + OutgoingKind::Normal(server.clone()), + Error::bad_database( + "[Normal] Event in servernamepduids not found in db.", + ), + ) + })?, + ) + .json() + .get(), + ) + .expect("Raw<..> is always valid"), + ) + }) + .filter_map(|r| r.ok()) + .collect::>(); + + server_server::send_request( + &globals, + &*server, + send_transaction_message::v1::Request { + origin: globals.server_name(), + pdus: &pdu_jsons, + edus: &[], + origin_server_ts: SystemTime::now(), + transaction_id: &utils::random_string(16), + }, + ) + .await + .map(|_response| OutgoingKind::Normal(server.clone())) + .map_err(|e| (OutgoingKind::Normal(server.clone()), e)) + } } } - fn parse_servercurrentpdus(key: IVec) -> Result<(Box, IVec, bool)> { + fn parse_servercurrentpdus(key: IVec) -> Result<(OutgoingKind, IVec)> { let mut parts = key.splitn(2, |&b| b == 0xff); let server = parts.next().expect("splitn always returns one element"); let pdu = parts @@ -330,19 +485,26 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with('+') { - (&server[1..], true) + Ok::<_, Error>(if server.starts_with('+') { + ( + OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?), + IVec::from(pdu), + ) + } else if server.starts_with('$') { + ( + OutgoingKind::Push(server.as_bytes().to_vec()), + IVec::from(pdu), + ) } else { - (&*server, false) - }; - - Ok::<_, Error>(( - Box::::try_from(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?, - IVec::from(pdu), - is_appservice, - )) + ( + OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?), + IVec::from(pdu), + ) + }) } pub async fn send_federation_request( From 73124629b7d4cdbddef36c52bfe5e494bad1ac01 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 10:14:09 -0500 Subject: [PATCH 026/103] Add general rules matching for pusher, calc unread msgs --- Cargo.lock | 16 ++ Cargo.toml | 2 +- src/database/pusher.rs | 412 +++++++++++++++++++++++++++++++++++++--- src/database/sending.rs | 129 +++++++------ src/server_server.rs | 7 +- 5 files changed, 469 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66f624c..859d854 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1534,6 +1534,7 @@ dependencies = [ "ruma-events", 
"ruma-federation-api", "ruma-identifiers", + "ruma-push-gateway-api", "ruma-serde", "ruma-signatures", ] @@ -1680,6 +1681,21 @@ name = "ruma-identifiers-validation" version = "0.2.0" source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +[[package]] +name = "ruma-push-gateway-api" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +dependencies = [ + "js_int", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + [[package]] name = "ruma-serde" version = "0.3.0" diff --git a/Cargo.toml b/Cargo.toml index 2c6c741..a8760c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 041085d..a1a6130 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -1,17 +1,26 @@ -use crate::{Error, PduEvent, Result}; +use crate::{Database, Error, PduEvent, Result}; +use log::{error, info, warn}; use ruma::{ - api::client::r0::push::{Pusher, PusherKind}, - events::{ - room::{ - member::MemberEventContent, - message::{MessageEventContent, TextMessageEventContent}, + api::{ + client::r0::push::{Pusher, PusherKind}, + push_gateway::send_event_notification::{ + self, + v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - EventType, + OutgoingRequest, }, - push::{PushCondition, Ruleset}, - UserId, + events::room::{ + member::{MemberEventContent, MembershipState}, + message::{MessageEventContent, TextMessageEventContent}, + power_levels::PowerLevelsEventContent, + }, + events::EventType, + push::{Action, PushCondition, PushFormat, Ruleset, Tweak}, + uint, UInt, UserId, }; +use std::{convert::TryFrom, fmt::Debug, time::Duration}; + #[derive(Debug, Clone)] pub struct PushData { /// UserId + pushkey -> Pusher @@ -29,6 +38,15 @@ impl PushData { let mut key = sender.as_bytes().to_vec(); key.extend_from_slice(pusher.pushkey.as_bytes()); + // There are 2 kinds of pushers but the spec says: null deletes the pusher. 
+ if pusher.kind.is_none() { + return self + .senderkey_pusher + .remove(key) + .map(|_| ()) + .map_err(Into::into); + } + self.senderkey_pusher.insert( key, &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), @@ -46,15 +64,95 @@ impl PushData { Ok(serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) }) - .collect::>>() + .collect() + } +} + +pub async fn send_request( + globals: &crate::database::globals::Globals, + destination: &str, + request: T, +) -> Result +where + T: Debug, +{ + let destination = destination.replace("/_matrix/push/v1/notify", ""); + + let http_request = request + .try_into_http_request(&destination, Some("")) + .map_err(|e| { + warn!("Failed to find destination {}: {}", destination, e); + Error::BadServerResponse("Invalid destination") + })?; + + let mut reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + // TODO: we could keep this very short and let expo backoff do it's thing... + *reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); + + let url = reqwest_request.url().clone(); + let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; + + // Because reqwest::Response -> http::Response is complicated: + match reqwest_response { + Ok(mut reqwest_response) => { + let status = reqwest_response.status(); + let mut http_response = http::Response::builder().status(status); + let headers = http_response.headers_mut().unwrap(); + + for (k, v) in reqwest_response.headers_mut().drain() { + if let Some(key) = k { + headers.insert(key, v); + } + } + + let status = reqwest_response.status(); + + let body = reqwest_response + .bytes() + .await + .unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }) // TODO: handle timeout + .into_iter() + .collect::>(); + + if status != 200 { + info!( + "Push gateway returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + crate::utils::string_from_bytes(&body) + ); + } + + let response = T::IncomingResponse::try_from( + http_response + .body(body) + .expect("reqwest body is valid http body"), + ); + response.map_err(|_| { + info!( + "Push gateway returned invalid response bytes {}\n{}", + destination, url + ); + Error::BadServerResponse("Push gateway returned bad response.") + }) + } + Err(e) => Err(e.into()), } } pub async fn send_push_notice( user: &UserId, - pusher: &Pusher, + unread: UInt, + pushers: &[Pusher], ruleset: Ruleset, pdu: &PduEvent, + db: &Database, ) -> Result<()> { for rule in ruleset.into_iter() { // TODO: can actions contain contradictory Actions @@ -69,8 +167,44 @@ pub async fn send_push_notice( match rule.rule_id.as_str() { ".m.rule.master" => {} - ".m.rule.suppress_notices" => {} - ".m.rule.invite_for_me" => {} + ".m.rule.suppress_notices" => { + if pdu.kind == EventType::RoomMessage + && pdu + .content + .get("msgtype") + .map_or(false, |ty| ty == "m.notice") + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.invite_for_me" => { + if let EventType::RoomMember = &pdu.kind { + if pdu.state_key.as_deref() == Some(user.as_str()) + && serde_json::from_value::(pdu.content.clone()) + .map_err(|_| Error::bad_database("PDU contained bad message content"))? 
+ .membership + == MembershipState::Invite + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + } ".m.rule.member_event" => { if let EventType::RoomMember = &pdu.kind { // TODO use this? @@ -88,7 +222,17 @@ pub async fn send_push_notice( json.to_string().contains(pattern) } _ => false, - }) {} + }) { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } } } } @@ -103,13 +247,71 @@ pub async fn send_push_notice( &msg_content { if body.contains(user.localpart()) { - send_notice(user, &pusher, &pdu).await?; + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + } + } + ".m.rule.tombstone" => { + if pdu.kind == EventType::RoomTombstone && pdu.state_key.as_deref() == Some("") { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.roomnotif" => { + if let EventType::RoomMessage = &pdu.kind { + let msg_content = + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::bad_database("PDU contained bad message content") + })?; + if let MessageEventContent::Text(TextMessageEventContent { body, .. }) = + &msg_content + { + let power_level_cmp = |pl: PowerLevelsEventContent| { + &pl.notifications.room + <= pl.users.get(&pdu.sender).unwrap_or(&ruma::int!(0)) + }; + let deserialize = |pl: PduEvent| { + serde_json::from_value::(pl.content).ok() + }; + if body.contains("@room") + && db + .rooms + .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? 
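+                            // `.m.rule.roomnotif` only matches when the sender's power
+                            // level reaches the room's `notifications.room` threshold
+                            // (50 by default in the spec); see `power_level_cmp` above.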
+ .map(|(_, pl)| pl) + .map(deserialize) + .flatten() + .map_or(false, power_level_cmp) + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; } } } } - ".m.rule.tombstone" => {} - ".m.rule.roomnotif" => {} ".m.rule.contains_user_name" => { if let EventType::RoomMessage = &pdu.kind { let msg_content = @@ -121,28 +323,180 @@ pub async fn send_push_notice( &msg_content { if body.contains(user.localpart()) { - send_notice(user, &pusher, &pdu).await?; + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; } } } } - ".m.rule.call" => {} - ".m.rule.encrypted_room_one_to_one" => {} - ".m.rule.room_one_to_one" => {} - ".m.rule.message" => {} - ".m.rule.encrypted" => {} + ".m.rule.call" => { + if pdu.kind == EventType::CallInvite { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.encrypted_room_one_to_one" => { + if db.rooms.room_members(&pdu.room_id).count() == 2 + && pdu.kind == EventType::RoomEncrypted + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.room_one_to_one" => { + if db.rooms.room_members(&pdu.room_id).count() == 2 + && pdu.kind == EventType::RoomMessage + { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.message" => { + if pdu.kind == EventType::RoomMessage { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } + ".m.rule.encrypted" => { + if pdu.kind == EventType::RoomEncrypted { + let tweaks = rule + .actions + .iter() + .filter_map(|a| match a { + Action::SetTweak(tweak) => Some(tweak.clone()), + _ => None, + }) + .collect::>(); + send_notice(unread, pushers, tweaks, pdu, db).await?; + } + } _ => {} } } Ok(()) } -async fn send_notice(_sender: &UserId, pusher: &Pusher, _event: &PduEvent) -> Result<()> { - if let Some(PusherKind::Http) = pusher.kind { - log::error!("YAHOOO"); - } else { - // EMAIL - todo!("send an email") +async fn send_notice( + unread: UInt, + pushers: &[Pusher], + tweaks: Vec, + event: &PduEvent, + db: &Database, +) -> Result<()> { + let (http, _emails): (Vec<&Pusher>, _) = pushers + .iter() + .partition(|pusher| pusher.kind == Some(PusherKind::Http)); + + // TODO: + // Two problems with this + // 1. if "event_id_only" is the only format kind it seems we should never add more info + // 2. 
can pusher/devices have conflicting formats + for pusher in http { + let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); + let url = if let Some(url) = pusher.data.url.as_ref() { + url + } else { + error!("Http Pusher must have URL specified."); + continue; + }; + + let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); + device.data = Some(pusher.data.clone()); + + // this is not done if "event_id_only" is the format + device.tweaks = tweaks.clone(); + + let d = &[device]; + let mut notifi = Notification::new(d); + + notifi.prio = NotificationPriority::Low; + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); + + if event.kind == EventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + { + notifi.prio = NotificationPriority::High + } + + if event_id_only { + // send_request( + // &db.globals, + // &url, + // send_event_notification::v1::Request::new(notifi), + // ) + // .await?; + } else { + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + notifi.content = serde_json::value::to_raw_value(&event.content).ok(); + + if event.kind == EventType::RoomMember { + notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + } + + let name = db.users.displayname(&event.sender)?; + notifi.sender_display_name = name.as_deref(); + let room_name = db + .rooms + .room_state_get(&event.room_id, &EventType::RoomName, "")? + .map(|(_, pdu)| match pdu.content.get("name") { + Some(serde_json::Value::String(s)) => Some(s.to_string()), + _ => None, + }) + .flatten(); + notifi.room_name = room_name.as_deref(); + + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; + } } + + // TODO: email + // for email in emails {} + Ok(()) } diff --git a/src/database/sending.rs b/src/database/sending.rs index a478501..48e427e 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -13,16 +13,11 @@ use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, events::{push_rules, EventType}, - ServerName, + uint, ServerName, UInt, }; use sled::IVec; use tokio::{select, sync::Semaphore}; -use super::{ - account_data::AccountData, appservice::Appservice, globals::Globals, pusher::PushData, - rooms::Rooms, -}; - #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(Box), @@ -52,11 +47,7 @@ impl Sending { pub fn start_handler(&self, db: &Database) { let servernamepduids = self.servernamepduids.clone(); let servercurrentpdus = self.servercurrentpdus.clone(); - let rooms = db.rooms.clone(); - let globals = db.globals.clone(); - let appservice = db.appservice.clone(); - let pusher = db.pusher.clone(); - let account_data = db.account_data.clone(); + let db = db.clone(); tokio::spawn(async move { let mut futures = FuturesUnordered::new(); @@ -79,15 +70,7 @@ impl Sending { } for (outgoing_kind, pdus) in current_transactions { - futures.push(Self::handle_event( - outgoing_kind, - pdus, - &rooms, - &globals, - &appservice, - &pusher, - &account_data, - )); + futures.push(Self::handle_event(outgoing_kind, pdus, &db)); } let mut last_failed_try: HashMap = HashMap::new(); @@ -151,11 +134,7 @@ impl Sending { Self::handle_event( outgoing_kind.clone(), new_pdus, - &rooms, - &globals, - &appservice, - &pusher, - &account_data + 
&db, ) ); } else { @@ -275,11 +254,7 @@ impl Sending { Self::handle_event( outgoing_kind, vec![pdu_id.into()], - &rooms, - &globals, - &appservice, - &pusher, - &account_data + &db, ) ); } @@ -325,14 +300,11 @@ impl Sending { Ok(()) } + // TODO this is the whole DB but is it better to clone smaller parts than the whole thing?? async fn handle_event( kind: OutgoingKind, pdu_ids: Vec, - rooms: &Rooms, - globals: &Globals, - appservice: &Appservice, - pusher: &PushData, - account_data: &AccountData, + db: &Database, ) -> std::result::Result { match kind { OutgoingKind::Appservice(server) => { @@ -340,7 +312,7 @@ impl Sending { .iter() .map(|pdu_id| { Ok::<_, (Box, Error)>( - rooms + db.rooms .get_pdu_from_id(pdu_id) .map_err(|e| (server.clone(), e))? .ok_or_else(|| { @@ -357,8 +329,8 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); appservice_server::send_request( - &globals, - appservice + &db.globals, + db.appservice .get_registration(server.as_str()) .unwrap() .unwrap(), // TODO: handle error @@ -376,7 +348,7 @@ impl Sending { .iter() .map(|pdu_id| { Ok::<_, (Vec, Error)>( - rooms + db.rooms .get_pdu_from_id(pdu_id) .map_err(|e| (id.clone(), e))? .ok_or_else(|| { @@ -391,36 +363,67 @@ impl Sending { }) .filter_map(|r| r.ok()) .collect::>(); + dbg!(&pdus); + for pdu in &pdus { - for user in rooms.room_members(&pdu.room_id) { + // Redacted events are not notification targets (we don't send push for them) + if pdu.unsigned.get("redacted_because").is_some() { + continue; + } + for user in db.rooms.room_members(&pdu.room_id) { dbg!(&user); + let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; - for pusher in pusher + let pushers = db + .pusher .get_pusher(&user) + .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; + + let rules_for_user = db + .account_data + .get::(None, &user, EventType::PushRules) + .map_err(|e| (OutgoingKind::Push(id.clone()), e))? + .map(|ev| ev.content.global) + .unwrap_or_else(|| crate::push_rules::default_pushrules(&user)); + + let unread: UInt = if let Some(last_read) = db + .rooms + .edus + .private_read_get(&pdu.room_id, &user) .map_err(|e| (OutgoingKind::Push(id.clone()), e))? { - let rules_for_user = account_data - .get::( - None, - &user, - EventType::PushRules, - ) + (db.rooms + .pdus_since(&user, &pdu.room_id, last_read) .map_err(|e| (OutgoingKind::Push(id.clone()), e))? - .map(|ev| ev.content.global) - .unwrap_or_else(|| crate::push_rules::default_pushrules(&user)); - dbg!(&pusher); - dbg!(&rules_for_user); + .filter_map(|pdu| pdu.ok()) // Filter out buggy events + .filter(|(_, pdu)| { + matches!( + pdu.kind.clone(), + EventType::RoomMessage | EventType::RoomEncrypted + ) + }) + .count() as u32) + .into() + } else { + // Just return zero unread messages + uint!(0) + }; - crate::database::pusher::send_push_notice( - &user, - &pusher, - rules_for_user, - pdu, - ) - .await - .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; - } + dbg!(&pushers); + + // dbg!(&rules_for_user); + + crate::database::pusher::send_push_notice( + &user, + unread, + &pushers, + rules_for_user, + pdu, + db, + ) + .await + .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; } } @@ -434,7 +437,7 @@ impl Sending { // TODO: check room version and remove event_id if needed serde_json::from_str( PduEvent::convert_to_outgoing_federation_event( - rooms + db.rooms .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
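+                        // Load the stored PDU JSON and convert it to the outgoing
+                        // federation format; room versions 3 and later derive event ids
+                        // from the reference hash, so `event_id` should not be sent over
+                        // federation for them (the TODO above).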
.ok_or_else(|| { @@ -456,10 +459,10 @@ impl Sending { .collect::>(); server_server::send_request( - &globals, + &db.globals, &*server, send_transaction_message::v1::Request { - origin: globals.server_name(), + origin: db.globals.server_name(), pdus: &pdu_jsons, edus: &[], origin_server_ts: SystemTime::now(), diff --git a/src/server_server.rs b/src/server_server.rs index ad0a1a4..03952eb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,6 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; -use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{error, info, warn}; +use log::{info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -12,7 +11,7 @@ use ruma::{ VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, - query::get_profile_information, + query::get_profile_information::{self, v1::ProfileField}, transactions::send_transaction_message, }, OutgoingRequest, @@ -222,7 +221,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Returns: actual_destination, host header /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification -async fn find_actual_destination( +pub(crate) async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &ServerName, ) -> (String, Option) { From e4dc7ea8ac658f66fb6c3fc2c98b7fbb48f6dd5f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 14:19:56 -0500 Subject: [PATCH 027/103] Prevent admin room from recursively spamming itself and user -> user --- src/client_server/push.rs | 5 ++- src/database/pusher.rs | 84 +++++++++++++++++++++++++-------------- src/database/rooms.rs | 3 +- src/database/sending.rs | 28 +++++++++---- 4 files changed, 80 insertions(+), 40 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 3a81679..e648849 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -94,7 +94,10 @@ pub async fn get_pushrule_route( if let Some(rule) = rule { Ok(get_pushrule::Response { rule }.into()) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + Err(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + )) } } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index a1a6130..c4f5801 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -182,7 +182,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.invite_for_me" => { @@ -201,7 +202,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + .await?; + break; } } } @@ -231,7 +234,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + .await?; + break; } } } @@ -255,7 +260,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + 
.await?; + break; } } } @@ -270,7 +277,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.roomnotif" => { @@ -307,7 +315,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + .await?; + break; } } } @@ -331,7 +341,9 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + .await?; + break; } } } @@ -346,7 +358,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.encrypted_room_one_to_one" => { @@ -361,7 +374,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.room_one_to_one" => { @@ -376,7 +390,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.message" => { @@ -389,7 +404,8 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } ".m.rule.encrypted" => { @@ -402,12 +418,14 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db).await?; + send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + break; } } _ => {} } } + Ok(()) } @@ -417,6 +435,7 @@ async fn send_notice( tweaks: Vec, event: &PduEvent, db: &Database, + name: &str, ) -> Result<()> { let (http, _emails): (Vec<&Pusher>, _) = pushers .iter() @@ -436,10 +455,15 @@ async fn send_notice( }; let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - device.data = Some(pusher.data.clone()); + let mut data_minus_url = pusher.data.clone(); + // The url must be stripped off according to spec + data_minus_url.url = None; + device.data = Some(data_minus_url); - // this is not done if "event_id_only" is the format - device.tweaks = tweaks.clone(); + // Tweaks are only added if the format is NOT event_id_only + if !event_id_only { + device.tweaks = tweaks.clone(); + } let d = &[device]; let mut notifi = Notification::new(d); @@ -459,12 +483,13 @@ async fn send_notice( } if event_id_only { - // send_request( - // &db.globals, - // &url, - // send_event_notification::v1::Request::new(notifi), - // ) - // .await?; + error!("SEND PUSH NOTICE `{}`", name); + // send_request( + // &db.globals, + // &url, + // send_event_notification::v1::Request::new(notifi), + // ) + // .await?; } else { notifi.sender = Some(&event.sender); notifi.event_type = Some(&event.kind); @@ -474,8 +499,8 @@ async fn send_notice( notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - let name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = name.as_deref(); + let user_name = db.users.displayname(&event.sender)?; + 
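+            // Full (non-"event_id_only") notifications also include the sender's
+            // display name and the room name so the push gateway can render a
+            // readable message without querying the homeserver again.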
notifi.sender_display_name = user_name.as_deref(); let room_name = db .rooms .room_state_get(&event.room_id, &EventType::RoomName, "")? @@ -486,12 +511,13 @@ async fn send_notice( .flatten(); notifi.room_name = room_name.as_deref(); - send_request( - &db.globals, - &url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + error!("SEND PUSH NOTICE Full `{}`", name); + // send_request( + // &db.globals, + // &url, + // send_event_notification::v1::Request::new(notifi), + // ) + // .await?; } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 19554f6..ac7d27d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,10 +27,9 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, - time::Duration, }; -use super::{admin::AdminCommand, sending::Sending}; +use super::admin::AdminCommand; /// The unique identifier of each state group. /// diff --git a/src/database/sending.rs b/src/database/sending.rs index 48e427e..ce81e8c 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -364,17 +364,33 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); - dbg!(&pdus); - for pdu in &pdus { // Redacted events are not notification targets (we don't send push for them) if pdu.unsigned.get("redacted_because").is_some() { continue; } - for user in db.rooms.room_members(&pdu.room_id) { - dbg!(&user); + // Skip events that came from the admin room + if db + .rooms + .room_aliases(&pdu.room_id) + .any(|alias| match alias { + Ok(a) => a.as_str().starts_with("#admins:"), + _ => false, + }) + || pdu.sender.as_str().starts_with("@conduit:") + { + continue; + } + + for user in db.rooms.room_members(&pdu.room_id) { let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; + + // Don't notify the user of their own events + if user == pdu.sender { + continue; + } + let pushers = db .pusher .get_pusher(&user) @@ -410,10 +426,6 @@ impl Sending { uint!(0) }; - dbg!(&pushers); - - // dbg!(&rules_for_user); - crate::database::pusher::send_push_notice( &user, unread, From 4a92a29b566d27876ae85e5366272a695e17689b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 028/103] State resolution outline for /send --- Cargo.lock | 152 +++++++------------ Cargo.toml | 8 +- src/main.rs | 2 +- src/pdu.rs | 2 +- src/ruma_wrapper.rs | 14 +- src/server_server.rs | 349 +++++++++++++++++++++++++++++++++++++++---- 6 files changed, 384 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78ff405..9ab184c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,21 +229,11 @@ dependencies = [ "trust-dns-resolver", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if 0.1.10", - "wasm-bindgen", -] - [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" +checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" [[package]] name = "constant_time_eq" @@ -645,9 +635,9 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" 
+checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] @@ -674,9 +664,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" dependencies = [ "bytes", "fnv", @@ -772,9 +762,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", "hashbrown", @@ -1063,9 +1053,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.36" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cf75f38f16cb05ea017784dc6dbfd354f76c223dba37701734c4f5a9337d02" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1148,12 +1138,12 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.30" +version = "0.10.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" dependencies = [ "bitflags", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "foreign-types", "lazy_static", "libc", @@ -1168,18 +1158,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.12.0+1.1.1h" +version = "111.13.0+1.1.1i" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61" +checksum = "045e4dc48af57aad93d665885789b43222ae26f4886494da12d1ed58d309dcb6" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" dependencies = [ "autocfg", "cc", @@ -1202,9 +1192,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ "cfg-if 1.0.0", "instant", @@ -1216,9 +1206,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" @@ -1326,9 +1316,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "png" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe7f9f1c730833200b134370e1d5098964231af8450bce9b78ee3ab5278b970" +checksum = 
"3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" dependencies = [ "bitflags", "crc32fast", @@ -1393,9 +1383,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" dependencies = [ "proc-macro2", ] @@ -1507,9 +1497,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", "bytes", @@ -1536,7 +1526,6 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] @@ -1636,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "js_int", @@ -1654,7 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "http", "percent-encoding", @@ -1669,7 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1680,7 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "ruma-api", "ruma-common", @@ -1694,7 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "http", @@ -1713,7 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "maplit", @@ -1726,7 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source 
= "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-common", @@ -1740,7 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1751,7 +1740,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-api", @@ -1766,7 +1755,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "paste", "rand", @@ -1780,7 +1769,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro2", "quote", @@ -1791,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "serde", ] @@ -1799,7 +1788,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "form_urlencoded", "itoa", @@ -1812,7 +1801,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1823,7 +1812,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "base64 0.12.3", "ring", @@ -1889,12 +1878,6 @@ dependencies 
= [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2012,9 +1995,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -2060,13 +2043,12 @@ checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" [[package]] name = "socket2" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2094,7 +2076,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#a1c15253f0777baad251da47c3f2c016cfed6f7e" +source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" dependencies = [ "itertools", "maplit", @@ -2177,9 +2159,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.54" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44" +checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" dependencies = [ "proc-macro2", "quote", @@ -2295,9 +2277,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.23" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" +checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ "bytes", "fnv", @@ -2365,9 +2347,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] @@ -2544,9 +2526,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" @@ -2644,30 +2626,6 @@ version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" -[[package]] -name = "wasm-bindgen-test" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" -dependencies = [ - "console_error_panic_hook", - "js-sys", - 
"scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" -dependencies = [ - "proc-macro2", - "quote", -] - [[package]] name = "web-sys" version = "0.3.46" diff --git a/Cargo.toml b/Cargo.toml index f7fbdc5..c2db3d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "ee814aa84934530d76f5e4b275d739805b49bdef" } -# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = { version = "0.2.23" } diff --git a/src/main.rs b/src/main.rs index 65434a5..4cab764 100644 --- a/src/main.rs +++ b/src/main.rs @@ -190,7 +190,7 @@ async fn main() { } #[catch(404)] -fn not_found_catcher(_req: &'_ Request<'_>) -> String { +fn not_found_catcher(_: &Request<'_>) -> String { "404 Not Found".to_owned() } diff --git a/src/pdu.rs b/src/pdu.rs index 75ef492..f6ec415 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -17,7 +17,7 @@ use std::{ time::UNIX_EPOCH, }; -#[derive(Deserialize, Serialize, Debug)] +#[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: EventId, pub room_id: RoomId, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 45fcc7f..e2f44cd 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ use crate::Error; use ruma::{ - api::{AuthScheme, OutgoingRequest}, + api::{AuthScheme, IncomingRequest, OutgoingRequest}, identifiers::{DeviceId, UserId}, Outgoing, }; @@ -29,7 +29,7 @@ use { /// This struct converts rocket 
requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { +pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, @@ -40,10 +40,7 @@ pub struct Ruma { #[cfg(feature = "conduit_bin")] impl<'a, T: Outgoing + OutgoingRequest> FromTransformedData<'a> for Ruma where - ::Incoming: TryFrom>> + std::fmt::Debug, - <::Incoming as std::convert::TryFrom< - http::request::Request>, - >>::Error: std::fmt::Debug, + T::Incoming: IncomingRequest, { type Error = (); type Owned = Data; @@ -152,8 +149,7 @@ where let http_request = http_request.body(body.clone()).unwrap(); debug!("{:?}", http_request); - - match ::Incoming::try_from(http_request) { + match ::try_from_http_request(http_request) { Ok(t) => Success(Ruma { body: t, sender_user, @@ -173,7 +169,7 @@ where } } -impl Deref for Ruma { +impl Deref for Ruma { type Target = T::Incoming; fn deref(&self) -> &Self::Target { diff --git a/src/server_server.rs b/src/server_server.rs index 3fea4da..d68e9fa 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{info, warn}; +use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -11,17 +11,18 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::get_missing_events, + event::{get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - EventId, RoomId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; +use state_res::StateMap; use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, @@ -476,6 +477,34 @@ pub async fn get_public_rooms_route( .into()) } +#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum PrevEvents { + Sequential(T), + Fork(Vec), +} + +impl IntoIterator for PrevEvents { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + match self { + Self::Sequential(item) => vec![item].into_iter(), + Self::Fork(list) => list.into_iter(), + } + } +} + +impl PrevEvents { + pub fn new(id: &[T]) -> Self { + match id { + [] => panic!("All events must have previous event"), + [single_id] => Self::Sequential(single_id.clone()), + rest => Self::Fork(rest.to_vec()), + } + } +} + #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -532,55 +561,313 @@ pub async fn send_transaction_message_route<'a>( // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - // Ruma/PduEvent/StateEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 1. Is a valid event, otherwise it is dropped. + // Ruma/PduEvent/StateEvent satisfies this - // state-res checks signatures - 2. Passes signature checks, otherwise event is dropped. - - // 3. Passes hash checks, otherwise it is redacted before being processed further. - // TODO: redact event if hashing fails let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); + // 2. Passes signature checks, otherwise event is dropped. + // 3. 
Passes hash checks, otherwise it is redacted before being processed further. + let keys = db.globals.keypair(); + let mut pub_key_set = BTreeMap::new(); + pub_key_set.insert( + "ed25519:1".to_string(), + String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), + ); + let mut pub_key_map = BTreeMap::new(); + pub_key_map.insert("domain".to_string(), pub_key_set); + + let value = + match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => { + resolved_map + .insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + } + } else { + value + } + } + Err(_e) => { + resolved_map.insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + }; + let pdu = serde_json::from_value::( serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), ) .expect("all ruma pdus are conduit pdus"); - let room_id = &pdu.room_id; // If we have no idea about this room skip the PDU - if !db.rooms.exists(room_id)? { + if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); + // TODO: remove the need to convert to state_res + let event = pdu.convert_for_state_res(); + let previous = pdu + .prev_events + .first() + .map(|id| { + db.rooms + .get_pdu(id) + .expect("todo") + .map(|ev| ev.convert_for_state_res()) + }) + .flatten(); - let next_room_state = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &pdu, - value, - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + // 4. + let auth_events = db.rooms.get_auth_events( + &pdu.room_id, + &pdu.kind, + &pdu.sender, + pdu.state_key.as_deref(), + pdu.content.clone(), )?; - - db.rooms.set_room_state(&room_id, &next_room_state)?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + auth_events + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with auth events".into()), + ); + continue; } - resolved_map.insert(event_id, Ok::<(), String>(())); + let mut previous_states = vec![]; + for id in &pdu.prev_events { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + previous_states.push(state); + } else { + // fetch the state + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. 
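+        // The state *at* this event is rebuilt from the stored states of its
+        // prev_events: empty when there are none, taken directly for a single
+        // prev_event, and resolved with `state_res::StateResolution::resolve` when
+        // the prev_events fork. The event must pass auth against that state, and is
+        // then auth-checked once more against the resolved state of the room's
+        // current forward extremities; failing only that second check soft-fails the
+        // event (it is persisted but reported back as rejected in the transaction
+        // response) instead of dropping it outright.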
+ let state_at_event = if previous_states.is_empty() { + // State is empty + Default::default() + } else if previous_states.len() == 1 { + previous_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &previous_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + state_at_event + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Event failed auth with state_at + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with state at the event".into()), + ); + continue; + } + + // The event could still be soft failed + append_state_soft(&db, &pdu)?; + + // Gather the forward extremities and resolve + let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; + let mut fork_states = vec![]; + for id in &forward_extrems { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + fork_states.push(state); + } else { + // This is probably an error?? + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 6. + let state_at_forks = if fork_states.is_empty() { + // State is empty + Default::default() + } else if fork_states.len() == 1 { + fork_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous, + state_at_forks + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? 
+ { + // Soft fail + resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + } else { + append_state(&db, &pdu)?; + // Event has passed all auth/stateres checks + resolved_map.insert(event.event_id(), Ok(())); + } } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 4b9976aa743321a0c062fac9ffd2de737531b717 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 31 Dec 2020 08:40:49 -0500 Subject: [PATCH 029/103] Update state-res, use the new Event trait This also bumps ruma to latest and removes js_int infavor of the ruma re-export --- Cargo.lock | 140 +++++++++++++++++++------------- Cargo.toml | 8 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 2 +- src/client_server/membership.rs | 22 +++-- src/client_server/message.rs | 7 +- src/database/rooms.rs | 68 ++++++++++------ src/database/rooms/edus.rs | 3 +- src/database/users.rs | 3 +- src/pdu.rs | 116 +++++++++++++------------- src/server_server.rs | 64 ++++++++------- 11 files changed, 252 insertions(+), 183 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ab184c..b05a3c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -212,8 +212,8 @@ dependencies = [ "js_int", "jsonwebtoken", "log", - "rand", "regex", + "rand 0.7.3", "reqwest", "ring", "rocket", @@ -585,6 +585,17 @@ dependencies = [ "wasi 0.9.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "wasi", +] + [[package]] name = "gif" version = "0.11.1" @@ -847,9 +858,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.1.9" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96797f53235a1d6dc985f244a69de54b04c45b7e0e357a35c85a45a847d92f2" +checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc" dependencies = [ 
"serde", ] @@ -1396,11 +1407,23 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.0", + "rand_hc 0.3.0", ] [[package]] @@ -1410,7 +1433,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.0", ] [[package]] @@ -1419,7 +1452,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", +] + +[[package]] +name = "rand_core" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +dependencies = [ + "getrandom 0.2.0", ] [[package]] @@ -1428,7 +1470,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.0", ] [[package]] @@ -1443,7 +1494,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] @@ -1571,7 +1622,7 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand", + "rand 0.7.3", "ref-cast", "rocket_codegen", "rocket_http", @@ -1625,7 +1676,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "js_int", @@ -1643,7 +1694,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "http", "percent-encoding", @@ -1658,7 +1709,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1669,7 +1720,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "ruma-api", "ruma-common", @@ -1683,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "http", @@ -1702,7 +1753,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "maplit", @@ -1715,7 +1766,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-common", @@ -1729,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1740,7 +1791,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-api", @@ -1755,21 +1806,21 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand", + "rand 0.8.0", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", + "ruma-serde-macros", "serde", - "strum", ] [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro2", "quote", @@ -1780,7 +1831,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "serde", ] @@ -1788,7 +1839,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "form_urlencoded", "itoa", @@ -1800,8 +1851,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +version = "0.2.3" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1812,9 +1863,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -2076,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" +source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" dependencies = [ "itertools", "maplit", @@ -2136,27 +2187,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" -[[package]] -name = "strum" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b89a286a7e3b5720b9a477b23253bc50debac207c8d21505f8e70b36792f11b5" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.19.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "syn" version = "1.0.55" @@ -2176,7 +2206,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2416,7 +2446,7 @@ dependencies = [ "idna", "lazy_static", "log", - "rand", + "rand 0.7.3", "smallvec", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index c2db3d9..bf74e8a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,13 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = 
"https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# TODO: remove the gen-eventid feature +state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio @@ -37,8 +38,7 @@ log = "0.4.11" http = "0.2.1" # Used to find data directory for default db path directories = "3.0.1" -# Used for number types for ruma -js_int = "0.1.9" + # Used for ruma wrapper serde_json = { version = "1.0.60", features = ["raw_value"] } # Used for appservice registration files diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index fa5db3a..2bff20c 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -124,7 +124,7 @@ pub async fn get_room_visibility_route( pub async fn get_public_rooms_filtered_helper( db: &Database, server: Option<&ServerName>, - limit: Option, + limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 156040b..f792062 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -131,7 +131,7 @@ pub async fn get_content_thumbnail_route( allow_remote: false, height: body.height, width: body.width, - method: body.method, + method: body.method.clone(), server_name: &body.server_name, media_id: &body.media_id, }, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b459d37..eb44085 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,7 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::StateEvent; +use state_res::Event; use std::{ collections::{BTreeMap, HashMap, HashSet}, convert::TryFrom, @@ -594,19 +594,19 @@ async fn join_room_by_id_helper( .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - state_res::StateEvent::from_id_canon_obj(event_id.clone(), value.clone()) + PduEvent::from_id_val(&event_id, value.clone()) 
.map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") }) }) - .collect::>>>()?; + .collect::>>>()?; let control_events = event_map .values() - .filter(|pdu| pdu.is_power_event()) - .map(|pdu| pdu.event_id()) + .filter(|pdu| state_res::is_power_event(pdu)) + .map(|pdu| pdu.event_id.clone()) .collect::>(); // These events are not guaranteed to be sorted but they are resolved according to spec @@ -646,7 +646,8 @@ async fn join_room_by_id_helper( .cloned() .collect::>(); - let power_level = resolved_control_events.get(&(EventType::RoomPowerLevels, "".into())); + let power_level = + resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string()))); // Sort the remaining non control events let sorted_event_ids = state_res::StateResolution::mainline_sort( room_id, @@ -685,8 +686,13 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( +<<<<<<< HEAD &PduEvent::from(&**pdu), utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +======= + &pdu, + &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +>>>>>>> 6232d1f (Update state-res, use the new Event trait) count, pdu_id.clone().into(), &db.globals, @@ -695,7 +701,9 @@ async fn join_room_by_id_helper( )?; if state_events.contains(ev_id) { - state.insert((pdu.kind(), pdu.state_key()), pdu_id); + if let Some(key) = &pdu.state_key { + state.insert((pdu.kind(), key.to_string()), pdu_id); + } } } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3640730..c56cc94 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -8,7 +8,10 @@ use ruma::{ events::EventContent, EventId, }; -use std::convert::{TryFrom, TryInto}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -46,7 +49,7 @@ pub async fn send_message_event_route( return Ok(send_message_event::Response { event_id }.into()); } - let mut unsigned = serde_json::Map::new(); + let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); let event_id = db.rooms.build_and_append_pdu( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b35d006..f0129c6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; +use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; use std::{ collections::{BTreeMap, HashMap}, @@ -67,12 +67,8 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> state_res::Result> { +impl StateStore for Rooms { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { let pid = self .get_pdu_id(event_id) .map_err(StateError::custom)? 
@@ -91,7 +87,7 @@ impl StateStore for Rooms { .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, ) .map_err(Into::into) - .and_then(|pdu: StateEvent| { + .and_then(|pdu: PduEvent| { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option if pdu.room_id() == room_id { @@ -112,7 +108,7 @@ impl Rooms { &self, room_id: &RoomId, state_hash: &StateHashId, - ) -> Result> { + ) -> Result> { self.stateid_pduid .scan_prefix(&state_hash) .values() @@ -141,7 +137,7 @@ impl Rooms { pdu, )) }) - .collect::>>() + .collect() } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -181,7 +177,7 @@ impl Rooms { ))) }) } else { - return Ok(None); + Ok(None) } } @@ -205,7 +201,7 @@ impl Rooms { content: serde_json::Value, ) -> Result> { let auth_events = state_res::auth_types_for_event( - kind.clone(), + kind, sender, state_key.map(|s| s.to_string()), content, @@ -213,7 +209,13 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some((_, pdu)) = self.room_state_get(room_id, &event_type, &state_key)? { + if let Some((_, pdu)) = self.room_state_get( + room_id, + &event_type, + &state_key + .as_deref() + .expect("found a non state event in auth events"), + )? { events.insert((event_type, state_key), pdu); } } @@ -290,7 +292,10 @@ impl Rooms { } /// Returns the full room state. - pub fn room_state_full(&self, room_id: &RoomId) -> Result> { + pub fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { self.state_full(&room_id, ¤t_state_hash) } else { @@ -795,23 +800,40 @@ impl Rooms { ErrorKind::Unknown, "Membership can't be the first event", ))?)? 
- .map(|pdu| pdu.convert_for_state_res()); + .map(Arc::new); event_auth::valid_membership_change( // TODO this is a bit of a hack but not sure how to have a type // declared in `state_res` crate easily convert to/from conduit::PduEvent - Requester { - prev_event_ids: prev_events.to_owned(), - room_id: &room_id, - content: &content, - state_key: Some(state_key.to_owned()), - sender: &sender, - }, + &Arc::new(PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater"), + room_id: room_id.clone(), + sender: sender.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: event_type, + content, + state_key: Some(state_key.clone()), + prev_events, + depth: (prev_events.len() as u32).into(), + auth_events: auth_events + .into_iter() + .map(|(_, pdu)| pdu.event_id) + .collect(), + redacts, + unsigned: unsigned + .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), + hashes: ruma::events::pdu::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: BTreeMap::new(), + }), prev_event, None, // TODO: third party invite &auth_events .iter() .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res())) + Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) .collect::>>()?, ) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 29edc2a..2b1b03d 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, @@ -7,7 +6,7 @@ use ruma::{ }, presence::PresenceState, serde::Raw, - RoomId, UserId, + RoomId, UInt, UserId, }; use std::{ collections::HashMap, diff --git a/src/database/users.rs b/src/database/users.rs index 9da0776..153dce9 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,7 @@ use ruma::{ encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; diff --git a/src/pdu.rs b/src/pdu.rs index f6ec415..c764700 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,11 @@ use crate::Error; -use js_int::UInt; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -33,8 +32,8 @@ pub struct PduEvent { pub auth_events: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub redacts: Option, - #[serde(default, skip_serializing_if = "serde_json::Map::is_empty")] - pub unsigned: serde_json::Map, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub unsigned: BTreeMap, pub hashes: EventHash, pub signatures: BTreeMap, BTreeMap>, } @@ -227,61 +226,66 @@ impl PduEvent { ) .expect("Raw::from_value always works") } -} -impl From<&state_res::StateEvent> for PduEvent { - fn from(pdu: &state_res::StateEvent) -> Self { - Self { - event_id: pdu.event_id(), - room_id: 
pdu.room_id().clone(), - sender: pdu.sender().clone(), - origin_server_ts: (pdu - .origin_server_ts() - .duration_since(UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64) - .try_into() - .expect("time is valid"), - kind: pdu.kind(), - content: pdu.content().clone(), - state_key: Some(pdu.state_key()), - prev_events: pdu.prev_event_ids(), - depth: *pdu.depth(), - auth_events: pdu.auth_events(), - redacts: pdu.redacts().cloned(), - unsigned: pdu.unsigned().clone().into_iter().collect(), - hashes: pdu.hashes().clone(), - signatures: pdu.signatures(), - } + pub fn from_id_val( + event_id: &EventId, + json: CanonicalJsonObject, + ) -> Result { + json.insert( + "event_id".to_string(), + ruma::serde::to_canonical_value(event_id).expect("event_id is a valid Value"), + ); + + serde_json::from_value(serde_json::to_value(json).expect("valid JSON")) } } -impl PduEvent { - pub fn convert_for_state_res(&self) -> Arc { - Arc::new( - // For consistency of eventId (just in case) we use the one - // generated by conduit for everything. - state_res::StateEvent::from_id_value( - self.event_id.clone(), - json!({ - "event_id": self.event_id, - "room_id": self.room_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "type": self.kind, - "content": self.content, - "state_key": self.state_key, - "prev_events": self.prev_events, - "depth": self.depth, - "auth_events": self.auth_events, - "redacts": self.redacts, - "unsigned": self.unsigned, - "hashes": self.hashes, - "signatures": self.signatures, - }), - ) - .expect("all conduit PDUs are state events"), - ) +impl state_res::Event for PduEvent { + fn event_id(&self) -> &EventId { + &self.event_id + } + + fn room_id(&self) -> &RoomId { + &self.room_id + } + + fn sender(&self) -> &UserId { + &self.sender + } + fn kind(&self) -> EventType { + self.kind.clone() + } + + fn content(&self) -> serde_json::Value { + self.content.clone() + } + fn origin_server_ts(&self) -> std::time::SystemTime { + UNIX_EPOCH + std::time::Duration::from_millis(self.origin_server_ts.into()) + } + + fn state_key(&self) -> Option { + self.state_key.clone() + } + fn prev_events(&self) -> Vec { + self.prev_events.to_vec() + } + fn depth(&self) -> &UInt { + &self.depth + } + fn auth_events(&self) -> Vec { + self.auth_events.to_vec() + } + fn redacts(&self) -> Option<&EventId> { + self.redacts.as_ref() + } + fn hashes(&self) -> &EventHash { + &self.hashes + } + fn signatures(&self) -> BTreeMap, BTreeMap> { + self.signatures.clone() + } + fn unsigned(&self) -> &BTreeMap { + &self.unsigned } } @@ -315,7 +319,7 @@ pub struct PduBuilder { #[serde(rename = "type")] pub event_type: EventType, pub content: serde_json::Value, - pub unsigned: Option>, + pub unsigned: Option>, pub state_key: Option, pub redacts: Option, } diff --git a/src/server_server.rs b/src/server_server.rs index d68e9fa..58d85b1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,12 +20,13 @@ use ruma::{ directory::{IncomingFilter, IncomingRoomNetwork}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::StateMap; +use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, + sync::Arc, time::{Duration, SystemTime}, }; @@ -610,17 +611,12 @@ pub async fn send_transaction_message_route<'a>( continue; } - // TODO: remove the need to convert to state_res - let event = pdu.convert_for_state_res(); + let event = Arc::new(pdu.clone()); + let previous = pdu .prev_events .first() - 
.map(|id| { - db.rooms - .get_pdu(id) - .expect("todo") - .map(|ev| ev.convert_for_state_res()) - }) + .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); // 4. @@ -637,27 +633,32 @@ pub async fn send_transaction_message_route<'a>( previous.clone(), auth_events .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) + .map(|(k, v)| (k, Arc::new(v))) .collect(), None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with auth events".into()), ); continue; } - let mut previous_states = vec![]; + let mut previous_states: Vec>> = vec![]; for id in &pdu.prev_events { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .collect(); previous_states.push(state); } else { // fetch the state @@ -693,7 +694,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -702,7 +703,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -712,17 +713,14 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_event, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { // Event failed auth with state_at resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with state at the event".into()), ); continue; @@ -733,14 +731,20 @@ pub async fn send_transaction_message_route<'a>( // Gather the forward extremities and resolve let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states = vec![]; + let mut fork_states: Vec>> = vec![]; for id in &forward_extrems { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + fork_states.push(state); } else { // This is probably an error?? @@ -776,7 +780,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -785,7 +789,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -795,20 +799,20 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
{ // Soft fail - resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + resolved_map.insert( + event.event_id().clone(), + Err("Event has been soft failed".into()), + ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id(), Ok(())); + resolved_map.insert(event.event_id().clone(), Ok(())); } } From 63af3d3da06d1fdb4d8e8be3637d2814efba799d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 3 Jan 2021 17:26:17 -0500 Subject: [PATCH 030/103] Step 5 in /send just fetches state from incoming server --- Cargo.lock | 78 ++++++------- src/database/rooms.rs | 29 +---- src/pdu.rs | 2 +- src/server_server.rs | 264 +++++++++++++++++++++++------------------- 4 files changed, 186 insertions(+), 187 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b05a3c4..227e822 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] @@ -44,9 +44,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assign" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" +checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-trait" @@ -369,9 +369,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "either" @@ -576,11 +576,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -834,9 +834,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" @@ -1046,9 +1046,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", @@ -1149,9 +1149,9 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.31" +version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -1178,9 +1178,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.59" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" dependencies = [ "autocfg", "cc", @@ -1407,7 +1407,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1452,7 +1452,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] @@ -1494,25 +1494,25 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "redox_syscall", "rust-argon2", ] [[package]] name = "ref-cast" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -2005,9 +2005,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" dependencies = [ "itoa", "ryu", @@ -2088,9 +2088,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" +checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" [[package]] name = "socket2" @@ -2111,9 +2111,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" dependencies = [ "version_check", ] @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" +source = 
"git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" dependencies = [ "itertools", "maplit", @@ -2189,9 +2189,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.55" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" +checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" dependencies = [ "proc-macro2", "quote", @@ -2214,18 +2214,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" dependencies = [ "proc-macro2", "quote", @@ -2752,9 +2752,9 @@ dependencies = [ [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f0129c6..ef76c39 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -802,32 +802,9 @@ impl Rooms { ))?)? 
.map(Arc::new); event_auth::valid_membership_change( - // TODO this is a bit of a hack but not sure how to have a type - // declared in `state_res` crate easily convert to/from conduit::PduEvent - &Arc::new(PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key: Some(state_key.clone()), - prev_events, - depth: (prev_events.len() as u32).into(), - auth_events: auth_events - .into_iter() - .map(|(_, pdu)| pdu.event_id) - .collect(), - redacts, - unsigned: unsigned - .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), - hashes: ruma::events::pdu::EventHash { - sha256: "aaa".to_owned(), - }, - signatures: BTreeMap::new(), - }), + Some(state_key.as_str()), + &sender, + content.clone(), prev_event, None, // TODO: third party invite &auth_events diff --git a/src/pdu.rs b/src/pdu.rs index c764700..2997317 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -229,7 +229,7 @@ impl PduEvent { pub fn from_id_val( event_id: &EventId, - json: CanonicalJsonObject, + mut json: CanonicalJsonObject, ) -> Result { json.insert( "event_id".to_string(), diff --git a/src/server_server.rs b/src/server_server.rs index 58d85b1..3c4308c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -11,13 +11,15 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + serde::Raw, + signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; @@ -578,32 +580,13 @@ pub async fn send_transaction_message_route<'a>( let mut pub_key_map = BTreeMap::new(); pub_key_map.insert("domain".to_string(), pub_key_set); - let value = - match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => { - resolved_map - .insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - } - } else { - value - } - } - Err(_e) => { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - }; - - let pdu = serde_json::from_value::( - serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("all ruma pdus are conduit pdus"); + let pdu = match signature_and_hash_check(&pub_key_map, value) { + Ok(pdu) => pdu, + Err(e) => { + resolved_map.insert(event_id, Err(e)); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? 
{ @@ -619,7 +602,10 @@ pub async fn send_transaction_message_route<'a>( .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); - // 4. + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // the auth events that would be correct for this pdu. Put another way we should use the auth events + // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( &pdu.room_id, &pdu.kind, @@ -627,6 +613,12 @@ pub async fn send_transaction_message_route<'a>( pdu.state_key.as_deref(), pdu.content.clone(), )?; + + let mut event_map: state_res::EventMap> = auth_events + .iter() + .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .collect(); + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, @@ -635,7 +627,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), - None, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -646,66 +638,38 @@ pub async fn send_transaction_message_route<'a>( continue; } - let mut previous_states: Vec>> = vec![]; - for id in &pdu.prev_events { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? + let server_name = body.body.origin.clone(); + let (state_at_event, incoming_auth_events): (StateMap>, _) = match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) + .await? .into_iter() - .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); - previous_states.push(state); - } else { - // fetch the state - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } - } - } - // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. 
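Taken together, the numbered checks in this handler form a small decision ladder for each incoming PDU: an event that fails against its own auth_events or against the state at the event is rejected outright, while one that only fails against the current room state is soft failed rather than rejected. A condensed, std-only sketch of that ladder, using plain booleans in place of the real state_res::event_auth::auth_check calls (the Outcome type and triage_incoming_pdu name are illustrative, not conduit's API):

    // Outcome of the per-PDU checks in the /send handler, in simplified form.
    enum Outcome {
        Rejected(&'static str), // reported back to the sending server
        SoftFailed,             // kept, but not allowed to shape current state
        Accepted,
    }

    fn triage_incoming_pdu(
        passes_auth_with_its_auth_events: bool, // check 4
        passes_auth_with_state_before_it: bool, // check 5
        passes_auth_with_current_state: bool,   // soft-fail check
    ) -> Outcome {
        if !passes_auth_with_its_auth_events {
            return Outcome::Rejected("failed auth against the event's own auth_events");
        }
        if !passes_auth_with_state_before_it {
            return Outcome::Rejected("failed auth against the state at the event");
        }
        if !passes_auth_with_current_state {
            // Soft failure: valid in its own context, but it must not extend
            // the room's forward extremities or influence the current state.
            return Outcome::SoftFailed;
        }
        Outcome::Accepted
    }

    fn main() {
        assert!(matches!(
            triage_incoming_pdu(true, true, false),
            Outcome::SoftFailed
        ));
        assert!(matches!(
            triage_incoming_pdu(true, false, true),
            Outcome::Rejected(_)
        ));
    }

In the patch itself, both rejection arms and the soft-fail arm currently just record an Err in resolved_map and continue; persisting a soft-failed event without writing to the current room state is still marked as a TODO (append_state_soft).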
- let state_at_event = if previous_states.is_empty() { - // State is empty - Default::default() - } else if previous_states.len() == 1 { - previous_states[0].clone() - } else { - match state_res::StateResolution::resolve( - &pdu.room_id, - &RoomVersionId::Version6, - &previous_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(), - None, - &db.rooms, - ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), - Err(e) => panic!("{:?}", e), + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await?, + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; } }; @@ -713,8 +677,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event, - None, + state_at_event.clone(), // TODO: could this be &state avoid .clone + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -747,22 +711,7 @@ pub async fn send_transaction_message_route<'a>( fork_states.push(state); } else { - // This is probably an error?? - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } + todo!("we don't know of a pdu that is part of our known forks OOPS") } } @@ -773,6 +722,18 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // Add as much as we can to the `event_map` (less DB hits) + event_map.extend( + incoming_auth_events + .into_iter() + .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + ); + event_map.extend( + state_at_event + .into_iter() + .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), + ); + match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, @@ -784,7 +745,7 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - None, + &mut event_map, &db.rooms, ) { Ok(res) => res @@ -819,8 +780,74 @@ pub async fn send_transaction_message_route<'a>( Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn signature_and_hash_check( + pub_key_map: &ruma::signatures::PublicKeyMap, + value: CanonicalJsonObject, +) -> std::result::Result { + let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".into()), + } + } else { + value + } + } + Err(_e) => return Err("Signature verification failed".into()), + }; + + serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// TODO: this needs to add events to the DB in a way that does not +/// effect the state of the room +async fn fetch_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + events: &[EventId], +) -> Result> { + let mut pdus = vec![]; + for id in events { + match db.rooms.get_pdu(id)? 
{ + Some(pdu) => pdus.push(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: id }, + ) + .await + { + Ok(res) => { + let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(pdu) => { + // TODO: add to our DB somehow? + pdus.push(pdu); + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + } + } + Ok(pdus) +} + fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() + db.rooms.get_pdu_leaves(room_id) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -854,20 +881,15 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } + // db.rooms.append_pdu( + // pdu, + // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + // count, + // pdu_id.clone().into(), + // &db.globals, + // &db.account_data, + // &db.admin, + // )?; Ok(()) } From cdeb1236a20e39d0f9635e1c0e7ed40882effd59 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 5 Jan 2021 09:21:41 -0500 Subject: [PATCH 031/103] Fix clippy warnings remove unused imports --- Cargo.lock | 2 +- src/appservice_server.rs | 2 +- src/client_server/membership.rs | 1 - src/database/globals.rs | 4 ++- src/database/sending.rs | 12 ++++----- src/pdu.rs | 7 +---- src/server_server.rs | 45 ++++++++++++++++++++------------- 7 files changed, 39 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 227e822..9dc0b38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" +source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" dependencies = [ "itertools", "maplit", diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..04f14c0 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -25,7 +25,7 @@ where let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains("?") { + let symbol = if old_path_and_query.contains('?') { "&" } else { "?" 
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index eb44085..4e093c2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -675,7 +675,6 @@ async fn join_room_by_id_helper( .iter() .filter(|id| resolved_events.values().any(|rid| rid == *id)) { - // this is a `state_res::StateEvent` that holds a `ruma::Pdu` let pdu = event_map .get(ev_id) .expect("Found event_id in sorted events that is not in resolved state"); diff --git a/src/database/globals.rs b/src/database/globals.rs index ccd6284..beb7de5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -9,9 +9,11 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; +pub type DestinationCache = Arc, (String, Option)>>>; + #[derive(Clone)] pub struct Globals { - pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host + pub actual_destination_cache: DestinationCache, // actual_destination, host pub(super) globals: sled::Tree, config: Config, keypair: Arc, diff --git a/src/database/sending.rs b/src/database/sending.rs index fd32793..d99c4f3 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -79,7 +79,7 @@ impl Sending { match response { Ok((server, is_appservice)) => { let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -125,7 +125,7 @@ impl Sending { Err((server, is_appservice, e)) => { info!("Couldn't send transaction to {}\n{}", server, e); let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -157,7 +157,7 @@ impl Sending { .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) .map(|server_str| { // Appservices start with a plus - if server_str.starts_with("+") { + if server_str.starts_with('+') { (server_str[1..].to_owned(), true) } else { (server_str, false) @@ -186,7 +186,7 @@ impl Sending { } let mut prefix = if *is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -220,7 +220,7 @@ impl Sending { } pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = "+".as_bytes().to_vec(); + let mut key = b"+".to_vec(); key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); @@ -330,7 +330,7 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with("+") { + let (server, is_appservice) = if server.starts_with('+') { (&server[1..], true) } else { (&*server, false) diff --git a/src/pdu.rs b/src/pdu.rs index 2997317..86fbc9f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,12 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, - time::UNIX_EPOCH, -}; +use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { diff --git a/src/server_server.rs b/src/server_server.rs index 3c4308c..3de3636 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -11,14 +11,13 @@ use ruma::{ 
get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - serde::Raw, signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; @@ -220,7 +219,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination( globals: &crate::database::globals::Globals, - destination: &Box, + destination: &ServerName, ) -> (String, Option) { let mut host = None; @@ -594,13 +593,14 @@ pub async fn send_transaction_message_route<'a>( continue; } + let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - - let previous = pdu - .prev_events - .first() - .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) - .flatten(); + // Fetch any unknown events or retrieve them from the DB + let previous = + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { + mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not @@ -616,14 +616,14 @@ pub async fn send_transaction_message_route<'a>( let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - auth_events + &auth_events .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), @@ -638,7 +638,6 @@ pub async fn send_transaction_message_route<'a>( continue; } - let server_name = body.body.origin.clone(); let (state_at_event, incoming_auth_events): (StateMap>, _) = match db .sending .send_federation_request( @@ -652,8 +651,18 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) - .await? + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); @@ -677,8 +686,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event.clone(), // TODO: could this be &state avoid .clone - None, // TODO: third party invite + &state_at_event, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -760,7 +769,7 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks, + &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
From 8de0d9f9ced7c11d24bd38d20f871bae11ed863e Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 08:52:30 -0500 Subject: [PATCH 032/103] Remove StateStore trait from state-res collect events needed --- Cargo.lock | 62 +++++++++--------- Cargo.toml | 2 +- src/client_server/membership.rs | 4 -- src/database/rooms.rs | 100 ++++++++++++++++++---------- src/server_server.rs | 111 +++++++++++++++++++------------- 5 files changed, 163 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9dc0b38..f621d16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,9 +231,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -558,7 +558,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.2", + "pin-project 1.0.3", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -587,13 +587,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] @@ -722,7 +722,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.2", + "pin-project 1.0.3", "socket2", "tokio", "tower-service", @@ -1272,11 +1272,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" +checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" dependencies = [ - "pin-project-internal 1.0.2", + "pin-project-internal 1.0.3", ] [[package]] @@ -1292,9 +1292,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" dependencies = [ "proc-macro2", "quote", @@ -1309,9 +1309,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" [[package]] name = "pin-utils" @@ -1416,13 +1416,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" dependencies = [ "libc", "rand_chacha 0.3.0", - "rand_core 0.6.0", + "rand_core 0.6.1", "rand_hc 0.3.0", ] @@ -1443,7 +1443,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1457,11 +1457,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.0", + "getrandom 0.2.1", ] [[package]] @@ -1479,7 +1479,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1569,7 +1569,7 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "serde", "serde_urlencoded", "tokio", @@ -1809,7 +1809,7 @@ version = "0.17.4" source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand 0.8.0", + "rand 0.8.1", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -2028,9 +2028,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +checksum = "971be8f6e4d4a47163b405a3df70d14359186f9ab0f3a3ec37df144ca1ce089f" dependencies = [ "dtoa", "linked-hash-map", @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" +source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" dependencies = [ "itertools", "maplit", @@ -2189,9 +2189,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" +checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" dependencies = [ "proc-macro2", "quote", @@ -2398,7 +2398,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "tracing-attributes", "tracing-core", ] diff --git a/Cargo.toml b/Cargo.toml index bf74e8a..fdcc4ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be 
the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 4e093c2..ea14268 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -618,7 +618,6 @@ async fn join_room_by_id_helper( &room_id, &control_events, &mut event_map, - &db.rooms, &event_ids, ); @@ -629,7 +628,6 @@ async fn join_room_by_id_helper( &sorted_control_events, &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); @@ -654,7 +652,6 @@ async fn join_room_by_id_helper( &events_to_sort, power_level, &mut event_map, - &db.rooms, ); let resolved_events = state_res::StateResolution::iterative_auth_check( @@ -663,7 +660,6 @@ async fn join_room_by_id_helper( &sorted_event_ids, &resolved_control_events, &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ef76c39..fe4f23c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -67,40 +67,6 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { - let pid = self - .get_pdu_id(event_id) - .map_err(StateError::custom)? - .ok_or_else(|| { - StateError::NotFound(format!( - "PDU via room_id and event_id not found in the db: {}", - event_id.as_str() - )) - })?; - - serde_json::from_slice( - &self - .pduid_pdu - .get(pid) - .map_err(StateError::custom)? - .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, - ) - .map_err(Into::into) - .and_then(|pdu: PduEvent| { - // conduit's PDU's always contain a room_id but some - // of ruma's do not so this must be an Option - if pdu.room_id() == room_id { - Ok(Arc::new(pdu)) - } else { - Err(StateError::NotFound( - "Found PDU for incorrect room in db.".into(), - )) - } - }) - } -} - impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. @@ -222,6 +188,72 @@ impl Rooms { Ok(events) } + /// Returns a Vec of the related auth events to the given `event`. + /// + /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. + pub fn auth_events_full( + &self, + room_id: &RoomId, + event_ids: &[EventId], + ) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + if let Some(ev) = self.get_pdu(&ev_id)? { + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) + } + + /// Returns a Vec representing the difference in auth chains of the given `events`. + /// + /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). + pub fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + use std::collections::BTreeSet; + + let mut chains = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_events_full(room_id, &ids)? 
+ .into_iter() + .map(|pdu| pdu.event_id) + .collect::>(); + chains.push(chain); + } + + if let Some(chain) = chains.first() { + let rest = chains.iter().skip(1).flatten().cloned().collect(); + let common = chain.intersection(&rest).collect::>(); + + Ok(chains + .iter() + .flatten() + .filter(|id| !common.contains(&id)) + .cloned() + .collect::>() + .into_iter() + .collect()) + } else { + Ok(vec![]) + } + } + /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. diff --git a/src/server_server.rs b/src/server_server.rs index 3de3636..f68475c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -603,7 +603,7 @@ pub async fn send_transaction_message_route<'a>( }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not // the auth events that would be correct for this pdu. Put another way we should use the auth events // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( @@ -637,50 +637,56 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 4. - let (state_at_event, incoming_auth_events): (StateMap>, _) = match db - .sending - .send_federation_request( - &db.globals, - server_name.clone(), - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) - .collect(); - - ( - state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) - .await?, + // Step 5. event passes auth based on state at the event + let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = + match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, ) - } - Err(_) => { - resolved_map.insert( - event.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await + { + Ok(res) => { + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .collect(); + + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await? 
+ .into_iter() + .map(Arc::new) + .collect(), + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; if !state_res::event_auth::auth_check( &RoomVersionId::Version6, @@ -698,6 +704,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 5. // The event could still be soft failed append_state_soft(&db, &pdu)?; @@ -724,18 +731,30 @@ pub async fn send_transaction_message_route<'a>( } } - // 6. + // Step 6. event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() } else if fork_states.len() == 1 { fork_states[0].clone() } else { + let auth_events = fork_states + .iter() + .map(|map| { + db.rooms.auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + }) + .collect(); + // Add as much as we can to the `event_map` (less DB hits) event_map.extend( incoming_auth_events .into_iter() - .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + .map(|pdu| (pdu.event_id().clone(), pdu)), ); event_map.extend( state_at_event @@ -754,8 +773,8 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), + &auth_events, &mut event_map, - &db.rooms, ) { Ok(res) => res .into_iter() From 168ae8dca00d480ad28d80e65918853f1802091b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 15:05:09 -0500 Subject: [PATCH 033/103] Fill event_map with all events that will be needed for resolution --- Cargo.lock | 2 +- src/server_server.rs | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f621d16..7a79dbe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -212,8 +212,8 @@ dependencies = [ "js_int", "jsonwebtoken", "log", - "regex", "rand 0.7.3", + "regex", "reqwest", "ring", "rocket", diff --git a/src/server_server.rs b/src/server_server.rs index f68475c..e87c05c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -565,7 +565,7 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this - + // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); // 2. Passes signature checks, otherwise event is dropped. 
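The hunk below carries out what the commit message describes: every event that state resolution might need to look up is put into the shared event_map ahead of time, so resolution works from memory instead of repeated database reads. A rough illustration of that caching pattern, using a hypothetical helper rather than conduit's actual code:

    use std::{collections::BTreeMap, sync::Arc};

    // Hypothetical pre-fill helper: every event we already hold is stored
    // behind an Arc keyed by its event id, so later lookups during state
    // resolution are cheap Arc clones instead of database fetches.
    fn prefill_event_map<E>(
        event_map: &mut BTreeMap<String, Arc<E>>,
        events: impl IntoIterator<Item = (String, E)>,
    ) {
        for (id, ev) in events {
            event_map.entry(id).or_insert_with(|| Arc::new(ev));
        }
    }

In the actual change the same effect is achieved with event_map.extend(..) over the per-fork auth events, the incoming auth chain and the state at the event.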
@@ -741,16 +741,24 @@ pub async fn send_transaction_message_route<'a>( let auth_events = fork_states .iter() .map(|map| { - db.rooms.auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), - ) + db.rooms + .auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) }) - .collect(); + .collect::>>()?; - // Add as much as we can to the `event_map` (less DB hits) + // Add everything we will need to event_map + event_map.extend( + auth_events + .iter() + .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) + .flatten(), + ); event_map.extend( incoming_auth_events .into_iter() @@ -773,7 +781,10 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - &auth_events, + auth_events + .into_iter() + .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) + .collect(), &mut event_map, ) { Ok(res) => res From d0b8d0f5fdaf2ee62b6d14702cda5d2a154c241b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 12 Jan 2021 08:26:52 -0500 Subject: [PATCH 034/103] Fix signature/hash checks, fetch recursive auth events --- src/client_server/membership.rs | 7 +- src/database/rooms.rs | 4 +- src/pdu.rs | 12 +- src/server_server.rs | 240 +++++++++++++++++++++++++------- 4 files changed, 193 insertions(+), 70 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ea14268..29b6c14 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -681,13 +681,8 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( -<<<<<<< HEAD - &PduEvent::from(&**pdu), - utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), -======= &pdu, - &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), ->>>>>>> 6232d1f (Update state-res, use the new Event trait) + utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fe4f23c..88a772b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; +use state_res::{event_auth, Event, StateMap}; use std::{ collections::{BTreeMap, HashMap}, @@ -193,7 +193,7 @@ impl Rooms { /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. pub fn auth_events_full( &self, - room_id: &RoomId, + _room_id: &RoomId, event_ids: &[EventId], ) -> Result> { let mut result = BTreeMap::new(); diff --git a/src/pdu.rs b/src/pdu.rs index 86fbc9f..750f9cf 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -4,7 +4,7 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; @@ -286,12 +286,11 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. 
/// -/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. +/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn process_incoming_pdu( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { - let mut value = - serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); + let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); let event_id = EventId::try_from(&*format!( "${}", @@ -300,11 +299,6 @@ pub(crate) fn process_incoming_pdu( )) .expect("ruma's reference hashes are valid event ids"); - value.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - (event_id, value) } diff --git a/src/server_server.rs b/src/server_server.rs index e87c05c..141d5bb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,5 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -6,6 +7,7 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ + device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -18,13 +20,14 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - signatures::{CanonicalJsonObject, PublicKeyMap}, + serde::to_canonical_value, + signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, net::{IpAddr, SocketAddr}, sync::Arc, @@ -519,6 +522,8 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } + dbg!(&*body); + for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { Ok(edu) => match edu.edu_type.as_str() { @@ -546,6 +551,7 @@ pub async fn send_transaction_message_route<'a>( } "m.presence" => {} "m.receipt" => {} + "m.device_list_update" => {} _ => {} }, Err(_err) => { @@ -565,21 +571,52 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this + // We do not add the event_id field to the pdu here because of signature and hashes checks // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. // 3. Passes hash checks, otherwise it is redacted before being processed further. - let keys = db.globals.keypair(); - let mut pub_key_set = BTreeMap::new(); - pub_key_set.insert( - "ed25519:1".to_string(), - String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), - ); + let server_name = body.body.origin.clone(); let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert("domain".to_string(), pub_key_set); + if let Some(sig) = value.get("signatures") { + match sig { + CanonicalJsonValue::Object(entity) => { + for key in entity.keys() { + // TODO: save this in a DB maybe... 
+ // fetch the public signing key + let res = db + .sending + .send_federation_request( + &db.globals, + Box::::try_from(key.to_string()).unwrap(), + get_server_keys::v2::Request::new(), + ) + .await?; - let pdu = match signature_and_hash_check(&pub_key_map, value) { + pub_key_map.insert( + res.server_key.server_name.to_string(), + res.server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); + } + } + _ => { + resolved_map.insert( + event_id, + Err("`signatures` is not a JSON object".to_string()), + ); + continue; + } + } + } else { + resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); + continue; + } + + let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -587,50 +624,75 @@ pub async fn send_transaction_message_route<'a>( } }; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - // Fetch any unknown events or retrieve them from the DB + dbg!(&*event); + // Fetch any unknown prev_events or retrieve them from the DB let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { - mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), _ => None, }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not - // the auth events that would be correct for this pdu. Put another way we should use the auth events - // the pdu claims are its auth events - let auth_events = db.rooms.get_auth_events( - &pdu.room_id, - &pdu.kind, - &pdu.sender, - pdu.state_key.as_deref(), - pdu.content.clone(), - )?; + // Recursively gather all auth events checking that the previous auth events are valid. 
+ let auth_events: Vec = + match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) + .await + { + Ok(events) => events, + Err(_) => { + resolved_map.insert( + pdu.event_id, + Err("Failed to recursively gather auth events".into()), + ); + continue; + } + }; let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); - if !state_res::event_auth::auth_check( + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - &auth_events - .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect(), + &pdu.auth_events + .iter() + .map(|id| { + event_map + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + Error::Conflict( + "Auth event not found, event failed recursive auth checks.", + ) + }) + }) + .collect::>>()?, None, // TODO: third party invite ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { + .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( pdu.event_id, Err("Event has failed auth check with auth events".into()), @@ -816,31 +878,92 @@ pub async fn send_transaction_message_route<'a>( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, -) -> std::result::Result { - let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".into()), +) -> std::result::Result { + Ok( + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + error!("CONTENT HASH FAILED"); + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value } - } else { - value } - } - Err(_e) => return Err("Signature verification failed".into()), - }; - - serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }, ) - .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have missing events it fails. +async fn fetch_check_auth_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + event_ids: &[EventId], +) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + let ev = match db.rooms.get_pdu(&ev_id)? 
{ + Some(pdu) => pdu, + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: &ev_id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(mut val) => { + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map") + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }; + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) } /// TODO: this needs to add events to the DB in a way that does not @@ -865,10 +988,21 @@ async fn fetch_events( .await { Ok(res) => { - let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); match signature_and_hash_check(key_map, value) { - Ok(pdu) => { + Ok(mut val) => { // TODO: add to our DB somehow? + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + pdus.push(pdu); } Err(e) => { @@ -898,7 +1032,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; db.rooms.append_pdu( pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, From d108a735a45e6fe9b0fcda00b6d0ebbeff043f4a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 035/103] State resolution outline for /send --- src/server_server.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/server_server.rs b/src/server_server.rs index 141d5bb..31d6467 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1067,6 +1067,59 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { Ok(()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + 
pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 52392628e9eff02ed0db65481c551840ba879405 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 14:39:56 -0500 Subject: [PATCH 036/103] Convert uses of Box to a ref --- src/client_server/alias.rs | 2 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 4 +- src/client_server/membership.rs | 4 +- src/database/sending.rs | 4 +- src/pdu.rs | 2 +- src/server_server.rs | 117 ++++++++++++++++++++++---------- 7 files changed, 92 insertions(+), 43 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 498e882..0dc40a9 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -70,7 +70,7 @@ pub async fn get_alias_helper( .sending .send_federation_request( &db.globals, - room_alias.server_name().to_owned(), + room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) .await?; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 2bff20c..87d5fc8 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -137,7 +137,7 @@ pub async fn get_public_rooms_filtered_helper( .sending .send_federation_request( &db.globals, - other_server.to_owned(), + other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, since: since.as_deref(), diff --git a/src/client_server/media.rs b/src/client_server/media.rs index f792062..275038a 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -77,7 +77,7 @@ pub async fn get_content_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content::Request { allow_remote: false, server_name: &body.server_name, @@ -126,7 +126,7 @@ pub async fn get_content_thumbnail_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content_thumbnail::Request { allow_remote: false, height: body.height, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 29b6c14..40e4183 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -468,7 +468,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, @@ -547,7 +547,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, diff --git a/src/database/sending.rs b/src/database/sending.rs index d99c4f3..e6cdc76 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -303,7 +303,7 @@ impl Sending { server_server::send_request( &globals, - server.clone(), + &*server, send_transaction_message::v1::Request { origin: globals.server_name(), pdus: &pdu_jsons, @@ -348,7 +348,7 @@ impl Sending { pub 
async fn send_federation_request( &self, globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where diff --git a/src/pdu.rs b/src/pdu.rs index 750f9cf..340ddee 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -287,7 +287,7 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. -pub(crate) fn process_incoming_pdu( +pub(crate) fn gen_event_id_canonical_json( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); diff --git a/src/server_server.rs b/src/server_server.rs index 31d6467..64e0a05 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,11 +20,12 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::{Event, StateMap}; +use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::{TryFrom, TryInto}, @@ -36,7 +37,7 @@ use std::{ pub async fn send_request( globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where @@ -50,7 +51,7 @@ where .actual_destination_cache .read() .unwrap() - .get(&destination) + .get(destination) .cloned(); let (actual_destination, host) = if let Some(result) = maybe_result { @@ -61,7 +62,7 @@ where .actual_destination_cache .write() .unwrap() - .insert(destination.clone(), result.clone()); + .insert(Box::::from(destination), result.clone()); result }; @@ -278,9 +279,9 @@ async fn find_actual_destination( (actual_destination, host) } -async fn query_srv_record<'a>( +async fn query_srv_record( globals: &crate::database::globals::Globals, - hostname: &'a str, + hostname: &str, ) -> Option { if let Ok(Some(host_port)) = globals .dns_resolver() @@ -572,11 +573,9 @@ pub async fn send_transaction_message_route<'a>( // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks - // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then - let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let server_name = body.body.origin.clone(); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { match sig { @@ -588,7 +587,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - Box::::try_from(key.to_string()).unwrap(), + <&ServerName>::try_from(key.as_str()).unwrap(), get_server_keys::v2::Request::new(), ) .await?; @@ -616,6 +615,9 @@ pub async fn send_transaction_message_route<'a>( continue; } + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { @@ -625,15 +627,20 @@ pub async fn send_transaction_message_route<'a>( }; // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type + // to our PduEvent type also finally verifying the first step listed above val.insert( "event_id".to_owned(), to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); - let pdu = serde_json::from_value::( + let pdu = match serde_json::from_value::( serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); + ) { + Ok(pdu) => pdu, + Err(_) => { + resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { @@ -644,18 +651,15 @@ pub async fn send_transaction_message_route<'a>( let event = Arc::new(pdu.clone()); dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // Recursively gather all auth events checking that the previous auth events are valid. let auth_events: Vec = - match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) - .await - { + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { Ok(events) => events, Err(_) => { resolved_map.insert( @@ -707,7 +711,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - server_name.clone(), + server_name, get_room_state_ids::v1::Request { room_id: pdu.room_id(), event_id: pdu.event_id(), @@ -716,8 +720,7 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -734,7 +737,7 @@ pub async fn send_transaction_message_route<'a>( ( state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) .await? .into_iter() .map(Arc::new) @@ -881,6 +884,52 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +async fn auth_each_event( + db: &Database, + value: CanonicalJsonObject, + event_id: EventId, + pub_key_map: &PublicKeyMap, + server_name: &ServerName, + auth_cache: EventMap>, +) -> std::result::Result { + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
+ let mut val = signature_and_hash_check(&pub_key_map, value)?; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; + + // If we have no idea about this room skip the PDU + if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { + return Err("Room is unknown to this server".into()); + } + + // Fetch any unknown prev_events or retrieve them from the DB + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; + + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // Recursively gather all auth events checking that the previous auth events are valid. + let auth_events: Vec = + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(events) => events, + Err(_) => return Err("Failed to recursively gather auth events".into()), + }; + + Ok(pdu) +} + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -909,7 +958,7 @@ fn signature_and_hash_check( /// events `auth_events`. If the chain is found to have missing events it fails. async fn fetch_check_auth_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, event_ids: &[EventId], ) -> Result> { @@ -929,13 +978,13 @@ async fn fetch_check_auth_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: &ev_id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { val.insert( @@ -970,7 +1019,7 @@ async fn fetch_check_auth_events( /// effect the state of the room async fn fetch_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, events: &[EventId], ) -> Result> { @@ -982,13 +1031,13 @@ async fn fetch_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { // TODO: add to our DB somehow? From 4cf530c55b32c494f1dde191fc07c2bcfed4ceac Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 21:32:22 -0500 Subject: [PATCH 037/103] Abstract event validation/fetching, add outlier and signing key DB trees Fixed the miss named commented out keys in conduit-example.toml. 
--- conduit-example.toml | 4 +- src/database.rs | 10 +- src/database/globals.rs | 77 ++++- src/database/rooms.rs | 97 ++---- src/error.rs | 7 +- src/main.rs | 1 + src/server_server.rs | 632 +++++++++++++++++++--------------------- 7 files changed, 415 insertions(+), 413 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index b82da2c..bb3ae33 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -23,11 +23,11 @@ port = 6167 max_request_size = 20_000_000 # in bytes # Disable registration. No new users will be able to register on this server -#allow_registration = true +#allow_registration = false # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work -#allow_encryption = true +#allow_encryption = false #allow_federation = false #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 diff --git a/src/database.rs b/src/database.rs index 8fcffd9..ea65d6f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -22,7 +22,7 @@ use std::fs::remove_dir_all; use std::sync::{Arc, RwLock}; use tokio::sync::Semaphore; -#[derive(Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Config { server_name: Box, database_path: String, @@ -102,7 +102,12 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let db = Self { - globals: globals::Globals::load(db.open_tree("global")?, config).await?, + globals: globals::Globals::load( + db.open_tree("global")?, + db.open_tree("servertimeout_signingkey")?, + config, + ) + .await?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, @@ -155,6 +160,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index beb7de5..7eb162b 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,10 @@ use crate::{database::Config, utils, Error, Result}; use log::error; -use ruma::ServerName; -use std::collections::HashMap; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + ServerName, ServerSigningKeyId, +}; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; @@ -20,10 +23,15 @@ pub struct Globals { reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, + pub(super) servertimeout_signingkey: sled::Tree, // ServerName -> algorithm:key + pubkey } impl Globals { - pub async fn load(globals: sled::Tree, config: Config) -> Result { + pub async fn load( + globals: sled::Tree, + server_keys: sled::Tree, + config: Config, + ) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
.expect("utils::generate_keypair always returns Some"); @@ -82,6 +90,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, + servertimeout_signingkey: server_keys, }) } @@ -139,4 +148,66 @@ impl Globals { pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { self.jwt_decoding_key.as_ref() } + + /// TODO: the key valid until timestamp is only honored in room version > 4 + /// Remove the outdated keys and insert the new ones. + /// + /// This doesn't actually check that the keys provided are newer than the old set. + pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { + // Remove outdated keys + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, _) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + + if now > valid_until { + self.servertimeout_signingkey.remove(k)?; + } + } + + let mut key = origin.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice( + &(keys + .valid_until_ts + .duration_since(std::time::UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64) + .to_be_bytes(), + ); + + self.servertimeout_signingkey.insert( + key, + serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), + )?; + Ok(()) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + pub fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result> { + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, bytes) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + // If these keys are still valid use em! + if valid_until > now { + return serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys")); + } + } + Ok(BTreeMap::default()) + } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 88a772b..81abd62 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -65,6 +65,9 @@ pub struct Rooms { /// The state for a given state hash. pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) + + /// Any pdu that has passed the steps up to auth with auth_events. + pub(super) eventid_outlierpdu: sled::Tree, } impl Rooms { @@ -188,72 +191,6 @@ impl Rooms { Ok(events) } - /// Returns a Vec of the related auth events to the given `event`. - /// - /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. - pub fn auth_events_full( - &self, - _room_id: &RoomId, - event_ids: &[EventId], - ) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - if let Some(ev) = self.get_pdu(&ev_id)? 
{ - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) - } - - /// Returns a Vec representing the difference in auth chains of the given `events`. - /// - /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). - pub fn auth_chain_diff( - &self, - room_id: &RoomId, - event_ids: Vec>, - ) -> Result> { - use std::collections::BTreeSet; - - let mut chains = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self - .auth_events_full(room_id, &ids)? - .into_iter() - .map(|pdu| pdu.event_id) - .collect::>(); - chains.push(chain); - } - - if let Some(chain) = chains.first() { - let rest = chains.iter().skip(1).flatten().cloned().collect(); - let common = chain.intersection(&rest).collect::>(); - - Ok(chains - .iter() - .flatten() - .filter(|id| !common.contains(&id)) - .cloned() - .collect::>() - .into_iter() - .collect()) - } else { - Ok(vec![]) - } - } - /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. @@ -475,6 +412,31 @@ impl Rooms { Ok(()) } + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + /// Returns true if the event_id was previously inserted. + pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + let res = self + .eventid_outlierpdu + .insert( + event_id.as_bytes(), + &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + ) + .map(|op| op.is_some())?; + Ok(res) + } + /// Creates a new persisted data unit and adds it to a room. /// /// By this point the incoming event should be fully authenticated, no auth happens @@ -516,6 +478,9 @@ impl Rooms { } } + // We no longer keep this pdu as an outlier + self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; // Mark as read first so the sending client doesn't get a notification even if appending diff --git a/src/error.rs b/src/error.rs index c57843c..fed545c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -122,10 +122,9 @@ impl log::Log for ConduitLogger { let output = format!("{} - {}", record.level(), record.args()); if self.enabled(record.metadata()) - && (record - .module_path() - .map_or(false, |path| path.starts_with("conduit::")) - || record + && (record.module_path().map_or(false, |path| { + path.starts_with("conduit::") || path.starts_with("state") + }) || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/main.rs b/src/main.rs index 4cab764..e5c0399 100644 --- a/src/main.rs +++ b/src/main.rs @@ -167,6 +167,7 @@ fn setup_rocket() -> rocket::Rocket { .figment() .extract() .expect("It looks like your config is invalid. 
Please take a look at the error"); + let data = Database::load_or_create(config) .await .expect("config is valid"); diff --git a/src/server_server.rs b/src/server_server.rs index 64e0a05..6907e34 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,5 +1,4 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; -use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -7,7 +6,6 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ - device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -20,7 +18,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -28,9 +25,12 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, + future::Future, net::{IpAddr, SocketAddr}, + pin::Pin, + result::Result as StdResult, sync::Arc, time::{Duration, SystemTime}, }; @@ -575,6 +575,26 @@ pub async fn send_transaction_message_route<'a>( // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + // If we have no idea about this room skip the PDU + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); + continue; + } + }; + if !db.rooms.exists(&room_id)? { + resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); + continue; + } + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { @@ -583,20 +603,12 @@ pub async fn send_transaction_message_route<'a>( for key in entity.keys() { // TODO: save this in a DB maybe... // fetch the public signing key - let res = db - .sending - .send_federation_request( - &db.globals, - <&ServerName>::try_from(key.as_str()).unwrap(), - get_server_keys::v2::Request::new(), - ) - .await?; + let origin = <&ServerName>::try_from(key.as_str()).unwrap(); + let keys = fetch_signing_keys(&db, origin).await?; pub_key_map.insert( - res.server_key.server_name.to_string(), - res.server_key - .verify_keys - .into_iter() + origin.to_string(), + keys.into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(), ); @@ -615,10 +627,31 @@ pub async fn send_transaction_message_route<'a>( continue; } - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let mut val = match signature_and_hash_check(&pub_key_map, value) { + // TODO: make this persist but not a DB Tree... + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. 
This could potentially also be some sort of trie (suffix tree) + // like structure so that once an auth event is known it would know (using indexes maybe) all of + // the auth events that it references. + let mut auth_cache = EventMap::new(); + + // 1. check the server is in the room (optional) + // 2. check content hash, redact if doesn't match + // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events + // 6. persist this event as an outlier + // 7. if not timeline event: stop + let pdu = match validate_event( + &db, + value, + event_id.clone(), + &pub_key_map, + server_name, + // All the auth events gathered will be here + &mut auth_cache, + ) + .await + { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -626,59 +659,31 @@ pub async fn send_transaction_message_route<'a>( } }; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = match serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) { - Ok(pdu) => pdu, - Err(_) => { - resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); - continue; - } - }; + let pdu = Arc::new(pdu.clone()); - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id)? { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - - let event = Arc::new(pdu.clone()); - dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + let previous = match fetch_events( + &db, + server_name, + &pub_key_map, + &pdu.prev_events, + &mut auth_cache, + ) + .await + { + Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), _ => None, }; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. 
- let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => { - resolved_map.insert( - pdu.event_id, - Err("Failed to recursively gather auth events".into()), - ); - continue; - } - }; - - let mut event_map: state_res::EventMap> = auth_events + let mut event_map: state_res::EventMap> = auth_cache .iter() - .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(k, v)| (k.clone(), v.clone())) .collect(); // Check that the event passes auth based on the auth_events let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &pdu.auth_events .iter() @@ -696,9 +701,10 @@ pub async fn send_transaction_message_route<'a>( None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( - pdu.event_id, + pdu.event_id().clone(), Err("Event has failed auth check with auth events".into()), ); continue; @@ -720,7 +726,14 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events( + &db, + server_name, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, + ) + .await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -732,21 +745,26 @@ pub async fn send_transaction_message_route<'a>( let state = state .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); ( state, - fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) - .await? - .into_iter() - .map(Arc::new) - .collect(), + fetch_events( + &db, + server_name, + &pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await? + .into_iter() + .collect(), ) } Err(_) => { resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Fetching state for event failed".into()), ); continue; @@ -755,7 +773,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &state_at_event, None, // TODO: third party invite @@ -764,37 +782,21 @@ pub async fn send_transaction_message_route<'a>( { // Event failed auth with state_at resolved_map.insert( - pdu.event_id, + event_id, Err("Event has failed auth check with state at the event".into()), ); continue; } // End of step 5. - // The event could still be soft failed - append_state_soft(&db, &pdu)?; - // Gather the forward extremities and resolve - let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states: Vec>> = vec![]; - for id in &forward_extrems { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); - - fork_states.push(state); - } else { - todo!("we don't know of a pdu that is part of our known forks OOPS") + let fork_states = match forward_extremity_ids(&db, &pdu) { + Ok(states) => states, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; } - } + }; // Step 6. 
event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { @@ -803,19 +805,47 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { - let auth_events = fork_states - .iter() - .map(|map| { - db.rooms - .auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), + let mut auth_events = vec![]; + // this keeps track if we error so we can break out of these inner loops + // to continue on with the incoming PDU's + let mut failed = false; + for map in &fork_states { + let mut state_auth = vec![]; + for pdu in map.values() { + let event = match auth_cache.get(pdu.event_id()) { + Some(aev) => aev.clone(), + // We should know about every event at this point but just incase... + None => match fetch_events( + &db, + server_name, + &pub_key_map, + &[pdu.event_id().clone()], + &mut auth_cache, ) - .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) - }) - .collect::>>()?; + .await + .map(|mut vec| vec.remove(0)) + { + Ok(aev) => aev.clone(), + Err(_) => { + resolved_map.insert( + event_id.clone(), + Err("Event has been soft failed".into()), + ); + failed = true; + break; + } + }, + }; + state_auth.push(event); + } + if failed { + break; + } + auth_events.push(state_auth); + } + if failed { + continue; + } // Add everything we will need to event_map event_map.extend( @@ -862,74 +892,163 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous, &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { - // Soft fail + // Soft fail, we add the event as an outlier. resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Event has been soft failed".into()), ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id().clone(), Ok(())); + resolved_map.insert(pdu.event_id().clone(), Ok(())); } } Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } -async fn auth_each_event( - db: &Database, +/// Validate any event that is given to us by another server. +/// +/// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). +/// 2. Passes signature checks, otherwise event is dropped. +/// 3. Passes hash checks, otherwise it is redacted before being processed further. +/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). +/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +fn validate_event<'a>( + db: &'a Database, value: CanonicalJsonObject, event_id: EventId, - pub_key_map: &PublicKeyMap, - server_name: &ServerName, - auth_cache: EventMap>, -) -> std::result::Result { - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. 
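
A note on the failed flag used above while collecting per-fork auth events: it exists only to break out of the inner loops and then skip to the next incoming PDU. A later patch in this series replaces it with a labeled outer loop ('main_pdu_loop) and continue 'main_pdu_loop, which expresses the same control flow directly. A small self-contained sketch of that pattern (names are illustrative):

fn keep_valid(batches: &[Vec<u32>]) -> Vec<u32> {
    let mut accepted = Vec::new();
    'per_pdu: for events in batches {
        for &ev in events {
            if ev == 0 {
                // Equivalent of setting `failed = true` and breaking out of
                // both loops: give up on this PDU and move on to the next one.
                continue 'per_pdu;
            }
            accepted.push(ev);
        }
    }
    accepted
}
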
- let mut val = signature_and_hash_check(&pub_key_map, value)?; + pub_key_map: &'a PublicKeyMap, + origin: &'a ServerName, + auth_cache: &'a mut EventMap>, +) -> Pin> + 'a + Send>> { + Box::pin(async move { + let mut val = signature_and_hash_check(&pub_key_map, value)?; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU".to_string())?; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { - return Err("Room is unknown to this server".into()); - } + fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) + .await + .map_err(|_| "Event failed auth chain check".to_string())?; - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + db.rooms + .append_pdu_outlier(pdu.event_id(), &pdu) + .map_err(|e| e.to_string())?; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. - let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => return Err("Failed to recursively gather auth events".into()), - }; - - Ok(pdu) + Ok(pdu) + }) } +/// Find the event and auth it. +/// +/// 1. Look in the main timeline (pduid_pdu tree) +/// 2. Look at outlier pdu tree +/// 3. Ask origin server over federation +/// 4. TODO: Ask other servers over federation? +async fn fetch_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + events: &[EventId], + auth_cache: &mut EventMap>, +) -> Result>> { + let mut pdus = vec![]; + for id in events { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db.rooms.get_pdu_outlier(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; + + Arc::new(pdu) + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }, + }; + pdus.push(pdu); + } + Ok(pdus) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. 
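
One detail worth calling out in validate_event above: it awaits fetch_check_auth_events and fetch_events, and fetch_events in turn calls validate_event for anything it has to pull over federation. A plain async fn cannot recurse like that, because the compiler would have to size an infinitely nested future, so the function returns a boxed future (Pin<Box<dyn Future<Output = ...> + Send>>), the same shape a later patch names with the AsyncRecursiveResult alias. A self-contained sketch of the workaround:

use std::{future::Future, pin::Pin};

// A recursive async computation has to box its future so the return type has a known size.
fn depth(n: u32) -> Pin<Box<dyn Future<Output = u32> + Send>> {
    Box::pin(async move {
        if n == 0 {
            0
        } else {
            // The recursive call goes through the boxed indirection.
            depth(n - 1).await + 1
        }
    })
}
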
If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + +/// Search the DB for the signing keys of the given server, if we don't have them +/// fetch them from the server and save to our DB. +async fn fetch_signing_keys( + db: &Database, + origin: &ServerName, +) -> Result> { + match db.globals.signing_keys_for(origin)? { + keys if !keys.is_empty() => Ok(keys), + _ => { + let keys = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?; + db.globals.add_signing_key(origin, &keys.server_key)?; + Ok(keys.server_key.verify_keys) + } + } +} fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -954,122 +1073,29 @@ fn signature_and_hash_check( ) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have missing events it fails. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], -) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); +fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { + let mut fork_states = vec![]; + for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - let ev = match db.rooms.get_pdu(&ev_id)? 
{ - Some(pdu) => pdu, - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &ev_id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map") - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, - }; - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) -} - -/// TODO: this needs to add events to the DB in a way that does not -/// effect the state of the room -async fn fetch_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - events: &[EventId], -) -> Result> { - let mut pdus = vec![]; - for id in events { - match db.rooms.get_pdu(id)? { - Some(pdu) => pdus.push(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - // TODO: add to our DB somehow? - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); - - pdus.push(pdu); - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + fork_states.push(state); + } else { + return Err(Error::Conflict( + "we don't know of a pdu that is part of our known forks OOPS", + )); } } - Ok(pdus) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - db.rooms.get_pdu_leaves(room_id) + Ok(fork_states) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -1078,9 +1104,12 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. 
+ let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + db.rooms.append_pdu( - pdu, + &pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), @@ -1089,78 +1118,9 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { &db.admin, )?; - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // db.rooms.append_pdu( - // pdu, - // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - // count, - // pdu_id.clone().into(), - // &db.globals, - // &db.account_data, - // &db.admin, - // )?; - - Ok(()) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() -} - -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + db.rooms.set_room_state(&pdu.room_id, &statehashid)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From 4b2eb5ab82d6fc80aac2ab78a0c02d3add245743 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 11:05:57 -0500 Subject: [PATCH 038/103] Fix ALL clippy warnings --- src/client_server/account.rs | 78 ++++------------------ src/client_server/membership.rs | 40 ++--------- src/client_server/message.rs | 6 +- src/client_server/profile.rs | 12 +--- src/client_server/redact.rs | 6 +- src/client_server/room.rs | 90 +++++-------------------- src/client_server/state.rs | 6 +- src/database/admin.rs | 6 +- src/database/rooms.rs | 113 ++++++++++++++------------------ src/database/sending.rs | 19 +++--- src/error.rs | 8 ++- src/server_server.rs | 4 +- 12 files changed, 106 insertions(+), 282 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 75544b7..6927a53 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -239,11 +239,7 @@ pub async fn 
register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Make conduit bot join @@ -264,11 +260,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -302,11 +294,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.1 Join Rules @@ -323,11 +311,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -346,11 +330,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -367,11 +347,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 6. Events implied by name and topic @@ -390,11 +366,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( @@ -410,11 +382,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Room alias @@ -436,11 +404,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; @@ -463,11 +427,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( PduBuilder { @@ -486,11 +446,7 @@ pub async fn register_route( }, &user_id, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Send welcome message @@ -515,11 +471,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -691,11 +643,7 @@ pub async fn deactivate_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 40e4183..70bb480 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -124,11 +124,7 @@ pub async fn leave_room_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -164,11 +160,7 @@ pub async fn invite_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -220,11 +212,7 @@ pub async fn kick_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -280,11 +268,7 @@ pub async fn ban_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -332,11 +316,7 @@ pub async fn unban_user_route( }, &sender_user, &body.room_id, - 
&db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -685,9 +665,7 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + &db, )?; if state_events.contains(ev_id) { @@ -717,11 +695,7 @@ async fn join_room_by_id_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index c56cc94..c64c390 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -68,11 +68,7 @@ pub async fn send_message_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.transaction_ids.add_txnid( diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 761443d..21759a8 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -63,11 +63,7 @@ pub async fn set_displayname_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update @@ -160,11 +156,7 @@ pub async fn set_avatar_url_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 212e751..282c35a 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -31,11 +31,7 @@ pub async fn redact_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 092e083..631d87b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -65,11 +65,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Let the room creator join @@ -90,11 +86,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -135,11 +127,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4. Events set by preset @@ -175,11 +163,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -196,11 +180,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -225,11 +205,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 5. 
Events listed in initial_state @@ -248,11 +224,7 @@ pub async fn create_room_route( pdu_builder, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -273,11 +245,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -295,11 +263,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -322,11 +286,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -413,11 +373,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Get the old room federations status @@ -457,11 +413,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Join the new room @@ -482,11 +434,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Recommended transferable state events list from the specs @@ -519,11 +467,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -566,11 +510,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index faa415d..ae5e251 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -284,11 +284,7 @@ pub async fn send_state_event_for_key_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; Ok(event_id) diff --git a/src/database/admin.rs b/src/database/admin.rs index 1fb1983..501722e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -60,11 +60,7 @@ impl Admin { }, &conduit_user, &conduit_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, ) .unwrap(); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 81abd62..d62d4b0 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,7 +2,7 @@ mod edus; pub use edus::RoomEdus; -use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; +use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::error; use regex::Regex; use ring::digest; @@ -447,9 +447,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, - globals: &super::globals::Globals, - account_data: &super::account_data::AccountData, - admin: &super::admin::Admin, + db: &Database, ) -> Result<()> { // Make unsigned fields correct. 
This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -486,7 +484,7 @@ impl Rooms { // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count, &globals)?; + .private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; self.pduid_pdu.insert( &pdu_id, @@ -521,8 +519,8 @@ impl Rooms { ) })?, &pdu.sender, - account_data, - globals, + &db.account_data, + &db.globals, )?; } } @@ -540,10 +538,10 @@ impl Rooms { self.tokenids.insert(key, &[])?; } - if body.starts_with(&format!("@conduit:{}: ", globals.server_name())) + if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &format!("#admins:{}", globals.server_name()) + &format!("#admins:{}", db.globals.server_name()) .try_into() .expect("#admins:server_name is a valid room alias"), )? @@ -570,10 +568,11 @@ impl Rooms { ); match parsed_config { Ok(yaml) => { - admin.send(AdminCommand::RegisterAppservice(yaml)); + db.admin + .send(AdminCommand::RegisterAppservice(yaml)); } Err(e) => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( format!( "Could not parse appservice config: {}", @@ -584,7 +583,7 @@ impl Rooms { } } } else { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( "Expected code block in command body.", ), @@ -592,10 +591,10 @@ impl Rooms { } } "list_appservices" => { - admin.send(AdminCommand::ListAppservices); + db.admin.send(AdminCommand::ListAppservices); } _ => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain(format!( "Command: {}, Args: {:?}", command, args @@ -696,17 +695,12 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. 
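
The bulk of this patch is the mechanical half of the clippy cleanup: call sites that used to thread &db.globals, &db.sending, &db.admin, &db.account_data and &db.appservice through as separate arguments now pass the whole &Database, which is also why the #[allow(clippy::too_many_arguments)] just below can be dropped. A hedged sketch of the shape of that refactor (struct and field names are stand-ins, not Conduit's exact definitions):

struct Globals;
struct Sending;
struct Admin;

struct Database {
    globals: Globals,
    sending: Sending,
    admin: Admin,
    // ... account_data, appservice, rooms, and so on.
}

// Before: fn append(.., globals: &Globals, sending: &Sending, admin: &Admin, ..)
// After: a single borrow of the whole database, with fields taken as needed.
fn append(db: &Database) {
    let _globals = &db.globals;
    let _sending = &db.sending;
    let _admin = &db.admin;
}
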
- #[allow(clippy::too_many_arguments)] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - globals: &super::globals::Globals, - sending: &super::sending::Sending, - admin: &super::admin::Admin, - account_data: &super::account_data::AccountData, - appservice: &super::appservice::Appservice, + db: &Database, ) -> Result { let PduBuilder { event_type, @@ -789,7 +783,7 @@ impl Rooms { if !match event_type { EventType::RoomEncryption => { // Only allow encryption events if it's allowed in the config - globals.allow_encryption() + db.globals.allow_encryption() } EventType::RoomMember => { let prev_event = self @@ -895,13 +889,13 @@ impl Rooms { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(globals.server_name()) + to_canonical_value(db.globals.server_name()) .expect("server name is a valid CanonicalJsonValue"), ); ruma::signatures::hash_and_sign_event( - globals.server_name().as_str(), - globals.keypair(), + db.globals.server_name().as_str(), + db.globals.keypair(), &mut pdu_json, &RoomVersionId::Version6, ) @@ -922,24 +916,16 @@ impl Rooms { // Increment the last index and use that // This is also the next_batch/since value - let count = globals.next_count()?; + let count = db.globals.next_count()?; let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu_id, &pdu, &globals)?; + let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu( - &pdu, - pdu_json, - count, - pdu_id.clone().into(), - globals, - account_data, - admin, - )?; + self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist @@ -948,31 +934,28 @@ impl Rooms { for server in self .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != globals.server_name()) + .filter(|server| &**server != db.globals.server_name()) { - sending.send_pdu(&server, &pdu_id)?; + db.sending.send_pdu(&server, &pdu_id)?; } - for appservice in appservice.iter_all().filter_map(|r| r.ok()) { + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") .and_then(|users| users.as_sequence()) - .map_or_else( - || Vec::new(), - |users| { - users - .iter() - .map(|users| { - users - .get("regex") - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()) - }) - .filter_map(|o| o) - .collect::>() - }, - ); + .map_or_else(Vec::new, |users| { + users + .iter() + .map(|users| { + users + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); let aliases = namespaces .get("aliases") .and_then(|users| users.get("regex")) @@ -989,29 +972,31 @@ impl Rooms { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, globals.server_name()).ok() + UserId::parse_with_server_name(string, db.globals.server_name()).ok() }); - if bridge_user_id.map_or(false, |bridge_user_id| { - self.is_joined(&bridge_user_id, 
room_id).unwrap_or(false) - }) || users.iter().any(|users| { + let user_is_joined = + |bridge_user_id| self.is_joined(&bridge_user_id, room_id).unwrap_or(false); + let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) || pdu.kind == EventType::RoomMember && pdu .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - }) || aliases.map_or(false, |aliases| { + }; + let matching_aliases = |aliases: Regex| { room_aliases .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) - }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || self - .room_members(&room_id) - .filter_map(|r| r.ok()) - .any(|member| users.iter().any(|regex| regex.is_match(member.as_str()))) + }; + + if bridge_user_id.map_or(false, user_is_joined) + || users.iter().any(matching_users) + || aliases.map_or(false, matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) { - sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } } } diff --git a/src/database/sending.rs b/src/database/sending.rs index e6cdc76..101daf3 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -149,6 +149,15 @@ impl Sending { let servernamepduid = key.clone(); let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); + let exponential_backoff = |(tries, instant): &(u32, Instant)| { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60*60*24) { + min_elapsed_duration = Duration::from_secs(60*60*24); + } + + instant.elapsed() < min_elapsed_duration + }; if let Some((server, is_appservice, pdu_id)) = utils::string_from_bytes( parts .next() @@ -173,15 +182,7 @@ impl Sending { .map(|pdu_id| (server, is_appservice, pdu_id)) ) .filter(|(server, is_appservice, _)| { - if last_failed_try.get(server).map_or(false, |(tries, instant)| { - // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(60) * *tries * *tries; - if min_elapsed_duration > Duration::from_secs(60*60*24) { - min_elapsed_duration = Duration::from_secs(60*60*24); - } - - instant.elapsed() < min_elapsed_duration - }) { + if last_failed_try.get(server).map_or(false, exponential_backoff) { return false; } diff --git a/src/error.rs b/src/error.rs index fed545c..13efce6 100644 --- a/src/error.rs +++ b/src/error.rs @@ -121,10 +121,12 @@ impl log::Log for ConduitLogger { fn log(&self, record: &log::Record<'_>) { let output = format!("{} - {}", record.level(), record.args()); + let match_mod_path = + |path: &str| path.starts_with("conduit::") || path.starts_with("state"); + if self.enabled(record.metadata()) - && (record.module_path().map_or(false, |path| { - path.starts_with("conduit::") || path.starts_with("state") - }) || record + && (record.module_path().map_or(false, match_mod_path) + || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/server_server.rs b/src/server_server.rs index 6907e34..ae59583 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1113,9 +1113,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - 
&db.admin, + &db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time From db0aee3318b39d24ac37915ce49018117c0c03f2 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 15:46:47 -0500 Subject: [PATCH 039/103] Use the auth_events for step 6, WIP forward_extremity_ids fn --- src/server_server.rs | 159 ++++++++++++++++++++++++++++--------------- 1 file changed, 104 insertions(+), 55 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ae59583..77f0fa8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -569,7 +569,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); - for pdu in &body.pdus { + 'main_pdu_loop: for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks @@ -660,7 +660,6 @@ pub async fn send_transaction_message_route<'a>( }; let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB let previous = match fetch_events( &db, @@ -675,6 +674,7 @@ pub async fn send_transaction_message_route<'a>( _ => None, }; + // [auth_cache] At this point we have the auth chain of the incoming event. let mut event_map: state_res::EventMap> = auth_cache .iter() .map(|(k, v)| (k.clone(), v.clone())) @@ -688,7 +688,7 @@ pub async fn send_transaction_message_route<'a>( &pdu.auth_events .iter() .map(|id| { - event_map + auth_cache .get(id) .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) .ok_or_else(|| { @@ -790,7 +790,15 @@ pub async fn send_transaction_message_route<'a>( // End of step 5. // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids(&db, &pdu) { + let fork_states = match forward_extremity_ids( + &db, + &pdu, + server_name, + &pub_key_map, + &mut auth_cache, + ) + .await + { Ok(states) => states, Err(_) => { resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); @@ -805,47 +813,44 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // TODO: remove this is for current debugging Jan, 15 2021 + let mut number_fetches = 0_u32; let mut auth_events = vec![]; // this keeps track if we error so we can break out of these inner loops // to continue on with the incoming PDU's - let mut failed = false; for map in &fork_states { let mut state_auth = vec![]; - for pdu in map.values() { - let event = match auth_cache.get(pdu.event_id()) { + for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { + let event = match auth_cache.get(auth_id) { Some(aev) => aev.clone(), // We should know about every event at this point but just incase... 
None => match fetch_events( &db, server_name, &pub_key_map, - &[pdu.event_id().clone()], + &[auth_id.clone()], &mut auth_cache, ) .await - .map(|mut vec| vec.remove(0)) - { - Ok(aev) => aev.clone(), + .map(|mut vec| { + number_fetches += 1; + vec.remove(0) + }) { + Ok(aev) => aev, Err(_) => { resolved_map.insert( event_id.clone(), Err("Event has been soft failed".into()), ); - failed = true; - break; + continue 'main_pdu_loop; } }, }; state_auth.push(event); } - if failed { - break; - } auth_events.push(state_auth); } - if failed { - continue; - } + info!("{} event's were not in the auth_cache", number_fetches); // Add everything we will need to event_map event_map.extend( @@ -886,7 +891,13 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), - Err(e) => panic!("{:?}", e), + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("State resolution failed, either an event could not be found or deserialization".into()), + ); + continue 'main_pdu_loop; + } } }; @@ -914,6 +925,7 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). @@ -955,6 +967,37 @@ fn validate_event<'a>( }) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + /// Find the event and auth it. /// /// 1. Look in the main timeline (pduid_pdu tree) @@ -1000,36 +1043,6 @@ async fn fetch_events( Ok(pdus) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have any missing events it fails. -/// -/// The `auth_cache` is filled instead of returning a `Vec`. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], - auth_cache: &mut EventMap>, -) -> Result<()> { - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if auth_cache.contains_key(&ev_id) { - continue; - } - - let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) - .await - .map(|mut vec| vec.remove(0))?; - - stack.extend(ev.auth_events()); - auth_cache.insert(ev.event_id().clone(), ev); - } - Ok(()) -} - /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. 
async fn fetch_signing_keys( @@ -1049,6 +1062,7 @@ async fn fetch_signing_keys( } } } + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -1073,9 +1087,23 @@ fn signature_and_hash_check( ) } -fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { +async fn forward_extremity_ids( + db: &Database, + pdu: &PduEvent, + origin: &ServerName, + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + + for incoming_leaf in &pdu.prev_events { + if !current_leaves.contains(incoming_leaf) { + current_leaves.push(incoming_leaf.clone()); + } + } + let mut fork_states = vec![]; - for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1090,11 +1118,32 @@ fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result Date: Sat, 16 Jan 2021 16:37:20 -0500 Subject: [PATCH 040/103] Fixing the incoming events algorithm (review with time) --- src/database/rooms.rs | 2 +- src/server_server.rs | 237 ++++++++++++++++++++++++------------------ 2 files changed, 138 insertions(+), 101 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d62d4b0..325a2e2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -150,7 +150,7 @@ impl Rooms { } } - /// Returns the last state hash key added to the db. + /// Returns the state hash for this pdu. pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { Ok(self.pduid_statehash.get(pdu_id)?) } diff --git a/src/server_server.rs b/src/server_server.rs index 77f0fa8..0eb7d6f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,6 +5,7 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ + client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -590,6 +591,8 @@ pub async fn send_transaction_message_route<'a>( continue; } }; + + // 1. check the server is in the room (optional) if !db.rooms.exists(&room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); continue; @@ -634,14 +637,13 @@ pub async fn send_transaction_message_route<'a>( // the auth events that it references. let mut auth_cache = EventMap::new(); - // 1. check the server is in the room (optional) // 2. check content hash, redact if doesn't match // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events - // 6. persist this event as an outlier // 7. if not timeline event: stop - let pdu = match validate_event( + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let (pdu, previous) = match validate_event( &db, value, event_id.clone(), @@ -659,59 +661,16 @@ pub async fn send_transaction_message_route<'a>( } }; - let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events( - &db, - server_name, - &pub_key_map, - &pdu.prev_events, - &mut auth_cache, - ) - .await - { - Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), - _ => None, + let single_prev = if previous.len() == 1 { + previous.first().cloned() + } else { + None }; - // [auth_cache] At this point we have the auth chain of the incoming event. - let mut event_map: state_res::EventMap> = auth_cache - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(); + // 6. persist the event as an outlier. + db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; - // Check that the event passes auth based on the auth_events - let is_authed = state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous.clone(), - &pdu.auth_events - .iter() - .map(|id| { - auth_cache - .get(id) - .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) - .ok_or_else(|| { - Error::Conflict( - "Auth event not found, event failed recursive auth checks.", - ) - }) - }) - .collect::>>()?, - None, // TODO: third party invite - ) - .map_err(|_e| Error::Conflict("Auth check failed"))?; - - if !is_authed { - resolved_map.insert( - pdu.event_id().clone(), - Err("Event has failed auth check with auth events".into()), - ); - continue; - } - // End of step 4. - - // Step 5. event passes auth based on state at the event + // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -757,9 +716,7 @@ pub async fn send_transaction_message_route<'a>( &res.auth_chain_ids, &mut auth_cache, ) - .await? - .into_iter() - .collect(), + .await?, ) } Err(_) => { @@ -771,10 +728,11 @@ pub async fn send_transaction_message_route<'a>( } }; + // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous.clone(), + single_prev.clone(), &state_at_event, None, // TODO: third party invite ) @@ -787,10 +745,34 @@ pub async fn send_transaction_message_route<'a>( ); continue; } - // End of step 5. + // End of step 10. + + // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let current_state = db + .rooms + .room_state_full(pdu.room_id())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + single_prev.clone(), + ¤t_state, + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Soft fail, we add the event as an outlier. + resolved_map.insert( + pdu.event_id().clone(), + Err("Event has been soft failed".into()), + ); + }; // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids( + let fork_states = match forward_extremities( &db, &pdu, server_name, @@ -806,7 +788,9 @@ pub async fn send_transaction_message_route<'a>( } }; - // Step 6. event passes auth based on state of all forks and current room state + // 13. 
start state-res with all previous forward extremities minus the ones that are in + // the prev_events of this event plus the new one created by this event and use + // the result as the new room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() @@ -852,6 +836,7 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); + let mut event_map = EventMap::new(); // Add everything we will need to event_map event_map.extend( auth_events @@ -904,7 +889,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous, + single_prev, &state_at_forks, None, ) @@ -925,14 +910,19 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// An async function that can recursively calls itself. +type AsyncRecursiveResult<'a, T> = Pin> + 'a + Send>>; + /// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). -/// 2. Passes signature checks, otherwise event is dropped. -/// 3. Passes hash checks, otherwise it is redacted before being processed further. -/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). -/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +/// 2. check content hash, redact if doesn't match +/// 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events +/// 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" +/// 5. reject "due to auth events" if the event doesn't pass auth based on the auth events +/// 7. if not timeline event: stop +/// 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events fn validate_event<'a>( db: &'a Database, value: CanonicalJsonObject, @@ -940,9 +930,24 @@ fn validate_event<'a>( pub_key_map: &'a PublicKeyMap, origin: &'a ServerName, auth_cache: &'a mut EventMap>, -) -> Pin> + 'a + Send>> { +) -> AsyncRecursiveResult<'a, (Arc, Vec>)> { Box::pin(async move { - let mut val = signature_and_hash_check(&pub_key_map, value)?; + let mut val = + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value + } + } + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -959,11 +964,42 @@ fn validate_event<'a>( .await .map_err(|_| "Event failed auth chain check".to_string())?; - db.rooms - .append_pdu_outlier(pdu.event_id(), &pdu) + let pdu = Arc::new(pdu.clone()); + + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let previous = fetch_events(&db, origin, &pub_key_map, &pdu.prev_events, auth_cache) + .await .map_err(|e| e.to_string())?; - Ok(pdu) + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + if previous.len() == 1 { + previous.first().cloned() + } else { + None + }, + &pdu.auth_events + .iter() + .map(|id| { + auth_cache + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + "Auth event not found, event failed recursive auth checks.".to_string() + }) + }) + .collect::, _>>()?, + None, // TODO: third party invite + ) + .map_err(|_e| "Auth check failed".to_string())?; + + if !is_authed { + return Err("Event has failed auth check with auth events".to_string()); + } + + Ok((pdu, previous)) }) } @@ -990,7 +1026,10 @@ async fn fetch_check_auth_events( let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await - .map(|mut vec| vec.remove(0))?; + .map(|mut vec| { + vec.pop() + .ok_or_else(|| Error::Conflict("Event was not found in fetch_events")) + })??; stack.extend(ev.auth_events()); auth_cache.insert(ev.event_id().clone(), ev); @@ -1028,11 +1067,12 @@ async fn fetch_events( { Ok(res) => { let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; - Arc::new(pdu) + pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, @@ -1063,31 +1103,11 @@ async fn fetch_signing_keys( } } -fn signature_and_hash_check( - pub_key_map: &ruma::signatures::PublicKeyMap, - value: CanonicalJsonObject, -) -> std::result::Result { - Ok( - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - error!("CONTENT HASH FAILED"); - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value - } - } - Err(_e) => { - return Err("Signature verification failed".to_string()); - } - }, - ) -} - -async fn forward_extremity_ids( +/// Gather all state snapshots needed to resolve the current state of the room. +/// +/// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) +async fn forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, @@ -1102,6 +1122,8 @@ async fn forward_extremity_ids( } } + let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; let mut fork_states = vec![]; for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { @@ -1109,6 +1131,10 @@ async fn forward_extremity_ids( .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); + + if current_hash.as_ref() == Some(&state_hash) { + includes_current_state = true; + } let state = db .rooms .state_full(&pdu.room_id, &state_hash)? 
@@ -1144,6 +1170,17 @@ async fn forward_extremity_ids( } } + // This guarantees that our current room state is included + if !includes_current_state && current_hash.is_some() { + fork_states.push( + db.rooms + .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(), + ) + } + Ok(fork_states) } From 7309b2fba99b5c16704829a2729aad20f035ddc8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:08:59 -0500 Subject: [PATCH 041/103] WIP gather and update forward extremities --- src/database/rooms.rs | 18 ++++++ src/pdu.rs | 21 ++++++- src/server_server.rs | 138 ++++++++++++++++++++++++++++-------------- 3 files changed, 132 insertions(+), 45 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 325a2e2..665e328 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,6 +397,24 @@ impl Rooms { Ok(events) } + /// Force an update to the leaves of a room. + pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { + self.roomid_pduleaves.remove(key?)?; + } + + for event_id in event_ids.iter() { + let mut key = prefix.to_owned(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } + /// Replace the leaves of a room with a new event. pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/pdu.rs b/src/pdu.rs index 340ddee..e38410f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,7 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { @@ -284,6 +284,25 @@ impl state_res::Event for PduEvent { } } +// These impl's allow us to dedup state snapshots when resolving state +// for incoming events (federation/send/{txn}). +impl Eq for PduEvent {} +impl PartialEq for PduEvent { + fn eq(&self, other: &Self) -> bool { + self.event_id == other.event_id + } +} +impl PartialOrd for PduEvent { + fn partial_cmp(&self, other: &Self) -> Option { + self.event_id.partial_cmp(&other.event_id) + } +} +impl Ord for PduEvent { + fn cmp(&self, other: &Self) -> Ordering { + self.event_id.cmp(&other.event_id) + } +} + /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. 
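The Eq/PartialOrd/Ord impls above compare nothing but event_id, and that is all it takes to make whole state snapshots comparable: a snapshot is a BTreeMap keyed by (kind, state_key), and BTreeMap is Ord whenever its keys and values are, so identical snapshots gathered from different forward extremities collapse into a single BTreeSet entry before state resolution runs. A small illustration, with a hypothetical stripped-down Pdu in place of PduEvent:

use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;

// Hypothetical minimal PDU; ordering by event_id mirrors the impls above.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct Pdu {
    event_id: String,
}

// Same shape as the fork states collected while handling /send.
type StateMap = BTreeMap<(String, Option<String>), Arc<Pdu>>;

fn main() {
    let create = Arc::new(Pdu { event_id: "$create".to_string() });

    let snapshot_a: StateMap =
        [(("m.room.create".to_string(), Some(String::new())), create.clone())].into();
    let snapshot_b: StateMap =
        [(("m.room.create".to_string(), Some(String::new())), create.clone())].into();

    // Two extremities that point at the same state produce equal maps,
    // so the set keeps only one of them.
    let mut fork_states = BTreeSet::new();
    fork_states.insert(snapshot_a);
    fork_states.insert(snapshot_b);
    assert_eq!(fork_states.len(), 1);
}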
diff --git a/src/server_server.rs b/src/server_server.rs index 0eb7d6f..16a1a8e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,7 +5,6 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ - client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -25,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -600,31 +599,21 @@ pub async fn send_transaction_message_route<'a>( let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); - if let Some(sig) = value.get("signatures") { - match sig { - CanonicalJsonValue::Object(entity) => { - for key in entity.keys() { - // TODO: save this in a DB maybe... - // fetch the public signing key - let origin = <&ServerName>::try_from(key.as_str()).unwrap(); - let keys = fetch_signing_keys(&db, origin).await?; - pub_key_map.insert( - origin.to_string(), - keys.into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(), - ); - } - } - _ => { - resolved_map.insert( - event_id, - Err("`signatures` is not a JSON object".to_string()), - ); - continue; - } - } + if let Some(CanonicalJsonValue::String(sender)) = value.get("sender") { + let sender = + UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); + let origin = sender.server_name(); + + // TODO: this could fail or the server not respond... + let keys = fetch_signing_keys(&db, origin).await?; + + pub_key_map.insert( + origin.to_string(), + keys.into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); } else { resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); continue; @@ -642,8 +631,9 @@ pub async fn send_transaction_message_route<'a>( // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events // 7. if not timeline event: stop - // 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let (pdu, previous) = match validate_event( + // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + // the events found in step 8 can be authed/resolved and appended to the DB + let (pdu, previous): (_, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -670,6 +660,9 @@ pub async fn send_transaction_message_route<'a>( // 6. persist the event as an outlier. db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all + // the checks in this list starting at 1. These are not timeline events. + // // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db @@ -771,8 +764,12 @@ pub async fn send_transaction_message_route<'a>( ); }; - // Gather the forward extremities and resolve - let fork_states = match forward_extremities( + // Step 11. Ensure that the state is derived from the previous current state (i.e. 
we calculated by doing state res + // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) + // + // calculate_forward_extremities takes care of adding the current state if not already in the state sets + // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. + let (mut fork_states, fork_ids) = match calculate_forward_extremities( &db, &pdu, server_name, @@ -788,6 +785,12 @@ pub async fn send_transaction_message_route<'a>( } }; + // add the incoming events to the mix of state snapshots + // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets + fork_states.insert(state_at_event.clone()); + + let fork_states = fork_states.into_iter().collect::>(); + // 13. start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -901,7 +904,9 @@ pub async fn send_transaction_message_route<'a>( Err("Event has been soft failed".into()), ); } else { - append_state(&db, &pdu)?; + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_state(&db, &pdu, &fork_ids)?; + // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } @@ -1106,25 +1111,52 @@ async fn fetch_signing_keys( /// Gather all state snapshots needed to resolve the current state of the room. /// /// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res -/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) -async fn forward_extremities( +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). +/// +/// The state snapshot of the incoming event __needs__ to be added to the resulting list. +async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, -) -> Result>>> { +) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + let mut is_incoming_leaf = true; + // Make sure the incoming event is not already a forward extremity + // FIXME: I think this could happen if different servers send us the same event?? + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + + // If the incoming event is already referenced by an existing event + // then do nothing - it's not a candidate to be a new extremity if + // it has been referenced. + if already_referenced(db, pdu)? { + is_incoming_leaf = false; + // This event has been dealt with already?? + } + + // TODO: + // [dendrite] Checks if any other leaves have been referenced and removes them + // but as long as we update the pdu leaves here and for events on our server this + // should not be possible. 
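The hunk continues below by dropping every existing leaf that the incoming event references through prev_events and, further down, appending the event itself when it still qualifies as a new leaf. Reduced to a pure function over plain strings (a hypothetical stand-in for ruma's EventId), the rule looks like this:

// Hypothetical event-id type for the sketch; the real code uses ruma's EventId.
type EventId = String;

// New forward extremities after accepting `new_event`: anything it references
// via prev_events stops being a leaf, and the event itself becomes one unless
// it was already a known leaf or already referenced elsewhere in the DAG.
fn update_leaves(
    mut leaves: Vec<EventId>,
    new_event: &EventId,
    prev_events: &[EventId],
    already_known: bool,
) -> Vec<EventId> {
    leaves.retain(|leaf| !prev_events.contains(leaf));
    if !already_known {
        leaves.push(new_event.clone());
    }
    leaves
}

fn main() {
    let leaves = vec!["$a".to_string(), "$b".to_string()];
    let new_leaves = update_leaves(leaves, &"$c".to_string(), &["$a".to_string()], false);
    assert_eq!(new_leaves, vec!["$b".to_string(), "$c".to_string()]);
}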
+ + // Remove any forward extremities that are referenced by this incoming events prev_events for incoming_leaf in &pdu.prev_events { - if !current_leaves.contains(incoming_leaf) { - current_leaves.push(incoming_leaf.clone()); + if current_leaves.contains(incoming_leaf) { + if let Some(pos) = current_leaves.iter().position(|x| *x == *incoming_leaf) { + current_leaves.remove(pos); + } } } let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; - let mut fork_states = vec![]; + let mut fork_states = BTreeSet::new(); for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db @@ -1142,8 +1174,10 @@ async fn forward_extremities( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); - fork_states.push(state); + fork_states.insert(state); } else { + error!("Forward extremity not found... {}", id); + let res = db .sending .send_federation_request( @@ -1166,25 +1200,37 @@ async fn forward_extremities( .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); - fork_states.push(state); + fork_states.insert(state); } } + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.push( + fork_states.insert( db.rooms .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(), - ) + ); } - Ok(fork_states) + Ok((fork_states, dbg!(current_leaves))) } -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { +/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) +fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { + Ok(false) +} + +fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1195,13 +1241,17 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( - &pdu, + pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db, )?; + // If we update the room leaves after calling append_pdu it will stick since append_pdu + // calls replace_pdu_leaves with only the given event. 
+ db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; + // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From 835cf80acd6be7d99b5bdc83c7f891dc167901d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 11 Feb 2021 13:16:14 +0100 Subject: [PATCH 042/103] fix: pushers --- src/database/pusher.rs | 62 ++++++++++++++++++++++++++--------------- src/database/rooms.rs | 6 +++- src/database/sending.rs | 55 +++++++++++------------------------- src/server_server.rs | 3 +- 4 files changed, 63 insertions(+), 63 deletions(-) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index c4f5801..336ef57 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -35,7 +35,10 @@ impl PushData { } pub fn set_pusher(&self, sender: &UserId, pusher: Pusher) -> Result<()> { + println!("CCCCCCCCCCCCCCCCCCCCCc"); + dbg!(&pusher); let mut key = sender.as_bytes().to_vec(); + key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); // There are 2 kinds of pushers but the spec says: null deletes the pusher. @@ -48,7 +51,7 @@ impl PushData { } self.senderkey_pusher.insert( - key, + dbg!(key), &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), )?; @@ -56,11 +59,16 @@ impl PushData { } pub fn get_pusher(&self, sender: &UserId) -> Result> { + let mut prefix = sender.as_bytes().to_vec(); + prefix.push(0xff); + self.senderkey_pusher - .scan_prefix(sender.as_bytes()) + .scan_prefix(dbg!(prefix)) .values() - .map(|push: std::result::Result| { - let push = push.map_err(|_| Error::bad_database("Invalid push bytes in db."))?; + .map(|push| { + println!("DDDDDDDDDDDDDDDDDDDDDDDDDD"); + let push = + dbg!(push).map_err(|_| Error::bad_database("Invalid push bytes in db."))?; Ok(serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) }) @@ -85,14 +93,17 @@ where Error::BadServerResponse("Invalid destination") })?; - let mut reqwest_request = reqwest::Request::try_from(http_request) + let reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); // TODO: we could keep this very short and let expo backoff do it's thing... 
- *reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); + //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; + let reqwest_response = globals + .reqwest_client() + .execute(dbg!(reqwest_request)) + .await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { @@ -154,6 +165,12 @@ pub async fn send_push_notice( pdu: &PduEvent, db: &Database, ) -> Result<()> { + if let Some(msgtype) = pdu.content.get("msgtype").and_then(|b| b.as_str()) { + if msgtype == "m.notice" { + return Ok(()); + } + } + for rule in ruleset.into_iter() { // TODO: can actions contain contradictory Actions if rule @@ -165,7 +182,7 @@ pub async fn send_push_notice( continue; } - match rule.rule_id.as_str() { + match dbg!(rule.rule_id.as_str()) { ".m.rule.master" => {} ".m.rule.suppress_notices" => { if pdu.kind == EventType::RoomMessage @@ -437,7 +454,8 @@ async fn send_notice( db: &Database, name: &str, ) -> Result<()> { - let (http, _emails): (Vec<&Pusher>, _) = pushers + println!("BBBBBBBBBBBBBBBr"); + let (http, _emails): (Vec<&Pusher>, _) = dbg!(pushers) .iter() .partition(|pusher| pusher.kind == Some(PusherKind::Http)); @@ -445,7 +463,7 @@ async fn send_notice( // Two problems with this // 1. if "event_id_only" is the only format kind it seems we should never add more info // 2. can pusher/devices have conflicting formats - for pusher in http { + for pusher in dbg!(http) { let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); let url = if let Some(url) = pusher.data.url.as_ref() { url @@ -484,12 +502,12 @@ async fn send_notice( if event_id_only { error!("SEND PUSH NOTICE `{}`", name); - // send_request( - // &db.globals, - // &url, - // send_event_notification::v1::Request::new(notifi), - // ) - // .await?; + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } else { notifi.sender = Some(&event.sender); notifi.event_type = Some(&event.kind); @@ -512,12 +530,12 @@ async fn send_notice( notifi.room_name = room_name.as_deref(); error!("SEND PUSH NOTICE Full `{}`", name); - // send_request( - // &db.globals, - // &url, - // send_event_notification::v1::Request::new(notifi), - // ) - // .await?; + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ac7d27d..0f02e33 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1358,6 +1358,7 @@ impl Rooms { self.alias_roomid .insert(alias.alias(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); + aliasid.push(0xff); aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); self.aliasid_alias.insert(aliasid, &*alias.alias())?; } else { @@ -1370,7 +1371,10 @@ impl Rooms { "Alias does not exist.", ))?; - for key in self.aliasid_alias.scan_prefix(room_id).keys() { + let mut prefix = room_id.to_vec(); + prefix.push(0xff); + + for key in self.aliasid_alias.scan_prefix(prefix).keys() { self.aliasid_alias.remove(key?)?; } } diff --git a/src/database/sending.rs b/src/database/sending.rs index ce81e8c..cbe9ffa 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,7 +1,7 @@ use std::{ collections::HashMap, convert::TryFrom, - fmt::{Debug, Display, Formatter}, + fmt::Debug, sync::Arc, time::{Duration, Instant, SystemTime}, }; @@ -25,16 +25,6 @@ pub enum 
OutgoingKind { Normal(Box), } -impl Display for OutgoingKind { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - OutgoingKind::Appservice(name) => f.write_str(name.as_str()), - OutgoingKind::Normal(name) => f.write_str(name.as_str()), - OutgoingKind::Push(_) => f.write_str("Push notification TODO"), - } - } -} - #[derive(Clone)] pub struct Sending { /// The state for a given state hash. @@ -143,7 +133,7 @@ impl Sending { } } Err((outgoing_kind, e)) => { - info!("Couldn't send transaction to {}\n{}", outgoing_kind, e); + info!("Couldn't send transaction to {:?}\n{}", outgoing_kind, e); let mut prefix = match &outgoing_kind { OutgoingKind::Appservice(serv) => { let mut p = b"+".to_vec(); @@ -278,6 +268,8 @@ impl Sending { key.extend_from_slice(pdu_id); self.servernamepduids.insert(key, b"")?; + println!("AAAA"); + Ok(()) } @@ -306,7 +298,7 @@ impl Sending { pdu_ids: Vec, db: &Database, ) -> std::result::Result { - match kind { + match dbg!(kind) { OutgoingKind::Appservice(server) => { let pdu_jsons = pdu_ids .iter() @@ -364,25 +356,12 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); - for pdu in &pdus { + for pdu in dbg!(&pdus) { // Redacted events are not notification targets (we don't send push for them) if pdu.unsigned.get("redacted_because").is_some() { continue; } - // Skip events that came from the admin room - if db - .rooms - .room_aliases(&pdu.room_id) - .any(|alias| match alias { - Ok(a) => a.as_str().starts_with("#admins:"), - _ => false, - }) - || pdu.sender.as_str().starts_with("@conduit:") - { - continue; - } - for user in db.rooms.room_members(&pdu.room_id) { let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; @@ -391,9 +370,7 @@ impl Sending { continue; } - let pushers = db - .pusher - .get_pusher(&user) + let pushers = dbg!(db.pusher.get_pusher(&user)) .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; let rules_for_user = db @@ -426,15 +403,17 @@ impl Sending { uint!(0) }; - crate::database::pusher::send_push_notice( - &user, - unread, - &pushers, - rules_for_user, - pdu, - db, + dbg!( + crate::database::pusher::send_push_notice( + &user, + unread, + &pushers, + rules_for_user, + pdu, + db, + ) + .await ) - .await .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; } } diff --git a/src/server_server.rs b/src/server_server.rs index 03952eb..a8946a9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,6 +1,6 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{info, warn}; +use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -17,7 +17,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, From 66af1ff6958d2096d549b98d2c830a09652d2f33 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:36:44 -0500 Subject: [PATCH 043/103] Update ruma and rocket to latest git rev and tokio to 1.0 Ruma updated the event signing validation code and there was a dep resolving failure with serde rocket and tokio so I updated rocket latest and tokio 1.0 to fix. 
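Several of the fixes above (set_pusher/get_pusher, the aliasid index, and the roomid_pduleaves keys in the earlier patches) rely on the same keying convention for the sled trees: components are joined with a 0xff separator so that a prefix scan for one identifier can never pick up entries belonging to an identifier that merely shares its leading bytes. A minimal sketch of why the separator matters, using a plain BTreeMap and a filter in place of sled's scan_prefix:

use std::collections::BTreeMap;

// Key layout used by senderkey_pusher above: `<sender> 0xff <pushkey>`.
fn pusher_key(sender: &str, pushkey: &str) -> Vec<u8> {
    let mut key = sender.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(pushkey.as_bytes());
    key
}

// Rough stand-in for sled's scan_prefix: every entry whose key starts with `prefix`.
fn scan_prefix<'a>(
    tree: &'a BTreeMap<Vec<u8>, String>,
    prefix: &'a [u8],
) -> impl Iterator<Item = &'a String> {
    tree.iter()
        .filter(move |(key, _)| key.starts_with(prefix))
        .map(|(_, value)| value)
}

fn main() {
    let mut tree = BTreeMap::new();
    tree.insert(pusher_key("@alice:hs", "k1"), "alice-at-hs".to_string());
    tree.insert(pusher_key("@alice:hsx", "k2"), "alice-at-hsx".to_string());

    // "@alice:hs" is a byte prefix of "@alice:hsx", so scanning on the bare
    // sender bytes would also return the other user's pusher ...
    let mut prefix = b"@alice:hs".to_vec();
    assert_eq!(scan_prefix(&tree, &prefix).count(), 2);

    // ... while terminating the prefix with 0xff (a byte that never appears
    // in these identifiers) keeps the scans disjoint.
    prefix.push(0xff);
    assert_eq!(scan_prefix(&tree, &prefix).count(), 1);
}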
--- Cargo.lock | 453 ++++++++++++++++++++++++-------------- Cargo.toml | 6 +- src/client_server/sync.rs | 3 +- 3 files changed, 294 insertions(+), 168 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7a79dbe..0561d0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -148,15 +148,15 @@ checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "bytemuck" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aa2ec95ca3b5c54cf73c91acf06d24f4495d5f1b1c12506ae3483d646177ac" +checksum = "5a4bad0c5981acc24bc09e532f35160f952e35422603f0563cd7a73c2c2e65a0" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -164,6 +164,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + [[package]] name = "cc" version = "1.0.66" @@ -191,7 +197,7 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.43", "winapi 0.3.9", ] @@ -225,7 +231,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio", + "tokio 1.0.2", "trust-dns-resolver", ] @@ -247,7 +253,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.23", + "time 0.2.24", "version_check", ] @@ -402,9 +408,9 @@ dependencies = [ [[package]] name = "figment" -version = "0.9.4" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13e2d266426f89e45fc544117ade84fad2a58ff676f34cc34e123fe4391b856" +checksum = "a3add2ec7727c9584a0ce75ee3c0f54f0ab692c7934450cc3a0287251e3a4f06" dependencies = [ "pear", "serde", @@ -472,9 +478,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" dependencies = [ "futures-channel", "futures-core", @@ -487,9 +493,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" dependencies = [ "futures-core", "futures-sink", @@ -497,15 +503,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" dependencies = [ "futures-core", "futures-task", @@ -514,15 +520,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -532,24 +538,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" dependencies = [ "futures-channel", "futures-core", @@ -558,7 +564,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.3", + "pin-project-lite 0.2.4", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -593,7 +599,7 @@ checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.1+wasi-snapshot-preview1", ] [[package]] @@ -624,7 +630,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "futures-sink", @@ -632,8 +638,28 @@ dependencies = [ "http", "indexmap", "slab", - "tokio", - "tokio-util", + "tokio 0.2.24", + "tokio-util 0.3.1", + "tracing", + "tracing-futures", +] + +[[package]] +name = "h2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.0.2", + "tokio-util 0.6.1", "tracing", "tracing-futures", ] @@ -655,9 +681,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ 
"libc", ] @@ -675,11 +701,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -690,7 +716,17 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes", + "bytes 0.5.6", + "http", +] + +[[package]] +name = "http-body" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +dependencies = [ + "bytes 1.0.1", "http", ] @@ -712,19 +748,43 @@ version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.2.7", "http", - "http-body", + "http-body 0.3.1", "httparse", "httpdate", "itoa", - "pin-project 1.0.3", + "pin-project 1.0.4", "socket2", - "tokio", + "tokio 0.2.24", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.0", + "http", + "http-body 0.4.0", + "httparse", + "httpdate", + "itoa", + "pin-project 1.0.4", + "socket2", + "tokio 1.0.2", "tower-service", "tracing", "want", @@ -736,10 +796,10 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ - "bytes", - "hyper", + "bytes 0.5.6", + "hyper 0.13.9", "native-tls", - "tokio", + "tokio 0.2.24", "tokio-tls", ] @@ -897,15 +957,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -918,9 +978,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" dependencies = [ "cfg-if 0.1.10", ] @@ -1015,21 +1075,23 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ - "iovec", "libc", - "mio", + "log", + "miow 0.3.6", + "ntapi", + "winapi 0.3.9", ] [[package]] @@ -1044,6 +1106,16 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "miow" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2", + "winapi 0.3.9", +] + [[package]] name = "native-tls" version = "0.2.7" @@ -1073,6 +1145,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-bigint" version = "0.2.6" @@ -1210,7 +1291,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.1.57", "smallvec", "winapi 0.3.9", ] @@ -1272,11 +1353,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" +checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" dependencies = [ - "pin-project-internal 1.0.3", + "pin-project-internal 1.0.4", ] [[package]] @@ -1292,9 +1373,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" +checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" dependencies = [ "proc-macro2", "quote", @@ -1309,9 +1390,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -1360,9 +1441,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -1416,9 +1497,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" +checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1488,6 +1569,15 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +dependencies = [ 
+ "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" @@ -1495,7 +1585,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom 0.1.16", - "redox_syscall", + "redox_syscall 0.1.57", "rust-argon2", ] @@ -1521,9 +1611,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -1533,9 +1623,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -1553,13 +1643,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", - "bytes", + "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body", - "hyper", + "http-body 0.3.1", + "hyper 0.13.9", "hyper-tls", "ipnet", "js-sys", @@ -1569,10 +1659,10 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "serde", "serde_urlencoded", - "tokio", + "tokio 0.2.24", "tokio-tls", "url", "wasm-bindgen", @@ -1609,7 +1699,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "async-trait", "atomic", @@ -1628,8 +1718,8 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.23", - "tokio", + "time 0.2.24", + "tokio 1.0.2", "ubyte", "version_check", "yansi", @@ -1638,7 +1728,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "devise", "glob", @@ -1650,23 +1740,24 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "cookie", "either", "http", - "hyper", + "hyper 0.14.2", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", + "pin-project-lite 0.2.4", "ref-cast", "smallvec", "state", - "time 0.2.23", - "tokio", + "time 0.2.24", + "tokio 1.0.2", "tokio-rustls", "uncased", "unicode-xid", @@ -1675,8 +1766,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.0.1" -source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "js_int", @@ -1693,8 +1784,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "http", "percent-encoding", @@ -1708,8 +1799,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1719,8 +1810,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "ruma-api", "ruma-common", @@ -1733,8 +1824,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.10.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "http", @@ -1752,8 +1843,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "maplit", @@ -1765,8 +1856,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-common", @@ -1779,8 +1870,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1790,8 +1881,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.0.3" -source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-api", @@ -1805,11 +1896,11 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "paste", - "rand 0.8.1", + "rand 0.8.2", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1819,8 +1910,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro2", "quote", @@ -1830,16 +1921,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" -dependencies = [ - "serde", -] +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" [[package]] name = "ruma-serde" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "form_urlencoded", "itoa", @@ -1851,8 +1939,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1862,8 +1950,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "base64 0.13.0", "ring", @@ -1902,11 +1990,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "log", "ring", "sct", @@ -1985,18 +2073,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.119" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" dependencies = [ "proc-macro2", "quote", @@ -2088,9 +2176,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" @@ -2127,7 +2215,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" +source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" dependencies = [ "itertools", "maplit", @@ -2200,14 +2288,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.2", + "redox_syscall 0.2.4", "remove_dir_all", "winapi 0.3.9", ] @@ -2234,29 +2322,28 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" dependencies = [ "lazy_static", ] [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.23" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" dependencies = [ "const_fn", "libc", @@ -2311,28 +2398,41 @@ version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "iovec", "lazy_static", + "memchr", + "mio 0.6.23", + "pin-project-lite 0.1.11", + "slab", +] + +[[package]] +name = "tokio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" +dependencies = [ + "autocfg", + "bytes 1.0.1", "libc", "memchr", - "mio", - "mio-uds", + 
"mio 0.7.7", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite 0.2.4", "signal-hook-registry", - "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" dependencies = [ "proc-macro2", "quote", @@ -2341,14 +2441,24 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio 1.0.2", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "rustls", - "tokio", - "webpki", + "pin-project-lite 0.2.4", + "tokio 1.0.2", ] [[package]] @@ -2358,7 +2468,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio", + "tokio 0.2.24", ] [[package]] @@ -2367,12 +2477,27 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-core", "futures-sink", "log", "pin-project-lite 0.1.11", - "tokio", + "tokio 0.2.24", +] + +[[package]] +name = "tokio-util" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" +dependencies = [ + "bytes 1.0.1", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.2.4", + "tokio 1.0.2", + "tokio-stream", ] [[package]] @@ -2398,7 +2523,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "tracing-attributes", "tracing-core", ] @@ -2449,7 +2574,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "url", ] @@ -2469,7 +2594,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "trust-dns-proto", ] @@ -2584,9 +2709,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" diff --git a/Cargo.toml b/Cargo.toml index fdcc4ec..dd37838 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,11 +14,11 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f336e5a172361fc1860461bb03667b1ed2", features = ["tls"] } # Used to handle requests +rocket = { git = 
"https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } @@ -29,7 +29,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", featu # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.23" } +tokio = { version = "1.0.2", features = ["macros", "time"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 494c773..6cd518d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -698,7 +698,8 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! { _ = &mut delay => {} _ = watcher => {} From 88c60605b457d0163b0b8d427e51cd07b0dd1f4c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:41:38 -0500 Subject: [PATCH 044/103] Add ability to update room leaves with multiple eventIds Tokio seems a bit broken with Rocket... --- src/client_server/membership.rs | 2 ++ src/database/rooms.rs | 37 ++++++++++++++++----------------- src/server_server.rs | 7 ++----- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 70bb480..1159185 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -665,6 +665,8 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + // TODO: can we simplify the DAG or should we copy it exactly?? + &pdu.prev_events, &db, )?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 665e328..a3f3aab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,8 +397,11 @@ impl Rooms { Ok(events) } - /// Force an update to the leaves of a room. - pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + /// Replace the leaves of a room. + /// + /// The provided `event_ids` become the new leaves, this enables an event having multiple + /// `prev_events`. 
+ pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -415,21 +418,6 @@ impl Rooms { Ok(()) } - /// Replace the leaves of a room with a new event. - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { - self.roomid_pduleaves.remove(key?)?; - } - - prefix.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&prefix, event_id.as_bytes())?; - - Ok(()) - } - /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -465,6 +453,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, + leaves: &[EventId], db: &Database, ) -> Result<()> { // Make unsigned fields correct. This is not properly documented in the spec, but state @@ -497,7 +486,7 @@ impl Rooms { // We no longer keep this pdu as an outlier self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; - self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; + self.replace_pdu_leaves(&pdu.room_id, leaves)?; // Mark as read first so the sending client doesn't get a notification even if appending // fails @@ -943,7 +932,17 @@ impl Rooms { // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; + // remove the + self.append_pdu( + &pdu, + pdu_json, + count, + pdu_id.clone().into(), + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + &[pdu.event_id.clone()], + db, + )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist diff --git a/src/server_server.rs b/src/server_server.rs index 16a1a8e..f782ad5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -24,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -1245,13 +1245,10 @@ fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> R utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + &new_room_leaves, &db, )?; - // If we update the room leaves after calling append_pdu it will stick since append_pdu - // calls replace_pdu_leaves with only the given event. 
- db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; - // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From 602edfd8499726e21b51eaa4a4a8927381c876c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 24 Jan 2021 16:05:52 +0100 Subject: [PATCH 045/103] feature: push rule settings --- src/client_server/room.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 631d87b..4adc335 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -220,12 +220,8 @@ pub async fn create_room_route( continue; } - db.rooms.build_and_append_pdu( - pdu_builder, - &sender_user, - &room_id, - &db, - )?; + db.rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?; } // 6. Events implied by name and topic From 05a4c0b325f1b8f1c7d3d5dbb56ee22b6e8af858 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 24 Jan 2021 20:18:40 -0500 Subject: [PATCH 046/103] Finish forward extremity gathering, use resolved state as new snapshot --- src/server_server.rs | 147 +++++++++++++++++++++++-------------------- 1 file changed, 80 insertions(+), 67 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index f782ad5..e733d24 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,6 +18,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -483,34 +484,6 @@ pub async fn get_public_rooms_route( .into()) } -#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] -pub enum PrevEvents { - Sequential(T), - Fork(Vec), -} - -impl IntoIterator for PrevEvents { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - match self { - Self::Sequential(item) => vec![item].into_iter(), - Self::Fork(list) => list.into_iter(), - } - } -} - -impl PrevEvents { - pub fn new(id: &[T]) -> Self { - match id { - [] => panic!("All events must have previous event"), - [single_id] => Self::Sequential(single_id.clone()), - rest => Self::Fork(rest.to_vec()), - } - } -} - #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -605,8 +578,16 @@ pub async fn send_transaction_message_route<'a>( UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); let origin = sender.server_name(); - // TODO: this could fail or the server not respond... - let keys = fetch_signing_keys(&db, origin).await?; + let keys = match fetch_signing_keys(&db, origin).await { + Ok(keys) => keys, + Err(_) => { + resolved_map.insert( + event_id, + Err("Could not find signing keys for this server".to_string()), + ); + continue; + } + }; pub_key_map.insert( origin.to_string(), @@ -769,11 +750,12 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. 
- let (mut fork_states, fork_ids) = match calculate_forward_extremities( + let (mut fork_states, extremities) = match calculate_forward_extremities( &db, &pdu, server_name, &pub_key_map, + current_state, &mut auth_cache, ) .await @@ -791,6 +773,7 @@ pub async fn send_transaction_message_route<'a>( let fork_states = fork_states.into_iter().collect::>(); + let mut update_state = false; // 13. start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -800,11 +783,12 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // We do need to force an update to this rooms state + update_state = true; + // TODO: remove this is for current debugging Jan, 15 2021 let mut number_fetches = 0_u32; let mut auth_events = vec![]; - // this keeps track if we error so we can break out of these inner loops - // to continue on with the incoming PDU's for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { @@ -821,14 +805,12 @@ pub async fn send_transaction_message_route<'a>( .await .map(|mut vec| { number_fetches += 1; - vec.remove(0) + vec.pop() }) { - Ok(aev) => aev, - Err(_) => { - resolved_map.insert( - event_id.clone(), - Err("Event has been soft failed".into()), - ); + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); continue 'main_pdu_loop; } }, @@ -839,20 +821,19 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); - let mut event_map = EventMap::new(); // Add everything we will need to event_map - event_map.extend( + auth_cache.extend( auth_events .iter() .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) .flatten(), ); - event_map.extend( + auth_cache.extend( incoming_auth_events .into_iter() .map(|pdu| (pdu.event_id().clone(), pdu)), ); - event_map.extend( + auth_cache.extend( state_at_event .into_iter() .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), @@ -873,7 +854,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) .collect(), - &mut event_map, + &mut auth_cache, ) { Ok(res) => res .into_iter() @@ -905,14 +886,23 @@ pub async fn send_transaction_message_route<'a>( ); } else { // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_state(&db, &pdu, &fork_ids)?; + append_incoming_pdu( + &db, + &pdu, + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } } - Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } /// An async function that can recursively calls itself. 
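
The branch above is the core of this commit: when the incoming PDU's prev_events leave more than one forward extremity, the route collects one state map per fork, feeds them to state_res::StateResolution::resolve, and only then forces the room's current state to the resolved result (the update_state flag); with a single fork the stored snapshot is reused as-is. A minimal sketch of that decision follows; the StateKey/StateMap aliases and the resolve_forks placeholder are illustrative assumptions, not the state-res API, and the placeholder does not implement the real resolution algorithm:

    use std::collections::BTreeMap;

    // Stand-in types for illustration only; the route above works with
    // StateMap<Arc<PduEvent>> from state-res and real event IDs.
    type StateKey = (String, String); // (event type, state key)
    type StateMap = BTreeMap<StateKey, String>; // state key -> event id

    // Placeholder for state_res::StateResolution::resolve: here later forks simply
    // overwrite earlier ones per key, which is NOT the real resolution algorithm.
    fn resolve_forks(forks: &[StateMap]) -> Result<StateMap, String> {
        let mut resolved = StateMap::new();
        for fork in forks {
            for (key, event_id) in fork {
                resolved.insert(key.clone(), event_id.clone());
            }
        }
        Ok(resolved)
    }

    // The decision made above: one fork means its snapshot is reused unchanged,
    // several forks mean resolving across all of them and forcing the room state
    // afterwards (the `update_state` flag).
    fn state_after_forks(forks: Vec<StateMap>) -> Result<(StateMap, bool), String> {
        match forks.len() {
            0 => Err("no known state for any prev_event".to_owned()),
            1 => Ok((forks.into_iter().next().expect("length checked"), false)),
            _ => Ok((resolve_forks(&forks)?, true)),
        }
    }

Keeping resolution out of the single-fork path is what lets ordinary sequential events skip a forced state update entirely.
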
@@ -1029,6 +1019,7 @@ async fn fetch_check_auth_events( continue; } + // TODO: Batch these async calls so we can wait on multiple at once let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await .map(|mut vec| { @@ -1119,6 +1110,7 @@ async fn calculate_forward_extremities( pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, + current_state: BTreeMap<(EventType, Option), Arc>, auth_cache: &mut EventMap>, ) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; @@ -1126,17 +1118,13 @@ async fn calculate_forward_extremities( let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? - if current_leaves.contains(pdu.event_id()) { - is_incoming_leaf = false; - // Not sure what to do here - } - + // // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if already_referenced(db, pdu)? { + if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { is_incoming_leaf = false; - // This event has been dealt with already?? + // Not sure what to do here } // TODO: @@ -1213,29 +1201,54 @@ async fn calculate_forward_extremities( // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.insert( - db.rooms - .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(), - ); + fork_states.insert(current_state); } Ok((fork_states, dbg!(current_leaves))) } -/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) -fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { - Ok(false) -} - -fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { +/// Update the room state to be the resolved state and add the fully auth'ed event +/// to the DB. +/// +/// TODO: If we force the state we need to validate all events in that state +/// any events we fetched from another server need to be fully verified? +fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: Option>>, +) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let new = state + .into_iter() + .map(|((ev, k), pdu)| { + Ok(( + ( + ev, + k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + db.rooms + .get_pdu_id(pdu.event_id()) + .ok() + .flatten() + .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? + .to_vec(), + )) + }) + .collect::>()?; + + info!("Force update of state for {:?}", pdu); + + db.rooms.force_state(pdu.room_id(), new, &db.globals)?; + } + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 68f60933e6af03889440a5f9c58d10ce67359f21 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:33:41 -0500 Subject: [PATCH 047/103] Resolved state is set as the current room state on incoming events --- src/server_server.rs | 43 +++++++++++++------------------------------ 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e733d24..14a1d0c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -870,36 +870,20 @@ pub async fn send_transaction_message_route<'a>( } }; - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_incoming_pdu( + &db, &pdu, - single_prev, - &state_at_forks, - None, - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Soft fail, we add the event as an outlier. - resolved_map.insert( - pdu.event_id().clone(), - Err("Event has been soft failed".into()), - ); - } else { - // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( - &db, - &pdu, - &extremities, - if update_state { - Some(state_at_forks) - } else { - None - }, - )?; + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; - // Event has passed all auth/stateres checks - resolved_map.insert(pdu.event_id().clone(), Ok(())); - } + // Event has passed all auth/stateres checks + resolved_map.insert(pdu.event_id().clone(), Ok(())); } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) @@ -1210,8 +1194,7 @@ async fn calculate_forward_extremities( /// Update the room state to be the resolved state and add the fully auth'ed event /// to the DB. /// -/// TODO: If we force the state we need to validate all events in that state -/// any events we fetched from another server need to be fully verified? +/// TODO: Since all these events passed state resolution can we trust them to add fn append_incoming_pdu( db: &Database, pdu: &PduEvent, From e0453e2348b5a77ea9ac0b5e40296a303027875c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:50:45 -0500 Subject: [PATCH 048/103] Cleanup dbg prints and error messages --- src/server_server.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 14a1d0c..20d76f1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -496,7 +496,7 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - dbg!(&*body); + // dbg!(&*body); for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { @@ -1148,8 +1148,6 @@ async fn calculate_forward_extremities( fork_states.insert(state); } else { - error!("Forward extremity not found... 
{}", id); - let res = db .sending .send_federation_request( @@ -1188,7 +1186,7 @@ async fn calculate_forward_extremities( fork_states.insert(current_state); } - Ok((fork_states, dbg!(current_leaves))) + Ok((fork_states, current_leaves)) } /// Update the room state to be the resolved state and add the fully auth'ed event From 6fd3e1d1ddb2d9707f1713e962ee350a85e07795 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 11:20:33 -0500 Subject: [PATCH 049/103] Append state event that pass resolution to DB, update to tokio 1.1 --- Cargo.lock | 630 +++++++++++++------------------------- Cargo.toml | 12 +- src/client_server/sync.rs | 2 +- src/database.rs | 3 +- src/database/globals.rs | 14 +- src/server_server.rs | 113 +++++-- 6 files changed, 298 insertions(+), 476 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0561d0a..c7381be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,20 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" - [[package]] name = "adler32" version = "1.2.0" @@ -48,6 +33,27 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" +[[package]] +name = "async-stream" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.42" @@ -76,7 +82,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -85,32 +91,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "backtrace" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide 0.4.3", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -142,9 +128,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "f07aa6688c702439a1be0307b6a94dffe1168569e45b9500c1372bc580740d59" [[package]] name = "bytemuck" @@ -158,12 +144,6 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.0.1" @@ -176,12 +156,6 @@ version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -211,7 +185,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.13.0", + "base64", "directories", "http", "image", @@ -279,7 +253,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -288,7 +262,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "const_fn", "crossbeam-utils", "lazy_static", @@ -303,10 +277,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "deflate" version = "0.8.6" @@ -364,7 +344,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -391,7 +371,7 @@ version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -457,25 +437,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.3.12" @@ -564,7 +528,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.4", + "pin-project-lite", "pin-utils", 
"proc-macro-hack", "proc-macro-nested", @@ -586,20 +550,20 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.1+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -612,12 +576,6 @@ dependencies = [ "weezl", ] -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - [[package]] name = "glob" version = "0.3.0" @@ -626,11 +584,11 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" dependencies = [ - "bytes 0.5.6", + "bytes", "fnv", "futures-core", "futures-sink", @@ -696,7 +654,7 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -705,18 +663,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" dependencies = [ - "bytes 0.5.6", + "bytes", "http", ] @@ -744,11 +702,11 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.9" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" dependencies = [ - "bytes 0.5.6", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -792,15 +750,15 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", - "hyper 0.13.9", + "bytes", + "hyper", "native-tls", - "tokio 0.2.24", - "tokio-tls", + "tokio", + "tokio-native-tls", ] [[package]] @@ -853,16 +811,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", + "cfg-if", ] [[package]] @@ -873,7 +822,7 @@ checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ "socket2", "widestring", - "winapi 0.3.9", + "winapi", "winreg 0.6.2", ] @@ -885,9 +834,9 @@ checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" dependencies = [ "either", ] @@ -900,18 +849,15 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" -version = "0.1.20" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc797adac5f083b8ff0ca6f6294a999393d76e197c36488e2ef732c4715f6fa3" -dependencies = [ - "byteorder", -] +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" dependencies = [ "wasm-bindgen", ] @@ -925,30 +871,6 @@ dependencies = [ "serde", ] -[[package]] -name = "jsonwebtoken" -version = "7.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" -dependencies = [ - "base64 0.12.3", - "pem", - "ring", - "serde", - "serde_json", - "simple_asn1", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -957,9 +879,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.82" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" +checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" [[package]] name = "linked-hash-map" @@ -978,11 +900,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -1033,16 +955,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.3.7" @@ -1052,35 +964,6 @@ dependencies = [ 
"adler32", ] -[[package]] -name = "miniz_oxide" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "mio" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" -dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - [[package]] name = "mio" version = "0.7.7" @@ -1089,21 +972,19 @@ checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ "libc", "log", - "miow 0.3.6", + "miow", "ntapi", - "winapi 0.3.9", + "winapi", ] [[package]] name = "miow" -version = "0.2.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "socket2", + "winapi", ] [[package]] @@ -1135,14 +1016,12 @@ dependencies = [ ] [[package]] -name = "net2" -version = "0.2.37" +name = "ntapi" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1216,12 +1095,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" - [[package]] name = "once_cell" version = "1.5.2" @@ -1235,7 +1108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "lazy_static", "libc", @@ -1288,12 +1161,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.1.57", "smallvec", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1382,12 +1255,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - [[package]] name = "pin-project-lite" version = "0.2.4" @@ -1415,7 +1282,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide 0.3.7", + "miniz_oxide", ] [[package]] @@ -1497,9 +1364,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1542,7 +1409,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.1", + "getrandom 0.2.2", ] [[package]] @@ -1633,17 +1500,17 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "fd281b1030aa675fb90aa994d07187645bb3c8fc756ca766e7c3070b439de9de" dependencies = [ - "base64 0.13.0", - "bytes 0.5.6", + "base64", + "bytes", "encoding_rs", "futures-core", "futures-util", @@ -1656,14 +1523,13 @@ dependencies = [ "lazy_static", "log", "mime", - "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.4", + "pin-project-lite", "serde", "serde_urlencoded", - "tokio 0.2.24", - "tokio-tls", + "tokio", + "tokio-native-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1693,7 +1559,7 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1752,7 +1618,7 @@ dependencies = [ "parking_lot", "pear", "percent-encoding", - "pin-project-lite 0.2.4", + "pin-project-lite", "ref-cast", "smallvec", "state", @@ -1767,7 +1633,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "js_int", @@ -1785,7 +1651,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "http", "percent-encoding", @@ -1800,7 +1666,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1811,7 +1677,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "ruma-api", "ruma-common", @@ -1825,7 +1691,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "http", @@ -1844,7 +1710,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "maplit", @@ -1857,7 +1723,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-common", @@ -1871,7 +1737,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1882,7 +1748,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-api", @@ -1897,10 +1763,10 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "paste", - "rand 0.8.2", + "rand 0.8.3", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1911,7 +1777,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro2", "quote", @@ -1922,12 +1788,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "form_urlencoded", "itoa", @@ -1940,7 +1806,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", 
"proc-macro2", @@ -1951,9 +1817,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ - "base64 0.13.0", + "base64", "ring", "ruma-identifiers", "ruma-serde", @@ -1967,18 +1833,12 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", ] -[[package]] -name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - [[package]] name = "rustc_version" version = "0.2.3" @@ -1994,7 +1854,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.13.0", + "base64", "log", "ring", "sct", @@ -2014,7 +1874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2073,18 +1933,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -2186,9 +2046,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2215,15 +2075,15 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" +source = "git+https://github.com/ruma/state-res?rev=791c66d73cf064d09db0cdf767d5fef43a343425#791c66d73cf064d09db0cdf767d5fef43a343425" dependencies = [ "itertools", + "log", "maplit", "ruma", "serde", "serde_json", "thiserror", - "tracing", ] [[package]] @@ -2277,9 +2137,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.58" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", @@ -2292,12 +2152,12 @@ version = "3.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "rand 0.8.2", + "rand 0.8.3", "redox_syscall 0.2.4", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2322,28 +2182,18 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" +checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "time" -version = "0.1.43" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "time" -version = "0.2.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -2351,7 +2201,7 @@ dependencies = [ "stdweb", "time-macros", "version_check", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2379,9 +2229,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -2394,38 +2244,21 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.24" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.11", - "slab", -] - -[[package]] -name = "tokio" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" +checksum = "8efab2086f17abcddb8f756117665c958feee6b2e39974c2f1600592ab3a4195" dependencies = [ "autocfg", - "bytes 1.0.1", + "bytes", "libc", "memchr", - "mio 0.7.7", + "mio", "num_cpus", "once_cell", - "pin-project-lite 0.2.4", + "pin-project-lite", "signal-hook-registry", "tokio-macros", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2439,6 +2272,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -2457,46 +2300,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "pin-project-lite 0.2.4", - "tokio 1.0.2", -] - -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio 0.2.24", + "pin-project-lite", + "tokio", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "feb971a26599ffd28066d387f109746df178eff14d5ea1e235015c5601967a4b" dependencies = [ - "bytes 0.5.6", + "async-stream", + "bytes", "futures-core", "futures-sink", "log", - "pin-project-lite 0.1.11", - "tokio 0.2.24", -] - -[[package]] -name = "tokio-util" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" -dependencies = [ - "bytes 1.0.1", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.4", - "tokio 1.0.2", + "pin-project-lite", + "tokio", "tokio-stream", ] @@ -2511,9 +2331,9 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" @@ -2521,24 +2341,11 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.4", - "tracing-attributes", + "cfg-if", + "pin-project-lite", "tracing-core", ] -[[package]] -name = "tracing-attributes" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tracing-core" version = "0.1.17" @@ -2560,18 +2367,22 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" +checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" dependencies = [ "async-trait", - "backtrace", + "cfg-if", + "data-encoding", "enum-as-inner", - "futures", + "futures-channel", + "futures-io", + "futures-util", "idna", + "ipnet", "lazy_static", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec", "thiserror", "tokio 0.2.24", @@ -2580,17 +2391,17 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" +checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" dependencies = [ - "backtrace", - "cfg-if 0.1.10", - "futures", + "cfg-if", + "futures-util", "ipconfig", "lazy_static", "log", "lru-cache", + "parking_lot", "resolv-conf", "smallvec", "thiserror", @@ -2622,15 +2433,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2709,17 +2511,17 @@ checksum = 
"cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.1+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -2727,9 +2529,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" dependencies = [ "bumpalo", "lazy_static", @@ -2742,11 +2544,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2754,9 +2556,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2764,9 +2566,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2", "quote", @@ -2777,15 +2579,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" +checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", @@ -2803,9 +2605,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2bb9fc8309084dd7cd651336673844c1d47f8ef6d2091ec160b27f5c4aa277" +checksum = "4a32b378380f4e9869b22f0b5177c68a5519f03b3454fde0b291455ddbae266c" [[package]] name = "widestring" @@ -2813,12 +2615,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -2829,12 +2625,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -2853,7 +2643,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2862,17 +2652,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index dd37838..de6a966 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,24 +18,24 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "1.0.2", features = ["macros", "time"] } +tokio = { version = "1.1.0", features = ["macros", "time", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # 
Used for emitting log entries log = "0.4.11" # Used for rocket<->ruma conversions -http = "0.2.1" +http = "0.2.3" # Used to find data directory for default db path directories = "3.0.1" @@ -50,7 +50,7 @@ rand = "0.7.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.10.9" +reqwest = "0.11.0" # Used for conduit::Error type thiserror = "1.0.22" # Used to generate thumbnails for images @@ -60,7 +60,7 @@ base64 = "0.13.0" # Used when hashing the state ring = "0.16.19" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.19.6" +trust-dns-resolver = "0.20.0" # Used to find matching events for appservices regex = "1.4.2" # jwt jsonwebtokens diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6cd518d..97b6ad2 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -701,7 +701,7 @@ pub async fn sync_events_route( let delay = tokio::time::sleep(duration); tokio::pin!(delay); tokio::select! { - _ = &mut delay => {} + _ = &mut delay, if delay.is_elapsed() => {} _ = watcher => {} } } diff --git a/src/database.rs b/src/database.rs index ea65d6f..b841ab9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -106,8 +106,7 @@ impl Database { db.open_tree("global")?, db.open_tree("servertimeout_signingkey")?, config, - ) - .await?, + )?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 7eb162b..2ed6a9f 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -27,11 +27,7 @@ pub struct Globals { } impl Globals { - pub async fn load( - globals: sled::Tree, - server_keys: sled::Tree, - config: Config, - ) -> Result { + pub fn load(globals: sled::Tree, server_keys: sled::Tree, config: Config) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
.expect("utils::generate_keypair always returns Some"); @@ -83,11 +79,9 @@ impl Globals { config, keypair: Arc::new(keypair), reqwest_client, - dns_resolver: TokioAsyncResolver::tokio_from_system_conf() - .await - .map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, servertimeout_signingkey: server_keys, diff --git a/src/server_server.rs b/src/server_server.rs index 20d76f1..adf3c58 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -25,7 +25,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashMap}, convert::TryFrom, fmt::Debug, future::Future, @@ -839,7 +839,7 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - match state_res::StateResolution::resolve( + let res = match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, &fork_states @@ -856,10 +856,7 @@ pub async fn send_transaction_message_route<'a>( .collect(), &mut auth_cache, ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), + Ok(res) => res, Err(_) => { resolved_map.insert( pdu.event_id().clone(), @@ -867,7 +864,29 @@ pub async fn send_transaction_message_route<'a>( ); continue 'main_pdu_loop; } + }; + let mut resolved = BTreeMap::new(); + for (k, id) in res { + // We should know of the event but just incase + let pdu = match auth_cache.get(&id) { + Some(pdu) => pdu.clone(), + None => { + match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache) + .await + .map(|mut vec| vec.pop()) + { + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); + continue 'main_pdu_loop; + } + } + } + }; + resolved.insert(k, pdu); } + resolved }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). @@ -1199,37 +1218,67 @@ fn append_incoming_pdu( new_room_leaves: &[EventId], state: Option>>, ) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pduid.to_vec(), + ); + } + None => { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + // TODO: can we use are current state if we just add this event to the end of our + // pduid_pdu tree?? + let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + + db.rooms.append_pdu( + &*pdu, + utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &new_room_leaves, + &db, + )?; + // TODO: is this ok... 
+ db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pdu_id.to_vec(), + ); + } + } + } + + info!("Force update of state for {:?}", pdu); + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + } + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - if let Some(state) = state { - let new = state - .into_iter() - .map(|((ev, k), pdu)| { - Ok(( - ( - ev, - k.ok_or_else(|| Error::Conflict("State contained non state event"))?, - ), - db.rooms - .get_pdu_id(pdu.event_id()) - .ok() - .flatten() - .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? - .to_vec(), - )) - }) - .collect::>()?; - - info!("Force update of state for {:?}", pdu); - - db.rooms.force_state(pdu.room_id(), new, &db.globals)?; - } - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 6661de50ab5069838f60893afea9a421f6f034e3 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 21:45:33 -0500 Subject: [PATCH 050/103] Fix and integrate outlier tree, build forks after adding event to DB --- src/database.rs | 2 +- src/database/rooms.rs | 95 +++++++----- src/server_server.rs | 334 +++++++++++++++++++++++++----------------- 3 files changed, 263 insertions(+), 168 deletions(-) diff --git a/src/database.rs b/src/database.rs index b841ab9..a9cc362 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, + pduid_outlierpdu: db.open_tree("pduid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a3f3aab..d459aee 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,9 +27,10 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, + time::Duration, }; -use super::admin::AdminCommand; +use super::{admin::AdminCommand, sending::Sending}; /// The unique identifier of each state group. /// @@ -67,7 +68,7 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) /// Any pdu that has passed the steps up to auth with auth_events. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) pduid_outlierpdu: sled::Tree, } impl Rooms { @@ -85,13 +86,20 @@ impl Rooms { let mut pduid = room_id.as_bytes().to_vec(); pduid.push(0xff); pduid.extend_from_slice(&pduid_short?); - self.pduid_pdu.get(&pduid)?.map_or_else( - || Err(Error::bad_database("Failed to find PDU in state snapshot.")), - |b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }, - ) + match self.pduid_pdu.get(&pduid)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")), + None => self + .pduid_outlierpdu + .get(pduid)? 
+ .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + } }) .filter_map(|r| r.ok()) .map(|pdu| { @@ -137,12 +145,20 @@ impl Rooms { Ok::<_, Error>(Some(( pdu_id.clone().into(), - serde_json::from_slice::( - &self.pduid_pdu.get(&pdu_id)?.ok_or_else(|| { - Error::bad_database("PDU in state not found in database.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, + match self.pduid_pdu.get(&pdu_id)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + None => self + .pduid_outlierpdu + .get(pdu_id)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })??, + }, ))) }) } else { @@ -307,9 +323,12 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) @@ -328,13 +347,17 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } + /// Returns the pdu. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { @@ -420,23 +443,27 @@ impl Rooms { /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + if let Some(id) = self.eventid_pduid.get(event_id.as_bytes())? { + self.pduid_outlierpdu.get(id)?.map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) + } else { + Ok(None) + } } /// Returns true if the event_id was previously inserted. 
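A minimal sketch of the lookup order the hunks above introduce, assuming the tree names from this diff (get_any_pdu_bytes is an illustrative helper written as if inside impl Rooms, not part of the patch):

    fn get_any_pdu_bytes(&self, pdu_id: &[u8]) -> sled::Result<Option<sled::IVec>> {
        // Timeline events live in pduid_pdu; events that have only passed the
        // early /send checks sit in pduid_outlierpdu under the same id, so every
        // reader checks the timeline tree first and falls back to the outliers.
        if let Some(bytes) = self.pduid_pdu.get(pdu_id)? {
            return Ok(Some(bytes));
        }
        self.pduid_outlierpdu.get(pdu_id)
    }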
- pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + pub fn append_pdu_outlier(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.pduid_outlierpdu.len()); + + // we need to be able to find it by event_id + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + let res = self - .eventid_outlierpdu + .pduid_outlierpdu .insert( - event_id.as_bytes(), + pdu_id, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), ) .map(|op| op.is_some())?; @@ -484,7 +511,9 @@ impl Rooms { } // We no longer keep this pdu as an outlier - self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + if let Some(id) = self.eventid_pduid.remove(pdu.event_id().as_bytes())? { + self.pduid_outlierpdu.remove(id)?; + } self.replace_pdu_leaves(&pdu.room_id, leaves)?; diff --git a/src/server_server.rs b/src/server_server.rs index adf3c58..ad0a1a4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -614,7 +614,7 @@ pub async fn send_transaction_message_route<'a>( // 7. if not timeline event: stop // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous): (_, Vec>) = match validate_event( + let (pdu, previous): (Arc, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -638,69 +638,75 @@ pub async fn send_transaction_message_route<'a>( None }; + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. // // Step 10. 
check the auth of the event passes based on the calculated state of the event - let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = - match db - .sending - .send_federation_request( - &db.globals, + let (mut state_at_event, incoming_auth_events): ( + StateMap>, + Vec>, + ) = match db + .sending + .send_federation_request( + &db.globals, + server_name, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events( + &db, server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, ) - .await - { - Ok(res) => { - let state = fetch_events( + .await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect(); + + ( + state, + fetch_events( &db, server_name, &pub_key_map, - &res.pdu_ids, + &res.auth_chain_ids, &mut auth_cache, ) - .await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - ( - state, - fetch_events( - &db, - server_name, - &pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await?, - ) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await?, + ) + } + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( @@ -750,12 +756,25 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, extremities) = match calculate_forward_extremities( + let extremities = match calculate_forward_extremities(&db, &pdu).await { + Ok(fork_ids) => fork_ids, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; + } + }; + + // Now that the event has passed all auth it is added into the timeline, we do have to + // find the leaves otherwise we would do this sooner + append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + + let mut fork_states = match build_forward_extremity_snapshots( &db, - &pdu, + pdu.room_id(), server_name, - &pub_key_map, current_state, + &extremities, + &pub_key_map, &mut auth_cache, ) .await @@ -767,6 +786,9 @@ pub async fn send_transaction_message_route<'a>( } }; + // Make this the state after (since we appended_incoming_pdu this should agree with our servers + // current state). 
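The pdu_id built for both timeline and outlier inserts in this patch always has the same byte layout; a small sketch of it (make_pdu_id is an illustrative name only):

    fn make_pdu_id(room_id: &RoomId, count: u64) -> Vec<u8> {
        // Room id, a 0xff separator, then the global counter in big-endian so
        // sled's lexicographic key order matches insertion order within a room.
        let mut pdu_id = room_id.as_bytes().to_vec();
        pdu_id.push(0xff);
        pdu_id.extend_from_slice(&count.to_be_bytes());
        pdu_id
    }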
+ state_at_event.insert((pdu.kind(), pdu.state_key()), pdu.clone()); // add the incoming events to the mix of state snapshots // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets fork_states.insert(state_at_event.clone()); @@ -840,7 +862,7 @@ pub async fn send_transaction_message_route<'a>( ); let res = match state_res::StateResolution::resolve( - &pdu.room_id, + pdu.room_id(), &RoomVersionId::Version6, &fork_states .into_iter() @@ -865,6 +887,7 @@ pub async fn send_transaction_message_route<'a>( continue 'main_pdu_loop; } }; + let mut resolved = BTreeMap::new(); for (k, id) in res { // We should know of the event but just incase @@ -890,10 +913,9 @@ pub async fn send_transaction_message_route<'a>( }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( + update_resolved_state( &db, - &pdu, - &extremities, + pdu.room_id(), if update_state { Some(state_at_forks) } else { @@ -905,7 +927,10 @@ pub async fn send_transaction_message_route<'a>( resolved_map.insert(pdu.event_id().clone(), Ok(())); } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(send_transaction_message::v1::Response { + pdus: dbg!(resolved_map), + } + .into()) } /// An async function that can recursively calls itself. @@ -1036,13 +1061,14 @@ async fn fetch_check_auth_events( Ok(()) } -/// Find the event and auth it. +/// Find the event and auth it. Once the event is validated (steps 1 - 8) +/// it is appended to the outliers Tree. /// /// 1. Look in the main timeline (pduid_pdu tree) /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation /// 4. TODO: Ask other servers over federation? -async fn fetch_events( +pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, key_map: &PublicKeyMap, @@ -1071,6 +1097,13 @@ async fn fetch_events( .await .map_err(|_| Error::Conflict("Authentication of event failed"))?; + // create the pduid for this event but stick it in the outliers DB + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), @@ -1084,7 +1117,7 @@ async fn fetch_events( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. -async fn fetch_signing_keys( +pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, ) -> Result> { @@ -1108,26 +1141,28 @@ async fn fetch_signing_keys( /// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). /// /// The state snapshot of the incoming event __needs__ to be added to the resulting list. -async fn calculate_forward_extremities( +pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, - origin: &ServerName, - pub_key_map: &PublicKeyMap, - current_state: BTreeMap<(EventType, Option), Arc>, - auth_cache: &mut EventMap>, -) -> Result<(BTreeSet>>, Vec)> { +) -> Result> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
- // + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - is_incoming_leaf = false; - // Not sure what to do here + // + // We first check if know of the event and then don't include it as a forward + // extremity if it is a timeline event + if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { + is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); } // TODO: @@ -1144,11 +1179,34 @@ async fn calculate_forward_extremities( } } - let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + + Ok(current_leaves) +} + +/// This should always be called after the incoming event has been appended to the DB. +/// +/// This guarentees that the incoming event will be in the state sets (at least our servers +/// and the sending server). +pub(crate) async fn build_forward_extremity_snapshots( + db: &Database, + room_id: &RoomId, + origin: &ServerName, + current_state: StateMap>, + current_leaves: &[EventId], + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let current_hash = db.rooms.current_state_hash(room_id)?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); - for id in ¤t_leaves { + for id in current_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1158,14 +1216,21 @@ async fn calculate_forward_extremities( if current_hash.as_ref() == Some(&state_hash) { includes_current_state = true; } - let state = db + + let mut state_before = db .rooms - .state_full(&pdu.room_id, &state_hash)? + .state_full(room_id, &state_hash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); + .collect::>(); - fork_states.insert(state); + // Now it's the state after + if let Some(pdu) = db.rooms.get_pdu_from_id(&id)? { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, Arc::new(pdu)); + } + + fork_states.insert(state_before); } else { let res = db .sending @@ -1173,7 +1238,7 @@ async fn calculate_forward_extremities( &db.globals, origin, get_room_state_ids::v1::Request { - room_id: pdu.room_id(), + room_id, event_id: id, }, ) @@ -1181,41 +1246,38 @@ async fn calculate_forward_extremities( // TODO: This only adds events to the auth_cache, there is for sure a better way to // do this... - fetch_events(&db, origin, &pub_key_map, &res.auth_chain_ids, auth_cache).await?; + fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - let state = fetch_events(&db, origin, &pub_key_map, &res.pdu_ids, auth_cache) + let mut state_before = fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) .await? .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); + .collect::>(); - fork_states.insert(state); + if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) + .await? 
+ .pop() + { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, pdu); + } + + // Now it's the state after + fork_states.insert(state_before); } } - // Add the incoming event only if it is a leaf, we do this after fetching all the - // state since we know we have already fetched the state of the incoming event so lets - // not do it again! - if is_incoming_leaf { - current_leaves.push(pdu.event_id().clone()); - } - // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { fork_states.insert(current_state); } - Ok((fork_states, current_leaves)) + Ok(fork_states) } -/// Update the room state to be the resolved state and add the fully auth'ed event -/// to the DB. -/// -/// TODO: Since all these events passed state resolution can we trust them to add -fn append_incoming_pdu( +pub(crate) fn update_resolved_state( db: &Database, - pdu: &PduEvent, - new_room_leaves: &[EventId], + room_id: &RoomId, state: Option>>, ) -> Result<()> { // Update the state of the room if needed @@ -1236,44 +1298,50 @@ fn append_incoming_pdu( ); } None => { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // TODO: can we use are current state if we just add this event to the end of our - // pduid_pdu tree?? - let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &*pdu, - utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &new_room_leaves, - &db, - )?; - // TODO: is this ok... - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pdu_id.to_vec(), - ); + error!("We didn't append an event as an outlier\n{:?}", pdu); } } } - info!("Force update of state for {:?}", pdu); - - db.rooms - .force_state(pdu.room_id(), new_state, &db.globals)?; + db.rooms.force_state(room_id, new_state, &db.globals)?; } + Ok(()) +} + +/// Append the incoming event setting the state snapshot to the state from the +/// server that sent the event. +pub(crate) fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: &StateMap>, +) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type.clone(), + state_k + .clone() + .ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + pduid.to_vec(), + ); + } + None => { + error!("We didn't append an event as an outlier\n{:?}", pdu); + } + } + } + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1281,7 +1349,7 @@ fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
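The "state after" comments above capture the invariant that every snapshot fed to the resolver contains its own leaf event; a sketch of that step in isolation (state_after is an illustrative helper relying on the crate's PduEvent and state_res StateMap types):

    fn state_after(
        mut state_before: StateMap<Arc<PduEvent>>,
        leaf: Arc<PduEvent>,
    ) -> StateMap<Arc<PduEvent>> {
        // Insert the extremity's own event so the snapshot describes the room
        // after that leaf, matching the snapshot built for the incoming event.
        state_before.insert((leaf.kind(), leaf.state_key()), leaf);
        state_before
    }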
- let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let state_hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( pdu, @@ -1292,9 +1360,7 @@ fn append_incoming_pdu( &db, )?; - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + db.rooms.set_room_state(pdu.room_id(), &state_hash)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From e09be2f7ee31a97b615a86e5bdae8ac75ec93ff6 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 30 Jan 2021 12:43:43 -0500 Subject: [PATCH 051/103] Add incoming event to the current room state then resolve All state snapshots that are used in the resolve call are state after snapshots, they have the event inserted. --- src/server_server.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ad0a1a4..f55b377 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -768,9 +768,11 @@ pub async fn send_transaction_message_route<'a>( // find the leaves otherwise we would do this sooner append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + // This will create the state after any state snapshot it builds + // So current_state will have the incoming event inserted to it let mut fork_states = match build_forward_extremity_snapshots( &db, - pdu.room_id(), + pdu.clone(), server_name, current_state, &extremities, @@ -1195,14 +1197,14 @@ pub(crate) async fn calculate_forward_extremities( /// and the sending server). pub(crate) async fn build_forward_extremity_snapshots( db: &Database, - room_id: &RoomId, + pdu: Arc, origin: &ServerName, - current_state: StateMap>, + mut current_state: StateMap>, current_leaves: &[EventId], pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, ) -> Result>>> { - let current_hash = db.rooms.current_state_hash(room_id)?; + let current_hash = db.rooms.current_state_hash(pdu.room_id())?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); @@ -1219,7 +1221,7 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut state_before = db .rooms - .state_full(room_id, &state_hash)? + .state_full(pdu.room_id(), &state_hash)? 
.into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect::>(); @@ -1238,7 +1240,7 @@ pub(crate) async fn build_forward_extremity_snapshots( &db.globals, origin, get_room_state_ids::v1::Request { - room_id, + room_id: pdu.room_id(), event_id: id, }, ) @@ -1269,6 +1271,9 @@ pub(crate) async fn build_forward_extremity_snapshots( // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { + error!("Did not include current state"); + current_state.insert((pdu.kind(), pdu.state_key()), pdu); + fork_states.insert(current_state); } From 64374b4679f6b63dc36d3da7ab5d58753c1980d8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Feb 2021 12:44:30 -0500 Subject: [PATCH 052/103] Use eventId when saving outliers --- src/client_server/sync.rs | 2 +- src/database.rs | 2 +- src/database/rooms.rs | 76 +++++++++++++++++++++++++-------------- src/server_server.rs | 69 +++++++++++++++++------------------ 4 files changed, 84 insertions(+), 65 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 97b6ad2..6cd518d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -701,7 +701,7 @@ pub async fn sync_events_route( let delay = tokio::time::sleep(duration); tokio::pin!(delay); tokio::select! { - _ = &mut delay, if delay.is_elapsed() => {} + _ = &mut delay => {} _ = watcher => {} } } diff --git a/src/database.rs b/src/database.rs index a9cc362..b841ab9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - pduid_outlierpdu: db.open_tree("pduid_outlierpdu")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d459aee..ee8f0ab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,10 +27,9 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, - time::Duration, }; -use super::{admin::AdminCommand, sending::Sending}; +use super::admin::AdminCommand; /// The unique identifier of each state group. /// @@ -67,13 +66,16 @@ pub struct Rooms { pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) - /// Any pdu that has passed the steps up to auth with auth_events. - pub(super) pduid_outlierpdu: sled::Tree, + /// RoomId + EventId -> outlier PDU. + /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. + pub(super) eventid_outlierpdu: sled::Tree, } impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. + /// + /// TODO: Should this check for outliers, it does now. pub fn state_full( &self, room_id: &RoomId, @@ -90,7 +92,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .pduid_outlierpdu + .eventid_outlierpdu .get(pduid)? .map(|b| { serde_json::from_slice::(&b) @@ -118,6 +120,8 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + /// + /// TODO: Should this check for outliers, it does now. 
pub fn state_get( &self, room_id: &RoomId, @@ -149,7 +153,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, None => self - .pduid_outlierpdu + .eventid_outlierpdu .get(pdu_id)? .map(|b| { serde_json::from_slice::(&b) @@ -260,6 +264,8 @@ impl Rooms { } }; + // Because of outliers this could also be an eventID but that + // is handled by `state_full` let pdu_id_short = pdu_id .splitn(2, |&b| b == 0xff) .nth(1) @@ -325,9 +331,12 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => self + .eventid_outlierpdu + .get(event_id.as_bytes())? + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -342,6 +351,8 @@ impl Rooms { } /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -349,9 +360,12 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => self + .eventid_outlierpdu + .get(event_id.as_bytes())? + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -443,27 +457,34 @@ impl Rooms { /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - if let Some(id) = self.eventid_pduid.get(event_id.as_bytes())? { - self.pduid_outlierpdu.get(id)?.map_or(Ok(None), |pdu| { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) - } else { - Ok(None) - } } /// Returns true if the event_id was previously inserted. - pub fn append_pdu_outlier(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.pduid_outlierpdu.len()); + pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { + log::info!( + "Number of outlier pdu's {:#?}", + self.eventid_outlierpdu + .iter() + .map(|pair| { + let (_k, v) = pair.unwrap(); + serde_json::from_slice::(&v).unwrap() + }) + .collect::>() + ); - // we need to be able to find it by event_id - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + let mut key = pdu.room_id().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu.event_id().as_bytes()); let res = self - .pduid_outlierpdu + .eventid_outlierpdu .insert( - pdu_id, + &key, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), ) .map(|op| op.is_some())?; @@ -511,9 +532,10 @@ impl Rooms { } // We no longer keep this pdu as an outlier - if let Some(id) = self.eventid_pduid.remove(pdu.event_id().as_bytes())? 
{ - self.pduid_outlierpdu.remove(id)?; - } + let mut key = pdu.room_id().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu.event_id().as_bytes()); + self.eventid_outlierpdu.remove(key)?; self.replace_pdu_leaves(&pdu.room_id, leaves)?; diff --git a/src/server_server.rs b/src/server_server.rs index f55b377..5177f96 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,7 +18,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -638,12 +637,8 @@ pub async fn send_transaction_message_route<'a>( None }; - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; + db.rooms.append_pdu_outlier(&pdu)?; // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -1079,37 +1074,28 @@ pub(crate) async fn fetch_events( ) -> Result>> { let mut pdus = vec![]; for id in events { + // `get_pdu` checks the outliers tree for us let pdu = match db.rooms.get_pdu(&id)? { Some(pdu) => Arc::new(pdu), - None => match db.rooms.get_pdu_outlier(&id)? { - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = - validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; - // create the pduid for this event but stick it in the outliers DB - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; - pdu - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + db.rooms.append_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, }; pdus.push(pdu); @@ -1193,7 +1179,7 @@ pub(crate) async fn calculate_forward_extremities( /// This should always be called after the incoming event has been appended to the DB. /// -/// This guarentees that the incoming event will be in the state sets (at least our servers +/// This guarantees that the incoming event will be in the state sets (at least our servers /// and the sending server). 
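Outlier entries are keyed above by room id plus event id rather than by a counter-based pdu_id; a brief sketch of that key, assuming the identifier types used elsewhere in this diff (outlier_key is an illustrative name):

    fn outlier_key(room_id: &RoomId, event_id: &EventId) -> Vec<u8> {
        // Room id, 0xff separator, event id: outliers of one room share a key
        // prefix, and the key can be rebuilt from the PDU alone when the event
        // is later promoted into the timeline and removed from the outliers.
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(event_id.as_bytes());
        key
    }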
pub(crate) async fn build_forward_extremity_snapshots( db: &Database, @@ -1303,7 +1289,18 @@ pub(crate) fn update_resolved_state( ); } None => { - error!("We didn't append an event as an outlier\n{:?}", pdu); + let mut pduid = pdu.room_id().as_bytes().to_vec(); + pduid.push(0xff); + pduid.extend_from_slice(pdu.event_id().as_bytes()); + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pduid, + ); } } } From 591769d5f3fde6314ba84130898dc6202b9b5c98 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Feb 2021 17:02:56 -0500 Subject: [PATCH 053/103] Fiter PDU's before main incoming PDU loop --- src/database/rooms.rs | 11 +---- src/server_server.rs | 103 ++++++++++++++++++++++++++---------------- 2 files changed, 66 insertions(+), 48 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ee8f0ab..6ee29a6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -466,16 +466,7 @@ impl Rooms { /// Returns true if the event_id was previously inserted. pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { - log::info!( - "Number of outlier pdu's {:#?}", - self.eventid_outlierpdu - .iter() - .map(|pair| { - let (_k, v) = pair.unwrap(); - serde_json::from_slice::(&v).unwrap() - }) - .collect::>() - ); + log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index 5177f96..2cfbc6e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -533,6 +533,54 @@ pub async fn send_transaction_message_route<'a>( } } + let mut resolved_map = BTreeMap::new(); + + let pdus_to_resolve = body + .pdus + .iter() + .filter_map(|pdu| { + // 1. Is a valid event, otherwise it is dropped. + // Ruma/PduEvent/StateEvent satisfies this + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + + // If we have no idea about this room skip the PDU + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); + return None; + } + }; + + // 1. check the server is in the room (optional) + match db.rooms.exists(&room_id) { + Ok(true) => {} + _ => { + resolved_map + .insert(event_id, Err("Room is unknown to this server".to_string())); + return None; + } + } + + // If we know of this pdu we don't need to continue processing it + // + // This check is essentially + if let Ok(Some(_)) = db.rooms.get_pdu_id(&event_id) { + return None; + } + + Some((event_id, value)) + }) + .collect::>(); + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere? // SPEC: // Servers MUST strictly enforce the JSON format specified in the appendices. @@ -540,35 +588,7 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - let mut resolved_map = BTreeMap::new(); - 'main_pdu_loop: for pdu in &body.pdus { - // 1. Is a valid event, otherwise it is dropped. 
- // Ruma/PduEvent/StateEvent satisfies this - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); - - // If we have no idea about this room skip the PDU - let room_id = match value - .get("room_id") - .map(|id| match id { - CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), - _ => None, - }) - .flatten() - { - Some(id) => id, - None => { - resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); - continue; - } - }; - - // 1. check the server is in the room (optional) - if !db.rooms.exists(&room_id)? { - resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); - continue; - } - + 'main_pdu_loop: for (event_id, value) in pdus_to_resolve { let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -921,13 +941,13 @@ pub async fn send_transaction_message_route<'a>( )?; // Event has passed all auth/stateres checks - resolved_map.insert(pdu.event_id().clone(), Ok(())); } - Ok(send_transaction_message::v1::Response { - pdus: dbg!(resolved_map), + if !resolved_map.is_empty() { + warn!("These PDU's failed {:?}", resolved_map); } - .into()) + + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } /// An async function that can recursively calls itself. @@ -1139,6 +1159,7 @@ pub(crate) async fn calculate_forward_extremities( // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? if current_leaves.contains(pdu.event_id()) { + error!("The incoming event is already present in get_pdu_leaves BUG"); is_incoming_leaf = false; // Not sure what to do here } @@ -1147,11 +1168,12 @@ pub(crate) async fn calculate_forward_extremities( // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. // - // We first check if know of the event and then don't include it as a forward - // extremity if it is a timeline event - if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); - } + // We check this in the filter just before the main incoming PDU for loop + // so no already known event can make it this far. + // + // if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { + // is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); + // } // TODO: // [dendrite] Checks if any other leaves have been referenced and removes them @@ -1219,7 +1241,12 @@ pub(crate) async fn build_forward_extremity_snapshots( } fork_states.insert(state_before); + } else if id == pdu.event_id() { + // We add this snapshot after `build_forward_extremity_snapshots` is + // called which we requested from the sending server } else { + error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); + let res = db .sending .send_federation_request( From 74d530ae0eff76bbdd7a130cd17a645b5455676f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 20:00:01 -0500 Subject: [PATCH 054/103] Address review issues, fix forward extremity calc Keep track of all prev_events since if we know that an event is a prev_event it is referenced and does not qualify as a forward extremity. 
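A minimal sketch of the rule described above, using the prevevent_parent tree this patch adds (is_referenced is an illustrative name; the real check lives in Rooms::is_pdu_referenced below):

    fn is_referenced(
        prevevent_parent: &sled::Tree,
        room_id: &RoomId,
        event_id: &EventId,
    ) -> sled::Result<bool> {
        // append_pdu records every prev_event of an appended PDU under
        // room id + event id, so anything found here is already referenced
        // and must not be kept as a forward extremity.
        let mut key = room_id.as_bytes().to_vec();
        key.extend_from_slice(event_id.as_bytes());
        prevevent_parent.contains_key(key)
    }

This mirrors what calculate_forward_extremities does below: drop any current leaf that the incoming event lists in its prev_events, and only keep the incoming event as a new leaf if nothing references it yet.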
--- src/client_server/push.rs | 5 +- src/database.rs | 3 +- src/database/globals.rs | 8 +- src/database/rooms.rs | 79 +++++---- src/server_server.rs | 341 ++++++++++++++++++-------------------- 5 files changed, 220 insertions(+), 216 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 667d667..7c3e9d9 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -93,7 +93,10 @@ pub async fn get_pushrule_route( if let Some(rule) = rule { Ok(get_pushrule::Response { rule }.into()) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + Err(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + )) } } diff --git a/src/database.rs b/src/database.rs index b841ab9..3fb8442 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,8 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, + roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, + prevevent_parent: db.open_tree("prevevent_parent")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 2ed6a9f..00b4568 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -27,7 +27,11 @@ pub struct Globals { } impl Globals { - pub fn load(globals: sled::Tree, server_keys: sled::Tree, config: Config) -> Result { + pub fn load( + globals: sled::Tree, + servertimeout_signingkey: sled::Tree, + config: Config, + ) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? .expect("utils::generate_keypair always returns Some"); @@ -84,7 +88,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, - servertimeout_signingkey: server_keys, + servertimeout_signingkey, }) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6ee29a6..abe8c65 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -68,7 +68,9 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) roomeventid_outlierpdu: sled::Tree, + /// RoomId + EventId -> Parent PDU EventId. + pub(super) prevevent_parent: sled::Tree, } impl Rooms { @@ -92,7 +94,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(pduid)? .map(|b| { serde_json::from_slice::(&b) @@ -120,8 +122,6 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - /// - /// TODO: Should this check for outliers, it does now. pub fn state_get( &self, room_id: &RoomId, @@ -153,7 +153,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(pdu_id)? .map(|b| { serde_json::from_slice::(&b) @@ -203,7 +203,7 @@ impl Rooms { &event_type, &state_key .as_deref() - .expect("found a non state event in auth events"), + .ok_or_else(|| Error::bad_database("Saved auth event with no state key."))?, )? 
{ events.insert((event_type, state_key), pdu); } @@ -248,7 +248,7 @@ impl Rooms { let mut prefix = state_hash.to_vec(); prefix.push(0xff); - for ((event_type, state_key), pdu_id) in state { + for ((event_type, state_key), id_long) in state { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); @@ -266,7 +266,7 @@ impl Rooms { // Because of outliers this could also be an eventID but that // is handled by `state_full` - let pdu_id_short = pdu_id + let pdu_id_short = id_long .splitn(2, |&b| b == 0xff) .nth(1) .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; @@ -332,7 +332,7 @@ impl Rooms { serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(event_id.as_bytes())? .ok_or_else(|| { Error::bad_database("Event is not in pdu tree or outliers.") @@ -360,12 +360,10 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self - .eventid_outlierpdu - .get(event_id.as_bytes())? - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => match self.roomeventid_outlierpdu.get(event_id.as_bytes())? { + Some(b) => b, + None => return Ok(None), + }, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -373,6 +371,8 @@ impl Rooms { } /// Returns the pdu. + /// + /// This does __NOT__ check the outliers `Tree`. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -436,7 +436,7 @@ impl Rooms { /// Replace the leaves of a room. /// - /// The provided `event_ids` become the new leaves, this enables an event having multiple + /// The provided `event_ids` become the new leaves, this allows a room to have multiple /// `prev_events`. pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); @@ -455,31 +455,42 @@ impl Rooms { Ok(()) } + pub fn is_pdu_referenced(&self, pdu: &PduEvent) -> Result { + let mut key = pdu.room_id().as_bytes().to_vec(); + key.extend_from_slice(pdu.event_id().as_bytes()); + self.prevevent_parent.contains_key(key).map_err(Into::into) + } + /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu + self.roomeventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) } - /// Returns true if the event_id was previously inserted. - pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + /// Append the PDU as an outlier. + /// + /// Any event given to this will be processed (state-res) on another thread. 
+ pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { + log::info!( + "Number of outlier pdu's {}", + self.roomeventid_outlierpdu.len() + ); let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - let res = self - .eventid_outlierpdu - .insert( - &key, - &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), - ) - .map(|op| op.is_some())?; - Ok(res) + self.eventid_pduid + .insert(pdu.event_id().as_bytes(), key.as_slice())?; + + self.roomeventid_outlierpdu.insert( + &key, + &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + )?; + Ok(()) } /// Creates a new persisted data unit and adds it to a room. @@ -526,7 +537,15 @@ impl Rooms { let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - self.eventid_outlierpdu.remove(key)?; + self.roomeventid_outlierpdu.remove(key)?; + + // We must keep track of all events that have been referenced. + for leaf in leaves { + let mut key = pdu.room_id().as_bytes().to_vec(); + key.extend_from_slice(leaf.as_bytes()); + self.prevevent_parent + .insert(key, pdu.event_id().as_bytes())?; + } self.replace_pdu_leaves(&pdu.room_id, leaves)?; @@ -541,6 +560,8 @@ impl Rooms { .expect("CanonicalJsonObject is always a valid String"), )?; + // This also replaces the eventid of any outliers with the correct + // pduid, removing the place holder. self.eventid_pduid .insert(pdu.event_id.as_bytes(), &*pdu_id)?; diff --git a/src/server_server.rs b/src/server_server.rs index 2cfbc6e..48d5956 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -571,8 +571,6 @@ pub async fn send_transaction_message_route<'a>( } // If we know of this pdu we don't need to continue processing it - // - // This check is essentially if let Ok(Some(_)) = db.rooms.get_pdu_id(&event_id) { return None; } @@ -664,64 +662,66 @@ pub async fn send_transaction_message_route<'a>( // the checks in this list starting at 1. These are not timeline events. // // Step 10. 
check the auth of the event passes based on the calculated state of the event - let (mut state_at_event, incoming_auth_events): ( - StateMap>, - Vec>, - ) = match db - .sending - .send_federation_request( - &db.globals, - server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - let state = fetch_events( - &db, + // + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event + let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = + match db + .sending + .send_federation_request( + &db.globals, server_name, - &pub_key_map, - &res.pdu_ids, - &mut auth_cache, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, ) - .await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - ( - state, - fetch_events( + .await + { + Ok(res) => { + let state = fetch_events( &db, server_name, &pub_key_map, - &res.auth_chain_ids, + &res.pdu_ids, &mut auth_cache, ) - .await?, - ) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + error!("Server sent us an invalid state"); + continue; + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect(); + + ( + state, + fetch_events( + &db, + server_name, + &pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await?, + ) + } + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( @@ -764,6 +764,7 @@ pub async fn send_transaction_message_route<'a>( pdu.event_id().clone(), Err("Event has been soft failed".into()), ); + continue; }; // Step 11. Ensure that the state is derived from the previous current state (i.e. we calculated by doing state res @@ -779,10 +780,6 @@ pub async fn send_transaction_message_route<'a>( } }; - // Now that the event has passed all auth it is added into the timeline, we do have to - // find the leaves otherwise we would do this sooner - append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - // This will create the state after any state snapshot it builds // So current_state will have the incoming event inserted to it let mut fork_states = match build_forward_extremity_snapshots( @@ -805,10 +802,11 @@ pub async fn send_transaction_message_route<'a>( // Make this the state after (since we appended_incoming_pdu this should agree with our servers // current state). 
- state_at_event.insert((pdu.kind(), pdu.state_key()), pdu.clone()); - // add the incoming events to the mix of state snapshots + let mut state_after = state_at_event.clone(); + state_after.insert((pdu.kind(), pdu.state_key()), pdu.clone()); + // Add the incoming event to the mix of state snapshots // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets - fork_states.insert(state_at_event.clone()); + fork_states.insert(state_after.clone()); let fork_states = fork_states.into_iter().collect::>(); @@ -826,39 +824,27 @@ pub async fn send_transaction_message_route<'a>( update_state = true; // TODO: remove this is for current debugging Jan, 15 2021 - let mut number_fetches = 0_u32; let mut auth_events = vec![]; for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { let event = match auth_cache.get(auth_id) { Some(aev) => aev.clone(), - // We should know about every event at this point but just incase... - None => match fetch_events( - &db, - server_name, - &pub_key_map, - &[auth_id.clone()], - &mut auth_cache, - ) - .await - .map(|mut vec| { - number_fetches += 1; - vec.pop() - }) { - Ok(Some(aev)) => aev, - _ => { - resolved_map - .insert(event_id.clone(), Err("Failed to fetch event".into())); - continue 'main_pdu_loop; - } - }, + // The only events that haven't been added to the auth cache are + // events we have knowledge of previously + None => { + error!("Event was not present in auth_cache {}", auth_id); + resolved_map.insert( + event_id.clone(), + Err("Event was not present in auth cache".into()), + ); + continue 'main_pdu_loop; + } }; state_auth.push(event); } auth_events.push(state_auth); } - info!("{} event's were not in the auth_cache", number_fetches); // Add everything we will need to event_map auth_cache.extend( @@ -873,7 +859,7 @@ pub async fn send_transaction_message_route<'a>( .map(|pdu| (pdu.event_id().clone(), pdu)), ); auth_cache.extend( - state_at_event + state_after .into_iter() .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); @@ -911,17 +897,12 @@ pub async fn send_transaction_message_route<'a>( let pdu = match auth_cache.get(&id) { Some(pdu) => pdu.clone(), None => { - match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache) - .await - .map(|mut vec| vec.pop()) - { - Ok(Some(aev)) => aev, - _ => { - resolved_map - .insert(event_id.clone(), Err("Failed to fetch event".into())); - continue 'main_pdu_loop; - } - } + error!("Event was not present in auth_cache {}", id); + resolved_map.insert( + event_id.clone(), + Err("Event was not present in auth cache".into()), + ); + continue 'main_pdu_loop; } }; resolved.insert(k, pdu); @@ -929,7 +910,12 @@ pub async fn send_transaction_message_route<'a>( resolved }; - // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + + // Set the new room state to the resolved state update_resolved_state( &db, pdu.room_id(), @@ -1046,8 +1032,6 @@ fn validate_event<'a>( /// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// The check in `fetch_check_auth_events` is that a complete chain is found for the /// events `auth_events`. If the chain is found to have any missing events it fails. 
-/// -/// The `auth_cache` is filled instead of returning a `Vec`. async fn fetch_check_auth_events( db: &Database, origin: &ServerName, @@ -1073,7 +1057,6 @@ async fn fetch_check_auth_events( })??; stack.extend(ev.auth_events()); - auth_cache.insert(ev.event_id().clone(), ev); } Ok(()) } @@ -1085,6 +1068,9 @@ async fn fetch_check_auth_events( /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation /// 4. TODO: Ask other servers over federation? +/// +/// If the event is unknown to the `auth_cache` it is added. This guarantees that any +/// event we need to know of will be present. pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, @@ -1118,6 +1104,7 @@ pub(crate) async fn fetch_events( Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, }; + auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); pdus.push(pdu); } Ok(pdus) @@ -1167,13 +1154,9 @@ pub(crate) async fn calculate_forward_extremities( // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - // - // We check this in the filter just before the main incoming PDU for loop - // so no already known event can make it this far. - // - // if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - // is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); - // } + if db.rooms.is_pdu_referenced(pdu)? { + is_incoming_leaf = false; + } // TODO: // [dendrite] Checks if any other leaves have been referenced and removes them @@ -1217,74 +1200,79 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); for id in current_leaves { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); + match db.rooms.get_pdu_id(id)? { + // We can skip this because it is handled outside of this function + // The current server state and incoming event state are built to be + // the state after. + // This would be the incoming state from the server. + Some(_) if id == pdu.event_id() => {} + Some(pduid) if db.rooms.get_pdu_from_id(&pduid)?.is_some() => { + let state_hash = db + .rooms + .pdu_state_hash(&pduid)? + .expect("found pdu with no statehash"); - if current_hash.as_ref() == Some(&state_hash) { - includes_current_state = true; + if current_hash.as_ref() == Some(&state_hash) { + includes_current_state = true; + } + + let mut state_before = db + .rooms + .state_full(pdu.room_id(), &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect::>(); + + // Now it's the state after + if let Some(pdu) = db.rooms.get_pdu_from_id(&pduid)? { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, Arc::new(pdu)); + } + + fork_states.insert(state_before); } + _ => { + error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - let mut state_before = db - .rooms - .state_full(pdu.room_id(), &state_hash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect::>(); + let res = db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: id, + }, + ) + .await?; - // Now it's the state after - if let Some(pdu) = db.rooms.get_pdu_from_id(&id)? 
{ - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, Arc::new(pdu)); + // TODO: This only adds events to the auth_cache, there is for sure a better way to + // do this... + fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; + + let mut state_before = + fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) + .await? + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect::>(); + + if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) + .await? + .pop() + { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, pdu); + } + + // Now it's the state after + fork_states.insert(state_before); } - - fork_states.insert(state_before); - } else if id == pdu.event_id() { - // We add this snapshot after `build_forward_extremity_snapshots` is - // called which we requested from the sending server - } else { - error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - - let res = db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: id, - }, - ) - .await?; - - // TODO: This only adds events to the auth_cache, there is for sure a better way to - // do this... - fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - - let mut state_before = fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) - .await? - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect::>(); - - if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) - .await? - .pop() - { - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, pdu); - } - - // Now it's the state after - fork_states.insert(state_before); } } // This guarantees that our current room state is included - if !includes_current_state && current_hash.is_some() { - error!("Did not include current state"); + if !includes_current_state { current_state.insert((pdu.kind(), pdu.state_key()), pdu); fork_states.insert(current_state); @@ -1316,18 +1304,7 @@ pub(crate) fn update_resolved_state( ); } None => { - let mut pduid = pdu.room_id().as_bytes().to_vec(); - pduid.push(0xff); - pduid.extend_from_slice(pdu.event_id().as_bytes()); - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pduid, - ); + error!("We are missing a state event for the current room state."); } } } @@ -1349,9 +1326,9 @@ pub(crate) fn append_incoming_pdu( // Update the state of the room if needed // We can tell if we need to do this based on wether state resolution took place or not let mut new_state = HashMap::new(); - for ((ev_type, state_k), pdu) in state { - match db.rooms.get_pdu_id(pdu.event_id())? { - Some(pduid) => { + for ((ev_type, state_k), state_pdu) in state { + match db.rooms.get_pdu_id(state_pdu.event_id())? 
{ + Some(state_pduid) => { new_state.insert( ( ev_type.clone(), @@ -1359,12 +1336,10 @@ pub(crate) fn append_incoming_pdu( .clone() .ok_or_else(|| Error::Conflict("State contained non state event"))?, ), - pduid.to_vec(), + state_pduid.to_vec(), ); } - None => { - error!("We didn't append an event as an outlier\n{:?}", pdu); - } + None => error!("We are missing a state event for the incoming event snapshot"), } } From 48601142f8afe96042eec0bdade94056f4054a99 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 22:48:43 -0500 Subject: [PATCH 055/103] Use auth_cache to avoid db, save state for every event when joining --- src/client_server/membership.rs | 14 +++------ src/database.rs | 1 + src/database/rooms.rs | 55 ++++++++++++++++++++++++++++++++- src/server_server.rs | 53 ++++++++++++++++++------------- 4 files changed, 90 insertions(+), 33 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 1159185..99c0b62 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -643,8 +643,6 @@ async fn join_room_by_id_helper( ) .expect("iterative auth check failed on resolved events"); - let mut state = HashMap::new(); - // filter the events that failed the auth check keeping the remaining events // sorted correctly for ev_id in sorted_event_ids @@ -660,24 +658,20 @@ async fn join_room_by_id_helper( let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + + let hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + db.rooms.append_pdu( &pdu, utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - // TODO: can we simplify the DAG or should we copy it exactly?? &pdu.prev_events, &db, )?; - if state_events.contains(ev_id) { - if let Some(key) = &pdu.state_key { - state.insert((pdu.kind(), key.to_string()), pdu_id); - } - } + db.rooms.set_room_state(room_id, &hash)?; } - - db.rooms.force_state(room_id, state, &db.globals)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/database.rs b/src/database.rs index 3fb8442..35b7bcd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -161,6 +161,7 @@ impl Database { roomid_statehash: db.open_tree("roomid_statehash")?, roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, + roomeventid_outlierpducount: db.open_tree("roomeventid_outlierpducount")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index abe8c65..43d5f7d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -69,6 +69,10 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. pub(super) roomeventid_outlierpdu: sled::Tree, + /// RoomId + EventId -> count of the last known pdu when the outlier was inserted. + /// This allows us to skip any state snapshots that would for sure not have the outlier. + pub(super) roomeventid_outlierpducount: sled::Tree, + /// RoomId + EventId -> Parent PDU EventId. 
pub(super) prevevent_parent: sled::Tree, } @@ -323,6 +327,15 @@ impl Rooms { .map_or(Ok(None), |pdu_id| self.pdu_count(&pdu_id).map(Some)) } + pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { + self.pduid_pdu + .scan_prefix(room_id.as_bytes()) + .last() + .map(|b| self.pdu_count(&b?.0)) + .transpose() + .map(|op| op.unwrap_or_default()) + } + /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid @@ -490,6 +503,8 @@ impl Rooms { &key, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), )?; + self.roomeventid_outlierpducount + .insert(&key, &self.latest_pdu_count(pdu.room_id())?.to_be_bytes())?; Ok(()) } @@ -537,7 +552,45 @@ impl Rooms { let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - self.roomeventid_outlierpdu.remove(key)?; + if self.roomeventid_outlierpdu.remove(&key)?.is_some() { + if let Some(state_key) = pdu.state_key.as_deref() { + let mut statekey = pdu.kind().as_ref().as_bytes().to_vec(); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = match self.statekey_short.get(&statekey)? { + Some(short) => utils::u64_from_bytes(&short).map_err(|_| { + Error::bad_database("Invalid short bytes in statekey_short.") + })?, + None => { + error!( + "This event has been inserted into the state snapshot tree previously." + ); + let short = db.globals.next_count()?; + self.statekey_short + .insert(&statekey, &short.to_be_bytes())?; + short + } + }; + + let mut start = pdu.room_id().as_bytes().to_vec(); + start.extend_from_slice( + &self + .roomeventid_outlierpducount + .get(&key)? + .unwrap_or_default(), + ); + for hash in self.pduid_statehash.range(start..).values() { + let mut hash = hash?.to_vec(); + hash.extend_from_slice(&short.to_be_bytes()); + + let _ = self.stateid_pduid.compare_and_swap( + hash, + Some(pdu.event_id().as_bytes()), + Some(pdu_id.as_ref()), + )?; + } + } + } // We must keep track of all events that have been referenced. for leaf in leaves { diff --git a/src/server_server.rs b/src/server_server.rs index 48d5956..780109c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -971,6 +971,7 @@ fn validate_event<'a>( } } Err(_e) => { + error!("{}", _e); return Err("Signature verification failed".to_string()); } }; @@ -988,7 +989,7 @@ fn validate_event<'a>( fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) .await - .map_err(|_| "Event failed auth chain check".to_string())?; + .map_err(|e| e.to_string())?; let pdu = Arc::new(pdu.clone()); @@ -1064,6 +1065,7 @@ async fn fetch_check_auth_events( /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// +/// 0. Look in the auth_cache /// 1. Look in the main timeline (pduid_pdu tree) /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation @@ -1080,28 +1082,35 @@ pub(crate) async fn fetch_events( ) -> Result>> { let mut pdus = vec![]; for id in events { - // `get_pdu` checks the outliers tree for us - let pdu = match db.rooms.get_pdu(&id)? 
{ - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + let pdu = match auth_cache.get(id) { + Some(pdu) => pdu.clone(), + // `get_pdu` checks the outliers tree for us + None => match db.rooms.get_pdu(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|e| { + error!("{:?}", e); + Error::Conflict("Authentication of event failed") + })?; - db.rooms.append_pdu_outlier(&pdu)?; - pdu - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + db.rooms.append_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, }, }; auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); From 8f283510aa93189f6845a2950da32c8fb49fc1f5 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 23:01:31 -0500 Subject: [PATCH 056/103] Fix unused import clippy warning --- Cargo.lock | 52 ++++++++++++++++++++++++++------- src/client_server/membership.rs | 6 ++-- src/client_server/session.rs | 3 +- src/server_server.rs | 3 +- 4 files changed, 46 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c7381be..2565a35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,6 +97,12 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + [[package]] name = "base64" version = "0.13.0" @@ -172,7 +178,7 @@ dependencies = [ "num-integer", "num-traits", "time 0.1.43", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -185,7 +191,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64", + "base64 0.13.0", "directories", "http", "image", @@ -227,7 +233,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.24", + "time 0.2.25", "version_check", ] @@ -871,6 +877,20 @@ dependencies = [ "serde", ] +[[package]] +name = "jsonwebtoken" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +dependencies = [ + "base64 0.12.3", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1509,7 +1529,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd281b1030aa675fb90aa994d07187645bb3c8fc756ca766e7c3070b439de9de" dependencies = [ - "base64", + "base64 0.13.0", "bytes", "encoding_rs", "futures-core", @@ -1584,8 
+1604,8 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.24", - "tokio 1.0.2", + "time 0.2.25", + "tokio", "ubyte", "version_check", "yansi", @@ -1622,8 +1642,8 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.24", - "tokio 1.0.2", + "time 0.2.25", + "tokio", "tokio-rustls", "uncased", "unicode-xid", @@ -1819,7 +1839,7 @@ name = "ruma-signatures" version = "0.6.0-alpha.1" source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ - "base64", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -1833,7 +1853,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1854,7 +1874,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64", + "base64 0.13.0", "log", "ring", "sct", @@ -2189,6 +2209,16 @@ dependencies = [ "once_cell", ] +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "time" version = "0.2.25" diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 99c0b62..211388e 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,9 +21,9 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::Event; +// use state_res::Event; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashSet}, convert::TryFrom, iter, sync::Arc, @@ -557,7 +557,7 @@ async fn join_room_by_id_helper( let room_state = send_join_response.room_state.state.iter().map(add_event_id); - let state_events = room_state + let _state_events = room_state .clone() .map(|pdu: Result<(EventId, CanonicalJsonObject)>| Ok(pdu?.0)) .chain(iter::once(Ok(event_id.clone()))) // Add join event we just created diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 1b2583c..f8d64f0 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -117,8 +117,7 @@ pub async fn login_route( let device_exists = body.device_id.as_ref().map_or(false, |device_id| { db.users .all_device_ids(&user_id) - .find(|x| x.as_ref().map_or(false, |v| v == device_id)) - .is_some() + .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); if device_exists { diff --git a/src/server_server.rs b/src/server_server.rs index 780109c..1e81d5e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -800,8 +800,7 @@ pub async fn send_transaction_message_route<'a>( } }; - // Make this the state after (since we appended_incoming_pdu this should agree with our servers - // current state). + // Make this the state after. 
let mut state_after = state_at_event.clone(); state_after.insert((pdu.kind(), pdu.state_key()), pdu.clone()); // Add the incoming event to the mix of state snapshots From 0cc6448dbe1d31d7e4f84f27ab9ca957b69ebe0f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 9 Feb 2021 17:58:40 -0500 Subject: [PATCH 057/103] Temp disable rust_2018_idioms for CI --- Cargo.lock | 212 +++++++++++++--------------------------------------- src/main.rs | 2 +- 2 files changed, 53 insertions(+), 161 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2565a35..956e372 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,27 +33,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" -[[package]] -name = "async-stream" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c" -dependencies = [ - "async-stream-impl", - "futures-core", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "async-trait" version = "0.1.42" @@ -134,9 +113,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07aa6688c702439a1be0307b6a94dffe1168569e45b9500c1372bc580740d59" +checksum = "099e596ef14349721d9016f6b80dd3419ea1bf289ab9b44df8e4dfd3a005d5d9" [[package]] name = "bytemuck" @@ -195,7 +174,6 @@ dependencies = [ "directories", "http", "image", - "js_int", "jsonwebtoken", "log", "rand 0.7.3", @@ -211,7 +189,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio 1.0.2", + "tokio", "trust-dns-resolver", ] @@ -373,9 +351,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.26" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ "cfg-if", ] @@ -602,28 +580,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 0.2.24", - "tokio-util 0.3.1", - "tracing", - "tracing-futures", -] - -[[package]] -name = "h2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" -dependencies = [ - "bytes 1.0.1", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 1.0.2", - "tokio-util 0.6.1", + "tokio", + "tokio-util", "tracing", "tracing-futures", ] @@ -684,21 +642,11 @@ dependencies = [ "http", ] -[[package]] -name = "http-body" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" -dependencies = [ - "bytes 1.0.1", - "http", -] - [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name = 
"httpdate" @@ -708,47 +656,23 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.2.7", + "h2", "http", - "http-body 0.3.1", + "http-body", "httparse", "httpdate", "itoa", - "pin-project 1.0.4", + "pin-project 1.0.5", "socket2", - "tokio 0.2.24", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" -dependencies = [ - "bytes 1.0.1", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.0", - "http", - "http-body 0.4.0", - "httparse", - "httpdate", - "itoa", - "pin-project 1.0.4", - "socket2", - "tokio 1.0.2", + "tokio", "tower-service", "tracing", "want", @@ -769,9 +693,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "de910d521f7cc3135c4de8db1cb910e0b5ed1dc6f57c381cd07e8e661ce10094" dependencies = [ "matches", "unicode-bidi", @@ -780,9 +704,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ce04077ead78e39ae8610ad26216aed811996b043d47beed5090db674f9e9b5" +checksum = "293f07a1875fa7e9c5897b51aa68b2d8ed8271b87e1a44cb64b9c3d98aabbc0d" dependencies = [ "bytemuck", "byteorder", @@ -899,9 +823,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" +checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" [[package]] name = "linked-hash-map" @@ -1007,16 +931,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2", - "winapi 0.3.9", -] - [[package]] name = "native-tls" version = "0.2.7" @@ -1044,15 +958,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -1246,11 +1151,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" dependencies = [ - "pin-project-internal 1.0.4", + "pin-project-internal 1.0.5", ] [[package]] @@ -1266,9 +1171,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.4" +version = 
"1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" dependencies = [ "proc-macro2", "quote", @@ -1535,8 +1440,8 @@ dependencies = [ "futures-core", "futures-util", "http", - "http-body 0.3.1", - "hyper 0.13.9", + "http-body", + "hyper", "hyper-tls", "ipnet", "js-sys", @@ -1569,9 +1474,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -1631,7 +1536,7 @@ dependencies = [ "cookie", "either", "http", - "hyper 0.14.2", + "hyper", "indexmap", "log", "mime", @@ -1973,9 +1878,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" +checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" dependencies = [ "itoa", "ryu", @@ -1996,9 +1901,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "971be8f6e4d4a47163b405a3df70d14359186f9ab0f3a3ec37df144ca1ce089f" +checksum = "bdd2af560da3c1fdc02cb80965289254fc35dff869810061e2d8290ee48848ae" dependencies = [ "dtoa", "linked-hash-map", @@ -2079,9 +1984,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" dependencies = [ "version_check", ] @@ -2202,9 +2107,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ "once_cell", ] @@ -2274,9 +2179,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8efab2086f17abcddb8f756117665c958feee6b2e39974c2f1600592ab3a4195" +checksum = "e8190d04c665ea9e6b6a0dc45523ade572c088d2e6566244c1122671dbf4ae3a" dependencies = [ "autocfg", "bytes", @@ -2293,9 +2198,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" +checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" dependencies = [ "proc-macro2", "quote", @@ -2319,35 +2224,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.0.2", + "tokio", "webpki", ] -[[package]] -name = "tokio-stream" -version = 
"0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-util" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb971a26599ffd28066d387f109746df178eff14d5ea1e235015c5601967a4b" +checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" dependencies = [ - "async-stream", "bytes", "futures-core", "futures-sink", "log", "pin-project-lite", "tokio", - "tokio-stream", ] [[package]] @@ -2367,9 +2259,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "f7d40a22fd029e33300d8d89a5cc8ffce18bb7c587662f54629e94c9de5487f3" dependencies = [ "cfg-if", "pin-project-lite", @@ -2415,7 +2307,7 @@ dependencies = [ "rand 0.8.3", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "url", ] @@ -2435,7 +2327,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "trust-dns-proto", ] @@ -2474,9 +2366,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] diff --git a/src/main.rs b/src/main.rs index e5c0399..9b64506 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,4 @@ -#![warn(rust_2018_idioms)] +// #![warn(rust_2018_idioms)] pub mod appservice_server; pub mod client_server; From 0dd8a15c4900ec9ba6fe1764b5ca31c4575bb199 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 27 Feb 2021 16:09:22 -0500 Subject: [PATCH 058/103] Fix leaves not being replaced by correct eventId in membership Update ruma --- Cargo.lock | 62 ++++++++++++++++++++++++--------- Cargo.toml | 5 +-- src/client_server/membership.rs | 2 +- src/database/key_backups.rs | 32 ++++++++++------- 4 files changed, 69 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 956e372..d9dbbf2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1558,7 +1558,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "js_int", @@ -1569,6 +1569,8 @@ dependencies = [ "ruma-events", "ruma-federation-api", "ruma-identifiers", + "ruma-identity-service-api", + "ruma-push-gateway-api", "ruma-serde", "ruma-signatures", ] @@ -1576,7 +1578,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "http", "percent-encoding", @@ -1591,7 +1593,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" 
version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1602,7 +1604,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1616,7 +1618,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "http", @@ -1635,7 +1637,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "maplit", @@ -1648,7 +1650,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-common", @@ -1662,7 +1664,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1673,7 +1675,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ -1688,7 +1690,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "paste", "rand 0.8.3", @@ -1702,7 +1704,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = 
"git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro2", "quote", @@ -1713,12 +1715,40 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" + +[[package]] +name = "ruma-identity-service-api" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" +dependencies = [ + "ruma-api", + "ruma-common", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-push-gateway-api" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" +dependencies = [ + "js_int", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "form_urlencoded", "itoa", @@ -1731,7 +1761,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1742,7 +1772,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "base64 0.13.0", "ring", @@ -2000,7 +2030,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=791c66d73cf064d09db0cdf767d5fef43a343425#791c66d73cf064d09db0cdf767d5fef43a343425" +source = "git+https://github.com/ruma/state-res?branch=main#d34a78c5b66de419862d9e592bde8e0007111ebd" dependencies = [ "itertools", "log", diff --git a/Cargo.toml b/Cargo.toml index de6a966..4a901e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,15 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", 
"unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } +# state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 211388e..e3b1827 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -666,7 +666,7 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &pdu.prev_events, + &[pdu.event_id.clone()], &db, )?; diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index a50e45e..4c65354 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -2,7 +2,7 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, - r0::backup::{BackupAlgorithm, KeyData, Sessions}, + r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, }, RoomId, UserId, }; @@ -129,7 +129,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - key_data: &KeyData, + key_data: &KeyBackupData, globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.to_string().as_bytes().to_vec(); @@ -153,7 +153,7 @@ impl KeyBackups { self.backupkeyid_backup.insert( &key, - &*serde_json::to_string(&key_data).expect("KeyData::to_string always works"), + &*serde_json::to_string(&key_data).expect("KeyBackupData::to_string always works"), )?; Ok(()) @@ -182,13 +182,17 @@ impl KeyBackups { .to_string()) } - pub fn get_all(&self, user_id: &UserId, version: &str) -> Result> { + pub fn get_all( + &self, + user_id: &UserId, + version: &str, + ) -> Result> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::::new(); + let mut rooms = BTreeMap::::new(); for result in self.backupkeyid_backup.scan_prefix(&prefix).map(|r| { let (key, value) = r?; @@ -211,15 +215,16 @@ impl KeyBackups { ) .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid room id."))?; - let key_data = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid."))?; + let key_data = serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + })?; Ok::<_, 
Error>((room_id, session_id, key_data)) }) { let (room_id, session_id, key_data) = result?; rooms .entry(room_id) - .or_insert_with(|| Sessions { + .or_insert_with(|| RoomKeyBackup { sessions: BTreeMap::new(), }) .sessions @@ -234,7 +239,7 @@ impl KeyBackups { user_id: &UserId, version: &str, room_id: &RoomId, - ) -> BTreeMap { + ) -> BTreeMap { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -257,7 +262,7 @@ impl KeyBackups { })?; let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyData in backupkeyid_backup is invalid.") + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") })?; Ok::<_, Error>((session_id, key_data)) @@ -272,7 +277,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - ) -> Result> { + ) -> Result> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -284,8 +289,9 @@ impl KeyBackups { self.backupkeyid_backup .get(&key)? .map(|value| { - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid.")) + serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + }) }) .transpose() } From f3253f2033691ec47719335d8e0c04b684c48899 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 28 Feb 2021 18:53:17 -0500 Subject: [PATCH 059/103] Move comments about Rooms trees to doc comments --- src/database/rooms.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 43d5f7d..4ad499c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -63,8 +63,11 @@ pub struct Rooms { /// Remember the state hash at events in the past. pub(super) pduid_statehash: sled::Tree, /// The state for a given state hash. - pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count - pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) + /// + /// StateKey = EventType + StateKey, Short = Count + pub(super) statekey_short: sled::Tree, + /// StateId = StateHash + Short, PduId = Count (without roomid) + pub(super) stateid_pduid: sled::Tree, /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. 
@@ -583,11 +586,11 @@ impl Rooms { let mut hash = hash?.to_vec(); hash.extend_from_slice(&short.to_be_bytes()); - let _ = self.stateid_pduid.compare_and_swap( + let _ = dbg!(self.stateid_pduid.compare_and_swap( hash, Some(pdu.event_id().as_bytes()), Some(pdu_id.as_ref()), - )?; + )?); } } } @@ -921,12 +924,12 @@ impl Rooms { content.clone(), prev_event, None, // TODO: third party invite - &auth_events + dbg!(&auth_events .iter() .map(|((ty, key), pdu)| { Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) - .collect::>>()?, + .collect::>>()?), ) .map_err(|e| { log::error!("{}", e); From c9f4ff5cf8e20dba0e6dfc24de6acb83458e3b2d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Mar 2021 08:23:28 -0500 Subject: [PATCH 060/103] Ask multiple servers for keys when not known or sending server failed --- src/database/rooms.rs | 2 +- src/server_server.rs | 61 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 53 insertions(+), 10 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4ad499c..992c97c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1605,7 +1605,7 @@ impl Rooms { }) } - /// Returns an iterator over all joined members of a room. + /// Returns an iterator of all servers participating in this room. pub fn room_servers(&self, room_id: &RoomId) -> impl Iterator>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index 1e81d5e..58c4b33 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -8,8 +8,8 @@ use ruma::{ federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ - get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, - VerifyKey, + get_remote_server_keys, get_server_keys, + get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, query::get_profile_information, @@ -575,7 +575,7 @@ pub async fn send_transaction_message_route<'a>( return None; } - Some((event_id, value)) + Some((event_id, room_id, value)) }) .collect::>(); @@ -586,7 +586,7 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - 'main_pdu_loop: for (event_id, value) in pdus_to_resolve { + 'main_pdu_loop: for (event_id, room_id, value) in pdus_to_resolve { let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -595,7 +595,7 @@ pub async fn send_transaction_message_route<'a>( UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); let origin = sender.server_name(); - let keys = match fetch_signing_keys(&db, origin).await { + let keys = match fetch_signing_keys(&db, &room_id, origin).await { Ok(keys) => keys, Err(_) => { resolved_map.insert( @@ -1122,18 +1122,61 @@ pub(crate) async fn fetch_events( /// fetch them from the server and save to our DB. pub(crate) async fn fetch_signing_keys( db: &Database, + room_id: &RoomId, origin: &ServerName, ) -> Result> { match db.globals.signing_keys_for(origin)? 
{ keys if !keys.is_empty() => Ok(keys), _ => { - let keys = db + match db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) .await - .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?; - db.globals.add_signing_key(origin, &keys.server_key)?; - Ok(keys.server_key.verify_keys) + { + Ok(keys) => { + db.globals.add_signing_key(origin, &keys.server_key)?; + Ok(keys.server_key.verify_keys) + } + _ => { + for server in db.rooms.room_servers(room_id) { + let server = server?; + if let Ok(keys) = db + .sending + .send_federation_request( + &db.globals, + &server, + get_remote_server_keys::v2::Request::new( + &server, + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ), + ) + .await + { + let keys: Vec = keys.server_keys; + let key = keys.into_iter().fold(None, |mut key, next| { + if let Some(verified) = &key { + // rustc cannot elide this type for some reason + let v: &ServerSigningKeys = verified; + if v.verify_keys + .iter() + .zip(next.verify_keys.iter()) + .all(|(a, b)| a.1.key == b.1.key) + { + } + } else { + key = Some(next) + } + key + }); + } + } + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) + } + } } } } From 79c9de98cd6699df8647b70ef24d9dd0889a497a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Mar 2021 09:17:53 -0500 Subject: [PATCH 061/103] Add trusted_servers, filter servers to query keys by trusted_servers --- src/database.rs | 2 ++ src/database/globals.rs | 4 ++++ src/server_server.rs | 16 +++++++++++++--- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/database.rs b/src/database.rs index 35b7bcd..20cc7e1 100644 --- a/src/database.rs +++ b/src/database.rs @@ -39,6 +39,8 @@ pub struct Config { #[serde(default = "false_fn")] allow_federation: bool, jwt_secret: Option, + #[serde(default = "Vec::new")] + trusted_servers: Vec>, } fn false_fn() -> bool { diff --git a/src/database/globals.rs b/src/database/globals.rs index 00b4568..3c65e74 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -139,6 +139,10 @@ impl Globals { self.config.allow_federation } + pub fn trusted_servers(&self) -> &[Box] { + &self.config.trusted_servers + } + pub fn dns_resolver(&self) -> &TokioAsyncResolver { &self.dns_resolver } diff --git a/src/server_server.rs b/src/server_server.rs index 58c4b33..dcd72f7 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1138,7 +1138,9 @@ pub(crate) async fn fetch_signing_keys( Ok(keys.server_key.verify_keys) } _ => { - for server in db.rooms.room_servers(room_id) { + for server in db.rooms.room_servers(room_id).filter( + |ser| matches!(ser, Ok(s) if db.globals.trusted_servers().contains(s)), + ) { let server = server?; if let Ok(keys) = db .sending @@ -1154,8 +1156,9 @@ pub(crate) async fn fetch_signing_keys( ) .await { + let mut trust = 0; let keys: Vec = keys.server_keys; - let key = keys.into_iter().fold(None, |mut key, next| { + let key = keys.iter().fold(None, |mut key, next| { if let Some(verified) = &key { // rustc cannot elide this type for some reason let v: &ServerSigningKeys = verified; @@ -1164,12 +1167,19 @@ pub(crate) async fn fetch_signing_keys( .zip(next.verify_keys.iter()) .all(|(a, b)| a.1.key == b.1.key) { + trust += 1; } } else { - key = Some(next) + key = Some(next.clone()) } key }); + + if trust == (keys.len() - 1) && key.is_some() { + let k = key.unwrap(); + db.globals.add_signing_key(origin, &k)?; + return Ok(k.verify_keys); + } } } 
Err(Error::BadServerResponse( From e239014fa3935c523565b21344fec0b926c7fccf Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 4 Mar 2021 08:02:41 -0500 Subject: [PATCH 062/103] Query for the correct server --- src/server_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server_server.rs b/src/server_server.rs index dcd72f7..12b60b9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1148,7 +1148,7 @@ pub(crate) async fn fetch_signing_keys( &db.globals, &server, get_remote_server_keys::v2::Request::new( - &server, + origin, SystemTime::now() .checked_add(Duration::from_secs(3600)) .expect("SystemTime to large"), From 0d55964d241c00b36341f1843bb515f9241e8463 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 4 Mar 2021 08:45:23 -0500 Subject: [PATCH 063/103] Run nightly cargo fmt --- rustfmt.toml | 2 +- src/database.rs | 8 +++++--- src/database/appservice.rs | 6 ++++-- src/database/globals.rs | 9 +++++---- src/database/sending.rs | 3 +-- src/error.rs | 6 +++++- src/main.rs | 14 +++++++++----- 7 files changed, 30 insertions(+), 18 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 7d2cf54..e86028b 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1 +1 @@ -merge_imports = true +imports_granularity="Crate" diff --git a/src/database.rs b/src/database.rs index 34b74be..bf3e0f0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -17,9 +17,11 @@ use log::info; use rocket::futures::{self, channel::mpsc}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; -use std::collections::HashMap; -use std::fs::remove_dir_all; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + fs::remove_dir_all, + sync::{Arc, RwLock}, +}; use tokio::sync::Semaphore; #[derive(Clone, Debug, Deserialize)] diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 26ea5b9..764291d 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -1,6 +1,8 @@ use crate::{utils, Error, Result}; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; #[derive(Clone)] pub struct Appservice { diff --git a/src/database/globals.rs b/src/database/globals.rs index 8d7f104..8c0463d 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,10 +4,11 @@ use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerName, ServerSigningKeyId, }; -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use std::sync::RwLock; -use std::time::Duration; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, RwLock}, + time::Duration, +}; use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; diff --git a/src/database/sending.rs b/src/database/sending.rs index 9793971..00073af 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -16,8 +16,7 @@ use ruma::{ ServerName, }; use sled::IVec; -use tokio::select; -use tokio::sync::Semaphore; +use tokio::{select, sync::Semaphore}; #[derive(Clone)] pub struct Sending { diff --git a/src/error.rs b/src/error.rs index 13efce6..d8f10f4 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,8 @@ -use std::{collections::HashMap, sync::RwLock, time::Duration, time::Instant}; +use std::{ + collections::HashMap, + sync::RwLock, + time::{Duration, Instant}, +}; use log::error; use ruma::{ diff --git a/src/main.rs b/src/main.rs index b469f4d..a2c020f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,4 @@ -// #![warn(rust_2018_idioms)] 
+#![warn(rust_2018_idioms)] pub mod appservice_server; pub mod client_server; @@ -19,11 +19,15 @@ pub use rocket::State; use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use rocket::figment::{ - providers::{Env, Format, Toml}, - Figment, +use rocket::{ + catch, catchers, + fairing::AdHoc, + figment::{ + providers::{Env, Format, Toml}, + Figment, + }, + routes, Request, }; -use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; use tracing::span; use tracing_subscriber::{prelude::*, Registry}; From 6da40225bb7363b9c76c4574820843faf587b43b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 13 Mar 2021 16:30:12 +0100 Subject: [PATCH 064/103] improvement: make state res actually work --- Cargo.lock | 97 +++++-- Cargo.toml | 11 +- src/client_server/account.rs | 13 +- src/client_server/capabilities.rs | 35 ++- src/client_server/config.rs | 7 +- src/client_server/membership.rs | 189 ++++++-------- src/client_server/sync.rs | 21 +- src/database.rs | 3 +- src/database/globals.rs | 56 ++-- src/database/rooms.rs | 219 +++++++--------- src/database/sending.rs | 7 +- src/main.rs | 2 + src/server_server.rs | 418 +++++++++++++++--------------- 13 files changed, 537 insertions(+), 541 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7538e0..e157565 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -187,6 +187,7 @@ dependencies = [ "log", "opentelemetry", "opentelemetry-jaeger", + "pretty_env_logger", "rand", "regex", "reqwest", @@ -383,6 +384,19 @@ dependencies = [ "syn", ] +[[package]] +name = "env_logger" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "figment" version = "0.10.3" @@ -664,6 +678,15 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +dependencies = [ + "quick-error", +] + [[package]] name = "hyper" version = "0.14.4" @@ -688,6 +711,21 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +dependencies = [ + "futures-util", + "hyper", + "log", + "rustls", + "tokio", + "tokio-rustls", + "webpki", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -1259,6 +1297,16 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +[[package]] +name = "pretty_env_logger" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" +dependencies = [ + "env_logger", + "log", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -1454,6 +1502,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-rustls", "hyper-tls", "ipnet", "js-sys", @@ -1463,14 +1512,17 @@ dependencies = [ "native-tls", "percent-encoding", "pin-project-lite", + "rustls", "serde", "serde_urlencoded", "tokio", "tokio-native-tls", + 
"tokio-rustls", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.7.0", ] @@ -1570,7 +1622,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "js_int", @@ -1590,7 +1641,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "http", "percent-encoding", @@ -1605,7 +1655,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1616,7 +1665,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1630,7 +1678,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "http", @@ -1649,7 +1696,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "maplit", @@ -1662,7 +1708,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-common", @@ -1676,7 +1721,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1687,7 +1731,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ -1702,7 +1745,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "paste", "rand", @@ -1716,7 +1758,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro2", "quote", @@ -1727,12 +1768,10 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = 
"git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1745,7 +1784,6 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ -1760,7 +1798,6 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "form_urlencoded", "itoa", @@ -1773,7 +1810,6 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1784,7 +1820,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "base64 0.13.0", "ring", @@ -2051,7 +2086,6 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=main#d34a78c5b66de419862d9e592bde8e0007111ebd" dependencies = [ "itertools", "log", @@ -2136,6 +2170,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "termcolor" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +dependencies = [ + "winapi-util", +] + [[package]] name = "thiserror" version = "1.0.24" @@ -2656,6 +2699,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki-roots" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" +dependencies = [ + "webpki", +] + [[package]] name = "weezl" version = "0.1.4" @@ -2684,6 +2736,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 2293b62..9c08776 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,16 +18,16 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } +#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = 
"0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -# ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } -# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" @@ -50,7 +50,7 @@ rand = "0.8.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.11.1" +reqwest = { version = "0.11.1", features = ["rustls-tls"] } # Used for conduit::Error type thiserror = "1.0.24" # Used to generate thumbnails for images @@ -71,6 +71,7 @@ opentelemetry = "0.12.0" tracing-subscriber = "0.2.16" tracing-opentelemetry = "0.11.0" opentelemetry-jaeger = "0.11.0" +pretty_env_logger = "0.4.0" [features] default = ["conduit_bin"] diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 7d3067e..1c6f517 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -455,16 +455,9 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMessage, - content: serde_json::to_value(message::MessageEventContent::Text( - message::TextMessageEventContent { - body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), - formatted: Some(message::FormattedBody { - format: message::MessageFormat::Html, - body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), - }), - relates_to: None, - new_content: None, - }, + content: serde_json::to_value(message::MessageEventContent::text_html( + "Thanks for trying out Conduit! 
This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), + "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index b4fdf69..a3c0db6 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,5 +1,10 @@ use crate::ConduitResult; -use ruma::{api::client::r0::capabilities::get_capabilities, RoomVersionId}; +use ruma::{ + api::client::r0::capabilities::{ + get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, + }, + RoomVersionId, +}; use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] @@ -12,24 +17,14 @@ use rocket::get; #[tracing::instrument] pub async fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); - available.insert( - RoomVersionId::Version5, - get_capabilities::RoomVersionStability::Stable, - ); - available.insert( - RoomVersionId::Version6, - get_capabilities::RoomVersionStability::Stable, - ); + available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); + available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); - Ok(get_capabilities::Response { - capabilities: get_capabilities::Capabilities { - change_password: get_capabilities::ChangePasswordCapability::default(), // enabled by default - room_versions: get_capabilities::RoomVersionsCapability { - default: RoomVersionId::Version6, - available, - }, - custom_capabilities: BTreeMap::new(), - }, - } - .into()) + let mut capabilities = Capabilities::new(); + capabilities.room_versions = RoomVersionsCapability { + default: RoomVersionId::Version6, + available, + }; + + Ok(get_capabilities::Response { capabilities }.into()) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index aece96e..a53b7cd 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -23,7 +23,7 @@ pub async fn set_global_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let content = serde_json::from_str::(body.data.get()) + let data = serde_json::from_str(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); @@ -33,10 +33,7 @@ pub async fn set_global_account_data_route( sender_user, event_type.clone().into(), &BasicEvent { - content: CustomEventContent { - event_type, - json: content, - }, + content: CustomEventContent { event_type, data }, }, &db.globals, )?; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b7b2d4b..d63fa02 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,7 @@ use 
crate::{ pdu::{PduBuilder, PduEvent}, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::warn; +use log::{info, warn}; use ruma::{ api::{ client::{ @@ -21,11 +21,9 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -// use state_res::Event; use std::{ - collections::{BTreeMap, HashSet}, + collections::{BTreeMap, HashMap}, convert::TryFrom, - iter, sync::Arc, }; @@ -439,6 +437,7 @@ pub async fn joined_members_route( Ok(joined_members::Response { joined }.into()) } +#[tracing::instrument(skip(db))] async fn join_room_by_id_helper( db: &Database, sender_user: Option<&UserId>, @@ -566,23 +565,22 @@ async fn join_room_by_id_helper( Ok((event_id, value)) }; - let room_state = send_join_response.room_state.state.iter().map(add_event_id); + let count = db.globals.next_count()?; - let _state_events = room_state - .clone() - .map(|pdu: Result<(EventId, CanonicalJsonObject)>| Ok(pdu?.0)) - .chain(iter::once(Ok(event_id.clone()))) // Add join event we just created - .collect::>>()?; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); - let auth_chain = send_join_response + let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) + .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; + + let mut state = HashMap::new(); + + for pdu in send_join_response .room_state - .auth_chain + .state .iter() - .map(add_event_id); - - let mut event_map = room_state - .chain(auth_chain) - .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created + .map(add_event_id) .map(|r| { let (event_id, value) = r?; PduEvent::from_id_val(&event_id, value.clone()) @@ -592,97 +590,78 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") }) }) - .collect::>>>()?; - - let control_events = event_map - .values() - .filter(|pdu| state_res::is_power_event(pdu)) - .map(|pdu| pdu.event_id.clone()) - .collect::>(); - - // These events are not guaranteed to be sorted but they are resolved according to spec - // we auth them anyways to weed out faulty/malicious server. The following is basically the - // full state resolution algorithm. - let event_ids = event_map.keys().cloned().collect::>(); - - let sorted_control_events = state_res::StateResolution::reverse_topological_power_sort( - &room_id, - &control_events, - &mut event_map, - &event_ids, - ); - - // Auth check each event against the "partial" state created by the preceding events - let resolved_control_events = state_res::StateResolution::iterative_auth_check( - room_id, - &RoomVersionId::Version6, - &sorted_control_events, - &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) - &mut event_map, - ) - .expect("iterative auth check failed on resolved events"); - - // This removes the control events that failed auth, leaving the resolved - // to be mainline sorted. In the actual `state_res::StateResolution::resolve` - // function both are removed since these are all events we don't know of - // we must keep track of everything to add to our DB. 
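// A condensed view of the pipeline this hunk removes (sketch only; the bindings are the ones
// defined in the old code above): control/power events are ordered with
// `StateResolution::reverse_topological_power_sort`, auth-checked with `iterative_auth_check`,
// the remaining events are ordered with `mainline_sort` against the resolved power levels, and
// a second `iterative_auth_check` filters the result before anything is written to the DB.
// The replacement added later in this hunk skips all of that for the send_join response: every
// received event is stored as an outlier via `add_pdu_outlier` and the received state is
// installed directly with `force_state`.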
- let events_to_sort = event_map - .keys() - .filter(|id| { - !sorted_control_events.contains(id) - || resolved_control_events.values().any(|rid| *id == rid) - }) - .cloned() - .collect::>(); - - let power_level = - resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string()))); - // Sort the remaining non control events - let sorted_event_ids = state_res::StateResolution::mainline_sort( - room_id, - &events_to_sort, - power_level, - &mut event_map, - ); - - let resolved_events = state_res::StateResolution::iterative_auth_check( - room_id, - &RoomVersionId::Version6, - &sorted_event_ids, - &resolved_control_events, - &mut event_map, - ) - .expect("iterative auth check failed on resolved events"); - - // filter the events that failed the auth check keeping the remaining events - // sorted correctly - for ev_id in sorted_event_ids - .iter() - .filter(|id| resolved_events.values().any(|rid| rid == *id)) { - let pdu = event_map - .get(ev_id) - .expect("Found event_id in sorted events that is not in resolved state"); + let (id, pdu) = pdu?; + info!("adding {} to outliers: {:#?}", id, pdu); + db.rooms.add_pdu_outlier(&pdu)?; + if let Some(state_key) = &pdu.state_key { + if pdu.kind == EventType::RoomMember { + let target_user_id = UserId::try_from(state_key.clone()).map_err(|_| { + Error::BadServerResponse("Invalid user id in send_join response.") + })?; - // We do not rebuild the PDU in this case only insert to DB - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - let hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &pdu, - utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &[pdu.event_id.clone()], - &db, - )?; - - db.rooms.set_room_state(room_id, &hash)?; + // Update our membership info, we do this here incase a user is invited + // and immediately leaves we need the DB to record the invite event for auth + db.rooms.update_membership( + &pdu.room_id, + &target_user_id, + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content.", + ) + })?, + &pdu.sender, + &db.account_data, + &db.globals, + )?; + } + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(id.as_bytes()); + state.insert((pdu.kind.clone(), state_key.clone()), long_id); + } } + + state.insert( + ( + pdu.kind.clone(), + pdu.state_key.clone().expect("join event has state key"), + ), + pdu_id.clone(), + ); + + db.rooms.force_state(room_id, state, &db.globals)?; + + for pdu in send_join_response + .room_state + .auth_chain + .iter() + .map(add_event_id) + .map(|r| { + let (event_id, value) = r?; + PduEvent::from_id_val(&event_id, value.clone()) + .map(|ev| (event_id, Arc::new(ev))) + .map_err(|e| { + warn!("{:?}: {}", value, e); + Error::BadServerResponse("Invalid PDU in send_join response.") + }) + }) + { + let (id, pdu) = pdu?; + info!("adding {} to outliers: {:#?}", id, pdu); + db.rooms.add_pdu_outlier(&pdu)?; + } + + db.rooms.append_pdu( + &pdu, + utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), + db.globals.next_count()?, + pdu_id.into(), + &[pdu.event_id.clone()], + db, + )?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 
fac6b15..f01eb39 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -102,9 +102,14 @@ pub async fn sync_events_route( // since and the current room state, meaning there should be no updates. // The inner Option is None when there is an event, but there is no state hash associated // with it. This can happen for the RoomCreate event, so all updates should arrive. - let first_pdu_after_since = db.rooms.pdus_after(sender_user, &room_id, since).next(); + let first_pdu_before_since = db.rooms.pdus_until(sender_user, &room_id, since).next(); + let pdus_after_since = db + .rooms + .pdus_after(sender_user, &room_id, since) + .next() + .is_some(); - let since_state_hash = first_pdu_after_since + let since_state_hash = first_pdu_before_since .as_ref() .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); @@ -114,7 +119,7 @@ pub async fn sync_events_route( invited_member_count, joined_since_last_sync, state_events, - ) = if since_state_hash != None && Some(¤t_state_hash) != since_state_hash.as_ref() { + ) = if pdus_after_since && Some(¤t_state_hash) != since_state_hash.as_ref() { let current_state = db.rooms.room_state_full(&room_id)?; let current_members = current_state .iter() @@ -138,9 +143,9 @@ pub async fn sync_events_route( // Calculations: let new_encrypted_room = - encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); + encrypted_room && since_encryption.map_or(true, |encryption| encryption.is_none()); - let send_member_count = since_state.as_ref().map_or(false, |since_state| { + let send_member_count = since_state.as_ref().map_or(true, |since_state| { since_state.as_ref().map_or(true, |since_state| { current_members.len() != since_state @@ -179,7 +184,7 @@ pub async fn sync_events_route( let since_membership = since_state .as_ref() - .map_or(MembershipState::Join, |since_state| { + .map_or(MembershipState::Leave, |since_state| { since_state .as_ref() .and_then(|since_state| { @@ -221,7 +226,7 @@ pub async fn sync_events_route( } } - let joined_since_last_sync = since_sender_member.map_or(false, |member| { + let joined_since_last_sync = since_sender_member.map_or(true, |member| { member.map_or(true, |member| member.membership != MembershipState::Join) }); @@ -310,7 +315,7 @@ pub async fn sync_events_route( (None, None, Vec::new()) }; - let state_events = if joined_since_last_sync { + let state_events = if dbg!(joined_since_last_sync) { current_state .into_iter() .map(|(_, pdu)| pdu.to_sync_state_event()) diff --git a/src/database.rs b/src/database.rs index bf3e0f0..0f5e4b4 100644 --- a/src/database.rs +++ b/src/database.rs @@ -165,9 +165,8 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, + eventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, - roomeventid_outlierpducount: db.open_tree("roomeventid_outlierpducount")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 8c0463d..dd594c5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -24,7 +24,7 @@ pub struct Globals { reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, - pub(super) servertimeout_signingkey: sled::Tree, // ServerName -> 
algorithm:key + pubkey + pub(super) servertimeout_signingkey: sled::Tree, // ServerName + Timeout Timestamp -> algorithm:key + pubkey } impl Globals { @@ -157,37 +157,31 @@ impl Globals { /// /// This doesn't actually check that the keys provided are newer than the old set. pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { - // Remove outdated keys - let now = crate::utils::millis_since_unix_epoch(); - for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { - let (k, _) = item?; - let valid_until = k - .splitn(2, |&b| b == 0xff) - .nth(1) - .map(crate::utils::u64_from_bytes) - .ok_or_else(|| Error::bad_database("Invalid signing keys."))? - .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + let mut key1 = origin.as_bytes().to_vec(); + key1.push(0xff); - if now > valid_until { - self.servertimeout_signingkey.remove(k)?; - } - } + let mut key2 = key1.clone(); - let mut key = origin.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice( - &(keys - .valid_until_ts - .duration_since(std::time::UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64) - .to_be_bytes(), - ); + let ts = keys + .valid_until_ts + .duration_since(std::time::UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64; + + key1.extend_from_slice(&ts.to_be_bytes()); + key2.extend_from_slice(&(ts + 1).to_be_bytes()); self.servertimeout_signingkey.insert( - key, + key1, serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), )?; + + self.servertimeout_signingkey.insert( + key2, + serde_json::to_vec(&keys.old_verify_keys) + .expect("ServerSigningKeys are a valid string"), + )?; + Ok(()) } @@ -196,7 +190,10 @@ impl Globals { &self, origin: &ServerName, ) -> Result> { + let mut response = BTreeMap::new(); + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { let (k, bytes) = item?; let valid_until = k @@ -207,10 +204,11 @@ impl Globals { .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; // If these keys are still valid use em! if valid_until > now { - return serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys")); + let btree: BTreeMap<_, _> = serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys"))?; + response.extend(btree); } } - Ok(BTreeMap::default()) + Ok(response) } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d48494b..2a88628 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::error; +use log::{error, info, warn}; use regex::Regex; use ring::digest; use ruma::{ @@ -71,10 +71,7 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) roomeventid_outlierpdu: sled::Tree, - /// RoomId + EventId -> count of the last known pdu when the outlier was inserted. - /// This allows us to skip any state snapshots that would for sure not have the outlier. - pub(super) roomeventid_outlierpducount: sled::Tree, + pub(super) eventid_outlierpdu: sled::Tree, /// RoomId + EventId -> Parent PDU EventId. 
pub(super) prevevent_parent: sled::Tree, @@ -89,19 +86,21 @@ impl Rooms { room_id: &RoomId, state_hash: &StateHashId, ) -> Result> { - self.stateid_pduid + let r = self + .stateid_pduid .scan_prefix(&state_hash) .values() - .map(|pduid_short| { - let mut pduid = room_id.as_bytes().to_vec(); - pduid.push(0xff); - pduid.extend_from_slice(&pduid_short?); - match self.pduid_pdu.get(&pduid)? { + .map(|short_id| { + let short_id = short_id?; + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(&short_id); + match self.pduid_pdu.get(&long_id)? { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .roomeventid_outlierpdu - .get(pduid)? + .eventid_outlierpdu + .get(short_id)? .map(|b| { serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")) @@ -124,7 +123,9 @@ impl Rooms { pdu, )) }) - .collect() + .collect(); + + r } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -140,6 +141,8 @@ impl Rooms { key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); + info!("Looking for {} {:?}", event_type, state_key); + let short = self.statekey_short.get(&key)?; if let Some(short) = short { @@ -147,32 +150,40 @@ impl Rooms { stateid.push(0xff); stateid.extend_from_slice(&short); + info!("trying to find pduid/eventid. short: {:?}", stateid); self.stateid_pduid .get(&stateid)? - .map_or(Ok(None), |pdu_id_short| { - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&pdu_id_short); + .map_or(Ok(None), |short_id| { + info!("found in stateid_pduid"); + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(&short_id); - Ok::<_, Error>(Some(( - pdu_id.clone().into(), - match self.pduid_pdu.get(&pdu_id)? { - Some(b) => serde_json::from_slice::(&b) + Ok::<_, Error>(Some(match self.pduid_pdu.get(&long_id)? { + Some(b) => ( + long_id.clone().into(), + serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - None => self - .roomeventid_outlierpdu - .get(pdu_id)? - .map(|b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })??, - }, - ))) + ), + None => { + info!("looking in outliers"); + ( + short_id.clone().into(), + self.eventid_outlierpdu + .get(&short_id)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })??, + ) + } + })) }) } else { + info!("short id not found"); Ok(None) } } @@ -215,6 +226,8 @@ impl Rooms { .ok_or_else(|| Error::bad_database("Saved auth event with no state key."))?, )? 
{ events.insert((event_type, state_key), pdu); + } else { + warn!("Could not find {} {:?} in state", event_type, state_key); } } Ok(events) @@ -253,11 +266,11 @@ impl Rooms { globals: &super::globals::Globals, ) -> Result<()> { let state_hash = - self.calculate_hash(&state.values().map(|pdu_id| &**pdu_id).collect::>())?; + self.calculate_hash(&state.values().map(|long_id| &**long_id).collect::>())?; let mut prefix = state_hash.to_vec(); prefix.push(0xff); - for ((event_type, state_key), id_long) in state { + for ((event_type, state_key), long_id) in state { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); @@ -273,16 +286,13 @@ impl Rooms { } }; - // Because of outliers this could also be an eventID but that - // is handled by `state_full` - let pdu_id_short = id_long - .splitn(2, |&b| b == 0xff) - .nth(1) - .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; + // If it's a pdu id we remove the room id, if it's an event id we leave it the same + let short_id = long_id.splitn(2, |&b| b == 0xff).nth(1).unwrap_or(&long_id); let mut state_id = prefix.clone(); state_id.extend_from_slice(&short.to_be_bytes()); - self.stateid_pduid.insert(state_id, pdu_id_short)?; + info!("inserting {:?} into {:?}", short_id, state_id); + self.stateid_pduid.insert(state_id, short_id)?; } self.roomid_statehash @@ -348,20 +358,19 @@ impl Rooms { pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| { - Ok(Some( - serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { - Some(b) => b, - None => self - .roomeventid_outlierpdu - .get(event_id.as_bytes())? - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, - }) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + .map_or_else::, _, _>( + || Ok(self.eventid_outlierpdu.get(event_id.as_bytes())?), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + Ok(serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?) }) + .transpose() } /// Returns the pdu's id. @@ -371,24 +380,31 @@ impl Rooms { .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } + pub fn get_long_id(&self, event_id: &EventId) -> Result> { + Ok(self + .get_pdu_id(event_id)? + .map_or_else(|| event_id.as_bytes().to_vec(), |pduid| pduid.to_vec())) + } + /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| { - Ok(Some( - serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { - Some(b) => b, - None => match self.roomeventid_outlierpdu.get(event_id.as_bytes())? { - Some(b) => b, - None => return Ok(None), - }, - }) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + .map_or_else::, _, _>( + || Ok(self.eventid_outlierpdu.get(event_id.as_bytes())?), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + Ok(serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?) }) + .transpose() } /// Returns the pdu. @@ -484,7 +500,7 @@ impl Rooms { /// Returns the pdu from the outlier tree. 
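// Outlier storage is keyed by the raw event id: `add_pdu_outlier` (further down) writes
// `event_id -> serialized PDU` into `eventid_outlierpdu`, and this getter is the symmetric
// read. A rough round trip, assuming a `rooms: Rooms` handle and a freshly built `pdu`:
//
//     rooms.add_pdu_outlier(&pdu)?;                       // store by event id only
//     let back = rooms.get_pdu_outlier(&pdu.event_id)?;   // Some(pdu) once stored
//     assert!(back.is_some());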
pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.roomeventid_outlierpdu + self.eventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) @@ -494,25 +510,12 @@ impl Rooms { /// Append the PDU as an outlier. /// /// Any event given to this will be processed (state-res) on another thread. - pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { - log::info!( - "Number of outlier pdu's {}", - self.roomeventid_outlierpdu.len() - ); - - let mut key = pdu.room_id().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu.event_id().as_bytes()); - - self.eventid_pduid - .insert(pdu.event_id().as_bytes(), key.as_slice())?; - - self.roomeventid_outlierpdu.insert( - &key, + pub fn add_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { + self.eventid_outlierpdu.insert( + &pdu.event_id.as_bytes(), &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), )?; - self.roomeventid_outlierpducount - .insert(&key, &self.latest_pdu_count(pdu.room_id())?.to_be_bytes())?; + Ok(()) } @@ -557,50 +560,6 @@ impl Rooms { } } - // We no longer keep this pdu as an outlier - let mut key = pdu.room_id().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu.event_id().as_bytes()); - if self.roomeventid_outlierpdu.remove(&key)?.is_some() { - if let Some(state_key) = pdu.state_key.as_deref() { - let mut statekey = pdu.kind().as_ref().as_bytes().to_vec(); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_short.get(&statekey)? { - Some(short) => utils::u64_from_bytes(&short).map_err(|_| { - Error::bad_database("Invalid short bytes in statekey_short.") - })?, - None => { - error!( - "This event has been inserted into the state snapshot tree previously." - ); - let short = db.globals.next_count()?; - self.statekey_short - .insert(&statekey, &short.to_be_bytes())?; - short - } - }; - - let mut start = pdu.room_id().as_bytes().to_vec(); - start.extend_from_slice( - &self - .roomeventid_outlierpducount - .get(&key)? - .unwrap_or_default(), - ); - for hash in self.pduid_statehash.range(start..).values() { - let mut hash = hash?.to_vec(); - hash.extend_from_slice(&short.to_be_bytes()); - - let _ = dbg!(self.stateid_pduid.compare_and_swap( - hash, - Some(pdu.event_id().as_bytes()), - Some(pdu_id.as_ref()), - )?); - } - } - } - // We must keep track of all events that have been referenced. for leaf in leaves { let mut key = pdu.room_id().as_bytes().to_vec(); @@ -1275,7 +1234,7 @@ impl Rooms { } /// Update current membership data. 
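// This function gains `pub` in the change just below; the federation join path in
// `src/client_server/membership.rs` now calls `db.rooms.update_membership(...)` directly to
// record membership for member events taken from the send_join response, which is presumably
// why the wider visibility is needed.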
- fn update_membership( + pub fn update_membership( &self, room_id: &RoomId, user_id: &UserId, diff --git a/src/database/sending.rs b/src/database/sending.rs index 00073af..f96e489 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -346,6 +346,8 @@ impl Sending { .collect::>(); let permit = maximum_requests.acquire().await; + + info!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = server_server::send_request( &globals, &*server, @@ -361,7 +363,10 @@ impl Sending { }, ) .await - .map(|_response| (server.clone(), is_appservice)) + .map(|response| { + info!("server response: {:?}", response); + (server.clone(), is_appservice) + }) .map_err(|e| (server, is_appservice, e)); drop(permit); diff --git a/src/main.rs b/src/main.rs index a2c020f..eb89fea 100644 --- a/src/main.rs +++ b/src/main.rs @@ -204,6 +204,8 @@ async fn main() { rocket.launch().await.unwrap(); } else { + pretty_env_logger::init(); + let root = span!(tracing::Level::INFO, "app_start", work_units = 2); let _enter = root.enter(); diff --git a/src/server_server.rs b/src/server_server.rs index a665fe9..02610e8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -509,7 +509,7 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - // dbg!(&*body); + info!("Incoming PDUs: {:?}", &body.pdus); for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { @@ -600,37 +600,11 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - 'main_pdu_loop: for (event_id, room_id, value) in pdus_to_resolve { + 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { + info!("Working on incoming pdu: {:?}", value); let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); - if let Some(CanonicalJsonValue::String(sender)) = value.get("sender") { - let sender = - UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); - let origin = sender.server_name(); - - let keys = match fetch_signing_keys(&db, &room_id, origin).await { - Ok(keys) => keys, - Err(_) => { - resolved_map.insert( - event_id, - Err("Could not find signing keys for this server".to_string()), - ); - continue; - } - }; - - pub_key_map.insert( - origin.to_string(), - keys.into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(), - ); - } else { - resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); - continue; - } - // TODO: make this persist but not a DB Tree... // This is all the auth_events that have been recursively fetched so they don't have to be // deserialized over and over again. This could potentially also be some sort of trie (suffix tree) @@ -645,11 +619,11 @@ pub async fn send_transaction_message_route<'a>( // 7. if not timeline event: stop // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous): (Arc, Vec>) = match validate_event( + let (pdu, previous_create): (Arc, Option>) = match validate_event( &db, value, event_id.clone(), - &pub_key_map, + &mut pub_key_map, server_name, // All the auth events gathered will be here &mut auth_cache, @@ -662,15 +636,11 @@ pub async fn send_transaction_message_route<'a>( continue; } }; - - let single_prev = if previous.len() == 1 { - previous.first().cloned() - } else { - None - }; + info!("Validated event."); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(&pdu)?; + db.rooms.add_pdu_outlier(&pdu)?; + info!("Added pdu as outlier."); // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -679,6 +649,7 @@ pub async fn send_transaction_message_route<'a>( // // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event + info!("Requesting state at event."); let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -693,14 +664,20 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events( + info!("Fetching state events at event."); + let state = match fetch_events( &db, server_name, - &pub_key_map, + &mut pub_key_map, &res.pdu_ids, &mut auth_cache, ) - .await?; + .await + { + Ok(state) => state, + Err(_) => continue, + }; + // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -716,17 +693,21 @@ pub async fn send_transaction_message_route<'a>( .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); - ( - state, - fetch_events( - &db, - server_name, - &pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await?, + let incoming_auth_events = match fetch_events( + &db, + server_name, + &mut pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, ) + .await + { + Ok(state) => state, + Err(_) => continue, + }; + + info!("Fetching auth events of state events at event."); + (state, incoming_auth_events) } Err(_) => { resolved_map.insert( @@ -741,7 +722,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - single_prev.clone(), + previous_create.clone(), &state_at_event, None, // TODO: third party invite ) @@ -754,6 +735,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + info!("Auth check succeeded."); // End of step 10. // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it @@ -764,10 +746,12 @@ pub async fn send_transaction_message_route<'a>( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); + info!("current state: {:#?}", current_state); + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - single_prev.clone(), + previous_create, ¤t_state, None, ) @@ -780,6 +764,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; }; + info!("Auth check with current state succeeded."); // Step 11. Ensure that the state is derived from the previous current state (i.e. 
we calculated by doing state res // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) @@ -787,7 +772,10 @@ pub async fn send_transaction_message_route<'a>( // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. let extremities = match calculate_forward_extremities(&db, &pdu).await { - Ok(fork_ids) => fork_ids, + Ok(fork_ids) => { + info!("Calculated new forward extremities: {:?}", fork_ids); + fork_ids + } Err(_) => { resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); continue; @@ -836,7 +824,6 @@ pub async fn send_transaction_message_route<'a>( // We do need to force an update to this rooms state update_state = true; - // TODO: remove this is for current debugging Jan, 15 2021 let mut auth_events = vec![]; for map in &fork_states { let mut state_auth = vec![]; @@ -877,6 +864,8 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); + info!("auth events: {:?}", auth_cache); + let res = match state_res::StateResolution::resolve( pdu.room_id(), &RoomVersionId::Version6, @@ -927,6 +916,7 @@ pub async fn send_transaction_message_route<'a>( // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + info!("Appended incoming pdu."); // Set the new room state to the resolved state update_resolved_state( @@ -938,6 +928,7 @@ pub async fn send_transaction_message_route<'a>( None }, )?; + info!("Updated resolved state"); // Event has passed all auth/stateres checks } @@ -962,17 +953,52 @@ type AsyncRecursiveResult<'a, T> = Pin( db: &'a Database, value: CanonicalJsonObject, event_id: EventId, - pub_key_map: &'a PublicKeyMap, + pub_key_map: &'a mut PublicKeyMap, origin: &'a ServerName, auth_cache: &'a mut EventMap>, -) -> AsyncRecursiveResult<'a, (Arc, Vec>)> { +) -> AsyncRecursiveResult<'a, (Arc, Option>)> { Box::pin(async move { + for signature_server in match value + .get("signatures") + .ok_or_else(|| "No signatures in server response pdu.".to_string())? 
+ { + CanonicalJsonValue::Object(map) => map, + _ => return Err("Invalid signatures object in server response pdu.".to_string()), + } + .keys() + { + info!("Fetching signing keys for {}", signature_server); + let keys = match fetch_signing_keys( + &db, + &Box::::try_from(&**signature_server).map_err(|_| { + "Invalid servername in signatures of server response pdu.".to_string() + })?, + ) + .await + { + Ok(keys) => { + info!("Keys: {:?}", keys); + keys + } + Err(_) => { + return Err( + "Signature verification failed: Could not fetch signing key.".to_string(), + ); + } + }; + + pub_key_map.insert(signature_server.clone(), keys); + + info!("Fetched signing keys"); + } + let mut val = - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version5) { Ok(ver) => { if let ruma::signatures::Verified::Signatures = ver { match ruma::signatures::redact(&value, &RoomVersionId::Version6) { @@ -1000,26 +1026,34 @@ fn validate_event<'a>( ) .map_err(|_| "Event is not a valid PDU".to_string())?; + info!("Fetching auth events."); fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) .await .map_err(|e| e.to_string())?; let pdu = Arc::new(pdu.clone()); + /* // 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let previous = fetch_events(&db, origin, &pub_key_map, &pdu.prev_events, auth_cache) + info!("Fetching prev events."); + let previous = fetch_events(&db, origin, pub_key_map, &pdu.prev_events, auth_cache) .await .map_err(|e| e.to_string())?; + */ + + // if the previous event was the create event special rules apply + let previous_create = if pdu.auth_events.len() == 1 && pdu.prev_events == pdu.auth_events { + auth_cache.get(&pdu.auth_events[0]).cloned() + } else { + None + }; // Check that the event passes auth based on the auth_events + info!("Checking auth."); let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - if previous.len() == 1 { - previous.first().cloned() - } else { - None - }, + previous_create.clone(), &pdu.auth_events .iter() .map(|id| { @@ -1039,39 +1073,20 @@ fn validate_event<'a>( return Err("Event has failed auth check with auth events".to_string()); } - Ok((pdu, previous)) + info!("Validation successful."); + Ok((pdu, previous_create)) }) } -/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have any missing events it fails. 
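// Note on the rewrite below: the explicit DFS over `auth_events` is gone, but the chain is
// still walked transitively, because `fetch_events` runs `validate_event` on anything it has
// to fetch over federation, and `validate_event` calls back into `fetch_check_auth_events`
// for that event's own `auth_events`.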
+#[tracing::instrument(skip(db))] async fn fetch_check_auth_events( db: &Database, origin: &ServerName, - key_map: &PublicKeyMap, + key_map: &mut PublicKeyMap, event_ids: &[EventId], auth_cache: &mut EventMap>, ) -> Result<()> { - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if auth_cache.contains_key(&ev_id) { - continue; - } - - // TODO: Batch these async calls so we can wait on multiple at once - let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) - .await - .map(|mut vec| { - vec.pop() - .ok_or_else(|| Error::Conflict("Event was not found in fetch_events")) - })??; - - stack.extend(ev.auth_events()); - } + fetch_events(db, origin, key_map, event_ids, auth_cache).await?; Ok(()) } @@ -1086,44 +1101,58 @@ async fn fetch_check_auth_events( /// /// If the event is unknown to the `auth_cache` it is added. This guarantees that any /// event we need to know of will be present. +#[tracing::instrument(skip(db))] pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, - key_map: &PublicKeyMap, + key_map: &mut PublicKeyMap, events: &[EventId], auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; for id in events { + info!("Fetching event: {}", id); let pdu = match auth_cache.get(id) { - Some(pdu) => pdu.clone(), + Some(pdu) => { + info!("Event found in cache"); + pdu.clone() + } // `get_pdu` checks the outliers tree for us None => match db.rooms.get_pdu(&id)? { - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = - validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|e| { - error!("{:?}", e); - Error::Conflict("Authentication of event failed") - })?; + Some(pdu) => { + info!("Event found in outliers"); + Arc::new(pdu) + } + None => { + info!("Fetching event over federation"); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + info!("Got event over federation: {:?}", res); + let (event_id, value) = + crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|e| { + error!("ERROR: {:?}", e); + Error::Conflict("Authentication of event failed") + })?; - db.rooms.append_pdu_outlier(&pdu)?; - pdu + info!("Added fetched pdu as outlier."); + db.rooms.add_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + } }, }; auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); @@ -1134,14 +1163,23 @@ pub(crate) async fn fetch_events( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. +#[tracing::instrument(skip(db))] pub(crate) async fn fetch_signing_keys( db: &Database, - room_id: &RoomId, origin: &ServerName, -) -> Result> { +) -> Result> { + let mut result = BTreeMap::new(); + match db.globals.signing_keys_for(origin)? 
{ - keys if !keys.is_empty() => Ok(keys), + keys if !keys.is_empty() => { + info!("we knew the signing keys already: {:?}", keys); + Ok(keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect()) + } _ => { + info!("Asking {} for it's signing key", origin); match db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) @@ -1149,13 +1187,24 @@ pub(crate) async fn fetch_signing_keys( { Ok(keys) => { db.globals.add_signing_key(origin, &keys.server_key)?; - Ok(keys.server_key.verify_keys) + + result.extend( + keys.server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + keys.server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + return Ok(result); } _ => { - for server in db.rooms.room_servers(room_id).filter( - |ser| matches!(ser, Ok(s) if db.globals.trusted_servers().contains(s)), - ) { - let server = server?; + for server in db.globals.trusted_servers() { + info!("Asking {} for {}'s signing key", server, origin); if let Ok(keys) = db .sending .send_federation_request( @@ -1170,30 +1219,21 @@ pub(crate) async fn fetch_signing_keys( ) .await { - let mut trust = 0; - let keys: Vec = keys.server_keys; - let key = keys.iter().fold(None, |mut key, next| { - if let Some(verified) = &key { - // rustc cannot elide this type for some reason - let v: &ServerSigningKeys = verified; - if v.verify_keys - .iter() - .zip(next.verify_keys.iter()) - .all(|(a, b)| a.1.key == b.1.key) - { - trust += 1; - } - } else { - key = Some(next.clone()) - } - key - }); - - if trust == (keys.len() - 1) && key.is_some() { - let k = key.unwrap(); + info!("Got signing keys: {:?}", keys); + for k in keys.server_keys.into_iter() { db.globals.add_signing_key(origin, &k)?; - return Ok(k.verify_keys); + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); } + return Ok(result); } } Err(Error::BadServerResponse( @@ -1211,6 +1251,7 @@ pub(crate) async fn fetch_signing_keys( /// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). /// /// The state snapshot of the incoming event __needs__ to be added to the resulting list. +#[tracing::instrument(skip(db))] pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, @@ -1261,6 +1302,7 @@ pub(crate) async fn calculate_forward_extremities( /// /// This guarantees that the incoming event will be in the state sets (at least our servers /// and the sending server). +#[tracing::instrument(skip(db))] pub(crate) async fn build_forward_extremity_snapshots( db: &Database, pdu: Arc, @@ -1275,12 +1317,14 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); for id in current_leaves { + if id == &pdu.event_id { + continue; + } match db.rooms.get_pdu_id(id)? { // We can skip this because it is handled outside of this function // The current server state and incoming event state are built to be // the state after. // This would be the incoming state from the server. 
- Some(_) if id == pdu.event_id() => {} Some(pduid) if db.rooms.get_pdu_from_id(&pduid)?.is_some() => { let state_hash = db .rooms @@ -1308,40 +1352,7 @@ pub(crate) async fn build_forward_extremity_snapshots( } _ => { error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - - let res = db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: id, - }, - ) - .await?; - - // TODO: This only adds events to the auth_cache, there is for sure a better way to - // do this... - fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - - let mut state_before = - fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) - .await? - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect::>(); - - if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) - .await? - .pop() - { - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, pdu); - } - - // Now it's the state after - fork_states.insert(state_before); + return Err(Error::BadDatabase("Missing state snapshot.")); } } } @@ -1353,9 +1364,11 @@ pub(crate) async fn build_forward_extremity_snapshots( fork_states.insert(current_state); } + info!("Fork states: {:?}", fork_states); Ok(fork_states) } +#[tracing::instrument(skip(db))] pub(crate) fn update_resolved_state( db: &Database, room_id: &RoomId, @@ -1366,22 +1379,14 @@ pub(crate) fn update_resolved_state( if let Some(state) = state { let mut new_state = HashMap::new(); for ((ev_type, state_k), pdu) in state { - match db.rooms.get_pdu_id(pdu.event_id())? { - Some(pduid) => { - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pduid.to_vec(), - ); - } - None => { - error!("We are missing a state event for the current room state."); - } - } + let long_id = db.rooms.get_long_id(&pdu.event_id)?; + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + long_id, + ); } db.rooms.force_state(room_id, new_state, &db.globals)?; @@ -1392,6 +1397,7 @@ pub(crate) fn update_resolved_state( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. +#[tracing::instrument(skip(db))] pub(crate) fn append_incoming_pdu( db: &Database, pdu: &PduEvent, @@ -1402,20 +1408,16 @@ pub(crate) fn append_incoming_pdu( // We can tell if we need to do this based on wether state resolution took place or not let mut new_state = HashMap::new(); for ((ev_type, state_k), state_pdu) in state { - match db.rooms.get_pdu_id(state_pdu.event_id())? 
{ - Some(state_pduid) => { - new_state.insert( - ( - ev_type.clone(), - state_k - .clone() - .ok_or_else(|| Error::Conflict("State contained non state event"))?, - ), - state_pduid.to_vec(), - ); - } - None => error!("We are missing a state event for the incoming event snapshot"), - } + let long_id = db.rooms.get_long_id(state_pdu.event_id())?; + new_state.insert( + ( + ev_type.clone(), + state_k + .clone() + .ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + long_id.to_vec(), + ); } db.rooms From 44425a903a27a0ca0e1f9ff7bc65ea1b13ded54a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 16 Mar 2021 18:00:26 +0100 Subject: [PATCH 065/103] fix: multiple federation/pusher fixes --- src/client_server/push.rs | 8 +-- src/client_server/sync.rs | 9 +--- src/database/pusher.rs | 22 +++----- src/database/rooms.rs | 22 ++++---- src/database/sending.rs | 24 ++++----- src/error.rs | 60 --------------------- src/server_server.rs | 107 +++++++++++++++++--------------------- 7 files changed, 85 insertions(+), 167 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 4dc9769..a7ddbb6 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -686,10 +686,10 @@ pub async fn get_pushers_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender = body.sender_user.as_ref().expect("authenticated endpoint"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::Response { - pushers: db.pusher.get_pusher(sender)?, + pushers: db.pusher.get_pusher(sender_user)?, } .into()) } @@ -703,10 +703,10 @@ pub async fn set_pushers_route( db: State<'_, Database>, body: Ruma, ) -> ConduitResult { - let sender = body.sender_user.as_ref().expect("authenticated endpoint"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); - db.pusher.set_pusher(sender, pusher)?; + db.pusher.set_pusher(sender_user, pusher)?; db.flush().await?; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 0fc98ec..6551b2a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -315,7 +315,7 @@ pub async fn sync_events_route( (None, None, Vec::new()) }; - let state_events = if dbg!(joined_since_last_sync) { + let state_events = if joined_since_last_sync { current_state .into_iter() .map(|(_, pdu)| pdu.to_sync_state_event()) @@ -703,12 +703,7 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let delay = tokio::time::sleep(duration); - tokio::pin!(delay); - tokio::select! 
{ - _ = &mut delay, if delay.is_elapsed() => {} - _ = watcher => {} - } + let _ = tokio::time::timeout(duration, watcher).await; } Ok(response.into()) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 2bf6bf7..59ccbef 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -35,8 +35,6 @@ impl PushData { } pub fn set_pusher(&self, sender: &UserId, pusher: Pusher) -> Result<()> { - println!("CCCCCCCCCCCCCCCCCCCCCc"); - dbg!(&pusher); let mut key = sender.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); @@ -51,7 +49,7 @@ impl PushData { } self.senderkey_pusher.insert( - dbg!(key), + key, &*serde_json::to_string(&pusher).expect("Pusher is valid JSON string"), )?; @@ -63,12 +61,10 @@ impl PushData { prefix.push(0xff); self.senderkey_pusher - .scan_prefix(dbg!(prefix)) + .scan_prefix(prefix) .values() .map(|push| { - println!("DDDDDDDDDDDDDDDDDDDDDDDDDD"); - let push = - dbg!(push).map_err(|_| Error::bad_database("Invalid push bytes in db."))?; + let push = push.map_err(|_| Error::bad_database("Invalid push bytes in db."))?; Ok(serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) }) @@ -100,10 +96,7 @@ where //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let reqwest_response = globals - .reqwest_client() - .execute(dbg!(reqwest_request)) - .await; + let reqwest_response = globals.reqwest_client().execute(reqwest_request).await; // Because reqwest::Response -> http::Response is complicated: match reqwest_response { @@ -182,7 +175,7 @@ pub async fn send_push_notice( continue; } - match dbg!(rule.rule_id.as_str()) { + match rule.rule_id.as_str() { ".m.rule.master" => {} ".m.rule.suppress_notices" => { if pdu.kind == EventType::RoomMessage @@ -454,8 +447,7 @@ async fn send_notice( db: &Database, name: &str, ) -> Result<()> { - println!("BBBBBBBBBBBBBBBr"); - let (http, _emails): (Vec<&Pusher>, _) = dbg!(pushers) + let (http, _emails): (Vec<&Pusher>, _) = pushers .iter() .partition(|pusher| pusher.kind == Some(PusherKind::Http)); @@ -463,7 +455,7 @@ async fn send_notice( // Two problems with this // 1. if "event_id_only" is the only format kind it seems we should never add more info // 2. can pusher/devices have conflicting formats - for pusher in dbg!(http) { + for pusher in http { let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); let url = if let Some(url) = pusher.data.url.as_ref() { url diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 648f080..c908d51 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::{error, info, warn}; +use log::{debug, error, info, warn}; use regex::Regex; use ring::digest; use ruma::{ @@ -67,7 +67,7 @@ pub struct Rooms { /// StateKey = EventType + StateKey, Short = Count pub(super) statekey_short: sled::Tree, /// StateId = StateHash + Short, PduId = Count (without roomid) - pub(super) stateid_pduid: sled::Tree, + pub(super) stateid_eventid: sled::Tree, /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. 
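
The sync long-poll in the hunk above was simplified from a pinned `sleep` plus `tokio::select!` to a single `tokio::time::timeout` call. A minimal sketch of that pattern, using `tokio::sync::Notify` purely as a stand-in for the database watcher future:

    use std::time::Duration;
    use tokio::sync::Notify;

    // Wait until either the watcher fires or the (capped) duration elapses.
    // The timeout result is ignored on purpose: waking up because nothing
    // happened is just as valid as waking up because something did.
    async fn wait_for_update(watcher: &Notify, mut duration: Duration) {
        if duration.as_secs() > 30 {
            duration = Duration::from_secs(30);
        }
        let _ = tokio::time::timeout(duration, watcher.notified()).await;
    }

Compared with the removed `select!` arm (which also had to pin the sleep and guard on `is_elapsed()`), `timeout` expresses the same intent with no pinning and no extra branch.
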
@@ -138,7 +138,7 @@ impl Rooms { key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); - info!("Looking for {} {:?}", event_type, state_key); + debug!("Looking for {} {:?}", event_type, state_key); let short = self.statekey_short.get(&key)?; @@ -147,11 +147,11 @@ impl Rooms { stateid.push(0xff); stateid.extend_from_slice(&short); - info!("trying to find pduid/eventid. short: {:?}", stateid); + debug!("trying to find pduid/eventid. short: {:?}", stateid); self.stateid_pduid .get(&stateid)? .map_or(Ok(None), |short_id| { - info!("found in stateid_pduid"); + debug!("found in stateid_pduid"); let mut long_id = room_id.as_bytes().to_vec(); long_id.push(0xff); long_id.extend_from_slice(&short_id); @@ -163,7 +163,7 @@ impl Rooms { .map_err(|_| Error::bad_database("Invalid PDU in db."))?, ), None => { - info!("looking in outliers"); + debug!("looking in outliers"); ( short_id.clone().into(), self.eventid_outlierpdu @@ -180,7 +180,7 @@ impl Rooms { })) }) } else { - info!("short id not found"); + warn!("short id not found"); Ok(None) } } @@ -288,7 +288,7 @@ impl Rooms { let mut state_id = prefix.clone(); state_id.extend_from_slice(&short.to_be_bytes()); - info!("inserting {:?} into {:?}", short_id, state_id); + debug!("inserting {:?} into {:?}", short_id, state_id); self.stateid_pduid.insert(state_id, short_id)?; } @@ -574,7 +574,7 @@ impl Rooms { self.pduid_pdu.insert( &pdu_id, - &*serde_json::to_string(&pdu_json) + &*serde_json::to_string(dbg!(&pdu_json)) .expect("CanonicalJsonObject is always a valid String"), )?; @@ -889,12 +889,12 @@ impl Rooms { content.clone(), prev_event, None, // TODO: third party invite - dbg!(&auth_events + &auth_events .iter() .map(|((ty, key), pdu)| { Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) - .collect::>>()?), + .collect::>>()?, ) .map_err(|e| { log::error!("{}", e); diff --git a/src/database/sending.rs b/src/database/sending.rs index fc1d27d..b35f7c5 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -10,7 +10,7 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{info, warn}; +use log::{debug, error, info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ @@ -308,8 +308,6 @@ impl Sending { key.extend_from_slice(pdu_id); self.servernamepduids.insert(key, b"")?; - println!("AAAA"); - Ok(()) } @@ -348,7 +346,7 @@ impl Sending { pdu_ids: Vec, db: &Database, ) -> std::result::Result { - match dbg!(&kind) { + match &kind { OutgoingKind::Appservice(server) => { let pdu_jsons = pdu_ids .iter() @@ -414,21 +412,23 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); - for pdu in dbg!(&pdus) { + for pdu in pdus { // Redacted events are not notification targets (we don't send push for them) if pdu.unsigned.get("redacted_because").is_some() { continue; } - for user in db.rooms.room_members(&pdu.room_id) { - let user = user.map_err(|e| (OutgoingKind::Push(id.clone()), e))?; - + for user in db.users.iter().filter_map(|r| r.ok()).filter(|user_id| { + db.rooms.is_joined(&user_id, &pdu.room_id).unwrap_or(false) + }) { // Don't notify the user of their own events if user == pdu.sender { continue; } - let pushers = dbg!(db.pusher.get_pusher(&user)) + let pushers = db + .pusher + .get_pusher(&user) .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; let rules_for_user = db @@ -467,7 +467,7 @@ impl Sending { unread, &pushers, rules_for_user, - pdu, + &pdu, db, ) .await 
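
The sending loop above now derives push targets by scanning all known users and keeping only those joined to the event's room, rather than relying on `room_members`, while still skipping redacted events and the event's sender. A minimal sketch of that selection, with `users` and `is_joined` as hypothetical stand-ins for the `db.users` / `db.rooms` accessors:

    use ruma::UserId;

    // Keep only users who are joined to the room and are not the sender;
    // pusher lookup and push-rule evaluation then happen per remaining user.
    fn push_targets<'a>(
        users: &'a [UserId],
        is_joined: impl Fn(&UserId) -> bool + 'a,
        sender: &'a UserId,
    ) -> impl Iterator<Item = &'a UserId> + 'a {
        users
            .iter()
            .filter(move |user| is_joined(*user))
            .filter(move |user| *user != sender)
    }

The trade-off in the hunk above is an extra scan over all users in exchange for not depending on the per-room member index at this point in the send path.
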
@@ -510,7 +510,7 @@ impl Sending { let permit = db.sending.maximum_requests.acquire().await; - info!("sending pdus to {}: {:#?}", server, pdu_jsons); + error!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = server_server::send_request( &db.globals, &*server, @@ -527,7 +527,7 @@ impl Sending { ) .await .map(|response| { - info!("server response: {:?}", response); + error!("server response: {:?}", response); kind.clone() }) .map_err(|e| (kind, e)); diff --git a/src/error.rs b/src/error.rs index d8f10f4..8a64e63 100644 --- a/src/error.rs +++ b/src/error.rs @@ -111,63 +111,3 @@ where .respond_to(r) } } - -pub struct ConduitLogger { - pub db: Database, - pub last_logs: RwLock>, -} - -impl log::Log for ConduitLogger { - fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { - true - } - - fn log(&self, record: &log::Record<'_>) { - let output = format!("{} - {}", record.level(), record.args()); - - let match_mod_path = - |path: &str| path.starts_with("conduit::") || path.starts_with("state"); - - if self.enabled(record.metadata()) - && (record.module_path().map_or(false, match_mod_path) - || record - .module_path() - .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying - && record.metadata().level() <= log::Level::Warn) - { - let first_line = output - .lines() - .next() - .expect("lines always returns one item"); - - eprintln!("{}", output); - - let mute_duration = match record.metadata().level() { - log::Level::Error => Duration::from_secs(60 * 5), // 5 minutes - log::Level::Warn => Duration::from_secs(60 * 60 * 24), // A day - _ => Duration::from_secs(60 * 60 * 24 * 7), // A week - }; - - if self - .last_logs - .read() - .unwrap() - .get(first_line) - .map_or(false, |i| i.elapsed() < mute_duration) - // Don't post this log again for some time - { - return; - } - - if let Ok(mut_last_logs) = &mut self.last_logs.try_write() { - mut_last_logs.insert(first_line.to_owned(), Instant::now()); - } - - self.db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::notice_plain(output), - )); - } - } - - fn flush(&self) {} -} diff --git a/src/server_server.rs b/src/server_server.rs index 02610e8..919d12f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{error, info, warn}; +use log::{debug, error, info, warn}; use regex::Regex; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ @@ -27,7 +27,7 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, future::Future, net::{IpAddr, SocketAddr}, @@ -601,7 +601,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { - info!("Working on incoming pdu: {:?}", value); + debug!("Working on incoming pdu: {:?}", value); let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -636,11 +636,11 @@ pub async fn send_transaction_message_route<'a>( continue; } }; - info!("Validated event."); + debug!("Validated event."); // 6. persist the event as an outlier. 
db.rooms.add_pdu_outlier(&pdu)?; - info!("Added pdu as outlier."); + debug!("Added pdu as outlier."); // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -649,7 +649,7 @@ pub async fn send_transaction_message_route<'a>( // // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event - info!("Requesting state at event."); + debug!("Requesting state at event."); let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -664,7 +664,7 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - info!("Fetching state events at event."); + debug!("Fetching state events at event."); let state = match fetch_events( &db, server_name, @@ -706,7 +706,7 @@ pub async fn send_transaction_message_route<'a>( Err(_) => continue, }; - info!("Fetching auth events of state events at event."); + debug!("Fetching auth events of state events at event."); (state, incoming_auth_events) } Err(_) => { @@ -735,7 +735,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } - info!("Auth check succeeded."); + debug!("Auth check succeeded."); // End of step 10. // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it @@ -746,8 +746,6 @@ pub async fn send_transaction_message_route<'a>( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); - info!("current state: {:#?}", current_state); - if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, @@ -764,7 +762,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; }; - info!("Auth check with current state succeeded."); + debug!("Auth check with current state succeeded."); // Step 11. Ensure that the state is derived from the previous current state (i.e. we calculated by doing state res // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) @@ -773,7 +771,7 @@ pub async fn send_transaction_message_route<'a>( // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. 
let extremities = match calculate_forward_extremities(&db, &pdu).await { Ok(fork_ids) => { - info!("Calculated new forward extremities: {:?}", fork_ids); + debug!("Calculated new forward extremities: {:?}", fork_ids); fork_ids } Err(_) => { @@ -828,20 +826,21 @@ pub async fn send_transaction_message_route<'a>( for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { - let event = match auth_cache.get(auth_id) { - Some(aev) => aev.clone(), - // The only events that haven't been added to the auth cache are - // events we have knowledge of previously - None => { - error!("Event was not present in auth_cache {}", auth_id); - resolved_map.insert( - event_id.clone(), - Err("Event was not present in auth cache".into()), - ); - continue 'main_pdu_loop; + match fetch_events( + &db, + server_name, + &mut pub_key_map, + &[auth_id.clone()], + &mut auth_cache, + ) + .await + { + // This should always contain exactly one element when Ok + Ok(events) => state_auth.push(events[0].clone()), + Err(e) => { + debug!("Event was not present: {}", e); } - }; - state_auth.push(event); + } } auth_events.push(state_auth); } @@ -864,7 +863,7 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - info!("auth events: {:?}", auth_cache); + debug!("auth events: {:?}", auth_cache); let res = match state_res::StateResolution::resolve( pdu.room_id(), @@ -916,7 +915,7 @@ pub async fn send_transaction_message_route<'a>( // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - info!("Appended incoming pdu."); + debug!("Appended incoming pdu."); // Set the new room state to the resolved state update_resolved_state( @@ -928,7 +927,7 @@ pub async fn send_transaction_message_route<'a>( None }, )?; - info!("Updated resolved state"); + debug!("Updated resolved state"); // Event has passed all auth/stateres checks } @@ -972,7 +971,7 @@ fn validate_event<'a>( } .keys() { - info!("Fetching signing keys for {}", signature_server); + debug!("Fetching signing keys for {}", signature_server); let keys = match fetch_signing_keys( &db, &Box::::try_from(&**signature_server).map_err(|_| { @@ -981,10 +980,7 @@ fn validate_event<'a>( ) .await { - Ok(keys) => { - info!("Keys: {:?}", keys); - keys - } + Ok(keys) => keys, Err(_) => { return Err( "Signature verification failed: Could not fetch signing key.".to_string(), @@ -993,8 +989,6 @@ fn validate_event<'a>( }; pub_key_map.insert(signature_server.clone(), keys); - - info!("Fetched signing keys"); } let mut val = @@ -1026,7 +1020,7 @@ fn validate_event<'a>( ) .map_err(|_| "Event is not a valid PDU".to_string())?; - info!("Fetching auth events."); + debug!("Fetching auth events."); fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) .await .map_err(|e| e.to_string())?; @@ -1035,7 +1029,7 @@ fn validate_event<'a>( /* // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - info!("Fetching prev events."); + debug!("Fetching prev events."); let previous = fetch_events(&db, origin, pub_key_map, &pdu.prev_events, auth_cache) .await .map_err(|e| e.to_string())?; @@ -1049,7 +1043,7 @@ fn validate_event<'a>( }; // Check that the event passes auth based on the auth_events - info!("Checking auth."); + debug!("Checking auth."); let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, @@ -1073,7 +1067,7 @@ fn validate_event<'a>( return Err("Event has failed auth check with auth events".to_string()); } - info!("Validation successful."); + debug!("Validation successful."); Ok((pdu, previous_create)) }) } @@ -1111,20 +1105,19 @@ pub(crate) async fn fetch_events( ) -> Result>> { let mut pdus = vec![]; for id in events { - info!("Fetching event: {}", id); let pdu = match auth_cache.get(id) { Some(pdu) => { - info!("Event found in cache"); + debug!("Event found in cache"); pdu.clone() } // `get_pdu` checks the outliers tree for us None => match db.rooms.get_pdu(&id)? { Some(pdu) => { - info!("Event found in outliers"); + debug!("Event found in outliers"); Arc::new(pdu) } None => { - info!("Fetching event over federation"); + debug!("Fetching event over federation"); match db .sending .send_federation_request( @@ -1135,7 +1128,7 @@ pub(crate) async fn fetch_events( .await { Ok(res) => { - info!("Got event over federation: {:?}", res); + debug!("Got event over federation: {:?}", res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); let (pdu, _) = @@ -1146,7 +1139,7 @@ pub(crate) async fn fetch_events( Error::Conflict("Authentication of event failed") })?; - info!("Added fetched pdu as outlier."); + debug!("Added fetched pdu as outlier."); db.rooms.add_pdu_outlier(&pdu)?; pdu } @@ -1171,15 +1164,11 @@ pub(crate) async fn fetch_signing_keys( let mut result = BTreeMap::new(); match db.globals.signing_keys_for(origin)? 
{ - keys if !keys.is_empty() => { - info!("we knew the signing keys already: {:?}", keys); - Ok(keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect()) - } + keys if !keys.is_empty() => Ok(keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect()), _ => { - info!("Asking {} for it's signing key", origin); match db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) @@ -1204,7 +1193,7 @@ pub(crate) async fn fetch_signing_keys( } _ => { for server in db.globals.trusted_servers() { - info!("Asking {} for {}'s signing key", server, origin); + debug!("Asking {} for {}'s signing key", server, origin); if let Ok(keys) = db .sending .send_federation_request( @@ -1219,7 +1208,7 @@ pub(crate) async fn fetch_signing_keys( ) .await { - info!("Got signing keys: {:?}", keys); + debug!("Got signing keys: {:?}", keys); for k in keys.server_keys.into_iter() { db.globals.add_signing_key(origin, &k)?; result.extend( @@ -1364,7 +1353,6 @@ pub(crate) async fn build_forward_extremity_snapshots( fork_states.insert(current_state); } - info!("Fork states: {:?}", fork_states); Ok(fork_states) } @@ -1548,7 +1536,10 @@ pub fn get_missing_events_route<'a>( ) .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, ); - events.push(serde_json::from_value(pdu).expect("Raw<..> is always valid")); + events.push(PduEvent::convert_to_outgoing_federation_event( + serde_json::from_value(pdu) + .map_err(|_| Error::bad_database("Invalid pdu in database."))?, + )); } i += 1; } From 100307c9366383d8c612a464dfcee542e97f9d44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 17 Mar 2021 22:30:25 +0100 Subject: [PATCH 066/103] improvement: optimize state storage --- src/client_server/membership.rs | 12 +- src/client_server/profile.rs | 2 - src/client_server/room.rs | 4 +- src/client_server/state.rs | 12 +- src/client_server/sync.rs | 45 +-- src/database.rs | 12 +- src/database/pusher.rs | 3 +- src/database/rooms.rs | 469 +++++++++++++++++++------------- src/server_server.rs | 36 ++- 9 files changed, 341 insertions(+), 254 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d63fa02..d571eaa 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -106,7 +106,6 @@ pub async fn leave_room_route( ErrorKind::BadState, "Cannot leave a room you are not a member of.", ))? - .1 .content, ) .expect("from_value::> can never fail") @@ -195,7 +194,6 @@ pub async fn kick_user_route( ErrorKind::BadState, "Cannot kick member that's not in the room.", ))? - .1 .content, ) .expect("Raw::from_value always works") @@ -251,7 +249,7 @@ pub async fn ban_user_route( is_direct: None, third_party_invite: None, }), - |(_, event)| { + |event| { let mut event = serde_json::from_value::>(event.content) .expect("Raw::from_value always works") @@ -302,7 +300,6 @@ pub async fn unban_user_route( ErrorKind::BadState, "Cannot unban a user who is not banned.", ))? 
- .1 .content, ) .expect("from_value::> can never fail") @@ -617,10 +614,7 @@ async fn join_room_by_id_helper( &db.globals, )?; } - let mut long_id = room_id.as_bytes().to_vec(); - long_id.push(0xff); - long_id.extend_from_slice(id.as_bytes()); - state.insert((pdu.kind.clone(), state_key.clone()), long_id); + state.insert((pdu.kind.clone(), state_key.clone()), pdu.event_id.clone()); } } @@ -629,7 +623,7 @@ async fn join_room_by_id_helper( pdu.kind.clone(), pdu.state_key.clone().expect("join event has state key"), ), - pdu_id.clone(), + pdu.event_id.clone(), ); db.rooms.force_state(room_id, state, &db.globals)?; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 7e57c1e..9bcb289 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -49,7 +49,6 @@ pub async fn set_displayname_route( "Tried to send displayname update for user not in the room.", ) })? - .1 .content .clone(), ) @@ -144,7 +143,6 @@ pub async fn set_avatar_url_route( "Tried to send avatar url update for user not in the room.", ) })? - .1 .content .clone(), ) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 409028c..399677f 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -380,7 +380,6 @@ pub async fn upgrade_room_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? - .1 .content, ) .expect("Raw::from_value always works") @@ -452,7 +451,7 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? { - Some((_, v)) => v.content.clone(), + Some(v) => v.content.clone(), None => continue, // Skipping missing events. }; @@ -482,7 +481,6 @@ pub async fn upgrade_room_route( db.rooms .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? - .1 .content, ) .expect("database contains invalid PDU") diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 57bf7e5..54c5fa5 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -112,7 +112,7 @@ pub async fn get_state_events_route( && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|(_, event)| { + .map(|event| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -159,7 +159,7 @@ pub async fn get_state_events_for_key_route( && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|(_, event)| { + .map(|event| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -183,8 +183,7 @@ pub async fn get_state_events_for_key_route( .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", - ))? - .1; + ))?; Ok(get_state_events_for_key::Response { content: serde_json::value::to_raw_value(&event.content) @@ -211,7 +210,7 @@ pub async fn get_state_events_for_empty_key_route( && !matches!( db.rooms .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? - .map(|(_, event)| { + .map(|event| { serde_json::from_value::(event.content) .map_err(|_| { Error::bad_database( @@ -235,8 +234,7 @@ pub async fn get_state_events_for_empty_key_route( .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", - ))? 
- .1; + ))?; Ok(get_state_events_for_empty_key::Response { content: serde_json::value::to_raw_value(&event.content) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6551b2a..280632b 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -96,7 +96,7 @@ pub async fn sync_events_route( // Database queries: - let current_state_hash = db.rooms.current_state_hash(&room_id)?; + let current_shortstatehash = db.rooms.current_shortstatehash(&room_id)?; // These type is Option>. The outer Option is None when there is no event between // since and the current room state, meaning there should be no updates. @@ -109,9 +109,11 @@ pub async fn sync_events_route( .next() .is_some(); - let since_state_hash = first_pdu_before_since - .as_ref() - .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); + let since_shortstatehash = first_pdu_before_since.as_ref().map(|pdu| { + db.rooms + .pdu_shortstatehash(&pdu.as_ref().ok()?.1.event_id) + .ok()? + }); let ( heroes, @@ -119,7 +121,7 @@ pub async fn sync_events_route( invited_member_count, joined_since_last_sync, state_events, - ) = if pdus_after_since && Some(¤t_state_hash) != since_state_hash.as_ref() { + ) = if pdus_after_since && Some(current_shortstatehash) != since_shortstatehash { let current_state = db.rooms.room_state_full(&room_id)?; let current_members = current_state .iter() @@ -129,11 +131,18 @@ pub async fn sync_events_route( let encrypted_room = current_state .get(&(EventType::RoomEncryption, "".to_owned())) .is_some(); - let since_state = since_state_hash.as_ref().map(|state_hash| { - state_hash - .as_ref() - .and_then(|state_hash| db.rooms.state_full(&room_id, &state_hash).ok()) - }); + let since_state = since_shortstatehash + .as_ref() + .map(|since_shortstatehash| { + Ok::<_, Error>( + since_shortstatehash + .map(|since_shortstatehash| { + db.rooms.state_full(&room_id, since_shortstatehash) + }) + .transpose()?, + ) + }) + .transpose()?; let since_encryption = since_state.as_ref().map(|state| { state @@ -496,16 +505,16 @@ pub async fn sync_events_route( .and_then(|pdu| pdu.ok()) .and_then(|pdu| { db.rooms - .pdu_state_hash(&pdu.0) + .pdu_shortstatehash(&pdu.1.event_id) .ok()? 
.ok_or_else(|| Error::bad_database("Pdu in db doesn't have a state hash.")) .ok() }) - .and_then(|state_hash| { + .and_then(|shortstatehash| { db.rooms .state_get( &room_id, - &state_hash, + shortstatehash, &EventType::RoomMember, sender_user.as_str(), ) @@ -513,14 +522,14 @@ pub async fn sync_events_route( .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) .ok() }) - .and_then(|(pdu_id, pdu)| { + .and_then(|pdu| { serde_json::from_value::>( pdu.content.clone(), ) .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .map(|content| (pdu_id, pdu, content)) + .map(|content| (pdu, content)) .ok() }) { since_member @@ -529,7 +538,7 @@ pub async fn sync_events_route( continue; }; - let left_since_last_sync = since_member.2.membership == MembershipState::Join; + let left_since_last_sync = since_member.1.membership == MembershipState::Join; let left_room = if left_since_last_sync { device_list_left.extend( @@ -550,10 +559,10 @@ pub async fn sync_events_route( let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; let mut room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .take_while(|(pdu_id, _)| since_member.0 != pdu_id) + .take_while(|(pdu_id, pdu)| &since_member.0 != pdu) .map(|(_, pdu)| pdu.to_sync_room_event()) .collect::>(); - room_events.push(since_member.1.to_sync_room_event()); + room_events.push(since_member.0.to_sync_room_event()); sync_events::LeftRoom { account_data: sync_events::AccountData { events: Vec::new() }, diff --git a/src/database.rs b/src/database.rs index 17177e8..f65d5e0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -163,10 +163,14 @@ impl Database { roomuserid_invited: db.open_tree("roomuserid_invited")?, userroomid_left: db.open_tree("userroomid_left")?, - statekey_short: db.open_tree("statekey_short")?, - stateid_pduid: db.open_tree("stateid_pduid")?, - pduid_statehash: db.open_tree("pduid_statehash")?, - roomid_statehash: db.open_tree("roomid_statehash")?, + statekey_shortstatekey: db.open_tree("statekey_shortstatekey")?, + stateid_shorteventid: db.open_tree("stateid_shorteventid")?, + eventid_shorteventid: db.open_tree("eventid_shorteventid")?, + shorteventid_eventid: db.open_tree("shorteventid_eventid")?, + shorteventid_shortstatehash: db.open_tree("eventid_shortstatehash")?, + roomid_shortstatehash: db.open_tree("roomid_shortstatehash")?, + statehash_shortstatehash: db.open_tree("statehash_shortstatehash")?, + eventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, }, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 59ccbef..b6c6cf4 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -312,7 +312,6 @@ pub async fn send_push_notice( && db .rooms .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(|(_, pl)| pl) .map(deserialize) .flatten() .map_or(false, power_level_cmp) @@ -514,7 +513,7 @@ async fn send_notice( let room_name = db .rooms .room_state_get(&event.room_id, &EventType::RoomName, "")? - .map(|(_, pdu)| match pdu.content.get("name") { + .map(|pdu| match pdu.content.get("name") { Some(serde_json::Value::String(s)) => Some(s.to_string()), _ => None, }) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c908d51..a342566 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -59,15 +59,19 @@ pub struct Rooms { pub(super) userroomid_left: sled::Tree, /// Remember the current state hash of a room. 
- pub(super) roomid_statehash: sled::Tree, + pub(super) roomid_shortstatehash: sled::Tree, /// Remember the state hash at events in the past. - pub(super) pduid_statehash: sled::Tree, - /// The state for a given state hash. - /// - /// StateKey = EventType + StateKey, Short = Count - pub(super) statekey_short: sled::Tree, - /// StateId = StateHash + Short, PduId = Count (without roomid) - pub(super) stateid_eventid: sled::Tree, + pub(super) shorteventid_shortstatehash: sled::Tree, + /// StateKey = EventType + StateKey, ShortStateKey = Count + pub(super) statekey_shortstatekey: sled::Tree, + pub(super) shorteventid_eventid: sled::Tree, + /// ShortEventId = Count + pub(super) eventid_shorteventid: sled::Tree, + /// ShortEventId = Count + pub(super) statehash_shortstatehash: sled::Tree, + /// ShortStateHash = Count + /// StateId = ShortStateHash + ShortStateKey + pub(super) stateid_shorteventid: sled::Tree, /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. @@ -81,37 +85,65 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] - pub fn state_full( + pub fn state_full_ids( &self, room_id: &RoomId, state_hash: &StateHashId, - ) -> Result> { - self.stateid_pduid - .scan_prefix(&state_hash) + ) -> Result> { + let shortstatehash = self + .statehash_shortstatehash + .get(state_hash)? + .ok_or_else(|| Error::bad_database("Asked for statehash that does not exist."))?; + + Ok(self + .stateid_shorteventid + .scan_prefix(&shortstatehash) .values() - .map(|short_id| { - let short_id = short_id?; - let mut long_id = room_id.as_bytes().to_vec(); - long_id.push(0xff); - long_id.extend_from_slice(&short_id); - match self.pduid_pdu.get(&long_id)? { - Some(b) => serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")), - None => self - .eventid_outlierpdu - .get(short_id)? - .map(|b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, - } + .filter_map(|r| r.ok()) + .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) + .flatten() + .map(|bytes| { + Ok::<_, Error>( + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") + })?) + .map_err(|_| { + Error::bad_database("EventId in stateid_shorteventid is invalid.") + })?, + ) }) .filter_map(|r| r.ok()) + .collect()) + } + + #[tracing::instrument(skip(self))] + pub fn state_full( + &self, + room_id: &RoomId, + shortstatehash: u64, + ) -> Result> { + Ok(self + .stateid_shorteventid + .scan_prefix(shortstatehash.to_be_bytes()) + .values() + .filter_map(|r| r.ok()) + .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) + .flatten() + .map(|bytes| { + Ok::<_, Error>( + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in stateid_shorteventid is invalid unicode.") + })?) 
+ .map_err(|_| { + Error::bad_database("EventId in stateid_shorteventid is invalid.") + })?, + ) + }) + .filter_map(|r| r.ok()) + .map(|eventid| self.get_pdu(&eventid)) + .filter_map(|r| r.ok().flatten()) .map(|pdu| { - Ok(( + Ok::<_, Error>(( ( pdu.kind.clone(), pdu.state_key @@ -122,7 +154,8 @@ impl Rooms { pdu, )) }) - .collect() + .filter_map(|r| r.ok()) + .collect()) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -130,71 +163,73 @@ impl Rooms { pub fn state_get( &self, room_id: &RoomId, - state_hash: &StateHashId, + shortstatehash: u64, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result> { let mut key = event_type.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); - debug!("Looking for {} {:?}", event_type, state_key); + let shortstatekey = self.statekey_shortstatekey.get(&key)?; - let short = self.statekey_short.get(&key)?; + if let Some(shortstatekey) = shortstatekey { + let mut stateid = shortstatehash.to_be_bytes().to_vec(); + stateid.extend_from_slice(&shortstatekey); - if let Some(short) = short { - let mut stateid = state_hash.to_vec(); - stateid.push(0xff); - stateid.extend_from_slice(&short); - - debug!("trying to find pduid/eventid. short: {:?}", stateid); - self.stateid_pduid + self.stateid_shorteventid .get(&stateid)? - .map_or(Ok(None), |short_id| { - debug!("found in stateid_pduid"); - let mut long_id = room_id.as_bytes().to_vec(); - long_id.push(0xff); - long_id.extend_from_slice(&short_id); - - Ok::<_, Error>(Some(match self.pduid_pdu.get(&long_id)? { - Some(b) => ( - long_id.clone().into(), - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - ), - None => { - debug!("looking in outliers"); - ( - short_id.clone().into(), - self.eventid_outlierpdu - .get(&short_id)? - .map(|b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })??, + .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) + .flatten() + .map(|bytes| { + Ok::<_, Error>( + EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "EventID in stateid_shorteventid is invalid unicode.", ) - } - })) + })?) + .map_err(|_| { + Error::bad_database("EventId in stateid_shorteventid is invalid.") + })?, + ) }) + .map(|r| r.ok()) + .flatten() + .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) } else { - warn!("short id not found"); Ok(None) } } /// Returns the state hash for this pdu. #[tracing::instrument(skip(self))] - pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { - Ok(self.pduid_statehash.get(pdu_id)?) + pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { + self.eventid_shorteventid + .get(event_id.as_bytes())? + .map_or(Ok(None), |shorteventid| { + Ok(self.shorteventid_shortstatehash.get(shorteventid)?.map_or( + Ok::<_, Error>(None), + |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "Invalid shortstatehash bytes in shorteventid_shortstatehash", + ) + })?)) + }, + )?) + }) } /// Returns the last state hash key added to the db for the given room. #[tracing::instrument(skip(self))] - pub fn current_state_hash(&self, room_id: &RoomId) -> Result> { - Ok(self.roomid_statehash.get(room_id.as_bytes())?) + pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.roomid_shortstatehash + .get(room_id.as_bytes())? 
+ .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") + })?)) + }) } /// This fetches auth events from the current state. @@ -215,7 +250,7 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some((_, pdu)) = self.room_state_get( + if let Some(pdu) = self.room_state_get( room_id, &event_type, &state_key @@ -233,9 +268,9 @@ impl Rooms { /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, pdu_id_bytes: &[&[u8]]) -> Result { + fn calculate_hash(&self, bytes_list: &[&[u8]]) -> Result { // We only hash the pdu's event ids, not the whole pdu - let bytes = pdu_id_bytes.join(&0xff); + let bytes = bytes_list.join(&0xff); let hash = digest::digest(&digest::SHA256, &bytes); Ok(hash.as_ref().into()) } @@ -259,41 +294,65 @@ impl Rooms { pub fn force_state( &self, room_id: &RoomId, - state: HashMap<(EventType, String), Vec>, + state: HashMap<(EventType, String), EventId>, globals: &super::globals::Globals, ) -> Result<()> { - let state_hash = - self.calculate_hash(&state.values().map(|long_id| &**long_id).collect::>())?; - let mut prefix = state_hash.to_vec(); - prefix.push(0xff); + let state_hash = self.calculate_hash( + &state + .values() + .map(|event_id| event_id.as_bytes()) + .collect::>(), + )?; - for ((event_type, state_key), long_id) in state { + let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? { + Some(shortstatehash) => { + warn!("state hash already existed?!"); + shortstatehash.to_vec() + } + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(&state_hash, &shortstatehash.to_be_bytes())?; + shortstatehash.to_be_bytes().to_vec() + } + }; + + for ((event_type, state_key), eventid) in state { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); - let short = match self.statekey_short.get(&statekey)? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid short bytes in statekey_short."))?, + let shortstatekey = match self.statekey_shortstatekey.get(&statekey)? { + Some(shortstatekey) => shortstatekey.to_vec(), None => { - let short = globals.next_count()?; - self.statekey_short - .insert(&statekey, &short.to_be_bytes())?; - short + let shortstatekey = globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + shortstatekey.to_be_bytes().to_vec() } }; - // If it's a pdu id we remove the room id, if it's an event id we leave it the same - let short_id = long_id.splitn(2, |&b| b == 0xff).nth(1).unwrap_or(&long_id); + let shorteventid = match self.eventid_shorteventid.get(eventid.as_bytes())? 
{ + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(eventid.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), eventid.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; - let mut state_id = prefix.clone(); - state_id.extend_from_slice(&short.to_be_bytes()); - debug!("inserting {:?} into {:?}", short_id, state_id); - self.stateid_pduid.insert(state_id, short_id)?; + let mut state_id = shortstatehash.clone(); + state_id.extend_from_slice(&shortstatekey); + + self.stateid_shorteventid + .insert(&*state_id, &*shorteventid)?; } - self.roomid_statehash - .insert(room_id.as_bytes(), &*state_hash)?; + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &*shortstatehash)?; Ok(()) } @@ -304,8 +363,8 @@ impl Rooms { &self, room_id: &RoomId, ) -> Result> { - if let Some(current_state_hash) = self.current_state_hash(room_id)? { - self.state_full(&room_id, ¤t_state_hash) + if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + self.state_full(&room_id, current_shortstatehash) } else { Ok(BTreeMap::new()) } @@ -318,9 +377,9 @@ impl Rooms { room_id: &RoomId, event_type: &EventType, state_key: &str, - ) -> Result> { - if let Some(current_state_hash) = self.current_state_hash(room_id)? { - self.state_get(&room_id, ¤t_state_hash, event_type, state_key) + ) -> Result> { + if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + self.state_get(&room_id, current_shortstatehash, event_type, state_key) } else { Ok(None) } @@ -377,12 +436,6 @@ impl Rooms { .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } - pub fn get_long_id(&self, event_id: &EventId) -> Result> { - Ok(self - .get_pdu_id(event_id)? - .map_or_else(|| event_id.as_bytes().to_vec(), |pduid| pduid.to_vec())) - } - /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. @@ -538,15 +591,15 @@ impl Rooms { .entry("unsigned".to_owned()) .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) { - if let Some(prev_state_hash) = self.pdu_state_hash(&pdu_id).unwrap() { + if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { if let Some(prev_state) = self - .state_get(&pdu.room_id, &prev_state_hash, &pdu.kind, &state_key) + .state_get(&pdu.room_id, shortstatehash, &pdu.kind, &state_key) .unwrap() { unsigned.insert( "prev_content".to_owned(), CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.1.content) + utils::to_canonical_object(prev_state.content) .expect("event is valid, we just created it"), ), ); @@ -574,7 +627,7 @@ impl Rooms { self.pduid_pdu.insert( &pdu_id, - &*serde_json::to_string(dbg!(&pdu_json)) + &*serde_json::to_string(&pdu_json) .expect("CanonicalJsonObject is always a valid String"), )?; @@ -706,71 +759,112 @@ impl Rooms { /// Generates a new StateHash and associates it with the incoming event. /// /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `pduid_statehash`. - /// The incoming event is the `pdu_id` passed to this method. + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. pub fn append_to_state( &self, - new_pdu_id: &[u8], new_pdu: &PduEvent, globals: &super::globals::Globals, - ) -> Result { - let old_state = - if let Some(old_state_hash) = self.roomid_statehash.get(new_pdu.room_id.as_bytes())? { - // Store state for event. 
The state does not include the event itself. - // Instead it's the state before the pdu, so the room's old state. - self.pduid_statehash.insert(new_pdu_id, &old_state_hash)?; - if new_pdu.state_key.is_none() { - return Ok(old_state_hash); - } + ) -> Result { + let old_state = if let Some(old_shortstatehash) = + self.roomid_shortstatehash.get(new_pdu.room_id.as_bytes())? + { + // Store state for event. The state does not include the event itself. + // Instead it's the state before the pdu, so the room's old state. - let mut prefix = old_state_hash.to_vec(); - prefix.push(0xff); - self.stateid_pduid - .scan_prefix(&prefix) - .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) - // Chop the old state_hash out leaving behind the short key (u64) - .map(|(k, v)| (k.subslice(prefix.len(), k.len() - prefix.len()), v)) - .collect::>() - } else { - HashMap::new() - }; - - if let Some(state_key) = &new_pdu.state_key { - let mut new_state = old_state; - let mut pdu_key = new_pdu.kind.as_ref().as_bytes().to_vec(); - pdu_key.push(0xff); - pdu_key.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_short.get(&pdu_key)? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid short bytes in statekey_short."))?, + let shorteventid = match self.eventid_shorteventid.get(new_pdu.event_id.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), None => { - let short = globals.next_count()?; - self.statekey_short.insert(&pdu_key, &short.to_be_bytes())?; - short + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(new_pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), new_pdu.event_id.as_bytes())?; + shorteventid.to_be_bytes().to_vec() } }; - let new_pdu_id_short = new_pdu_id - .splitn(2, |&b| b == 0xff) - .nth(1) - .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; - - new_state.insert((&short.to_be_bytes()).into(), new_pdu_id_short.into()); - - let new_state_hash = - self.calculate_hash(&new_state.values().map(|b| &**b).collect::>())?; - - let mut key = new_state_hash.to_vec(); - key.push(0xff); - - for (short, short_pdu_id) in new_state { - let mut state_id = key.clone(); - state_id.extend_from_slice(&short); - self.stateid_pduid.insert(&state_id, &short_pdu_id)?; + self.shorteventid_shortstatehash + .insert(shorteventid, &old_shortstatehash)?; + if new_pdu.state_key.is_none() { + return utils::u64_from_bytes(&old_shortstatehash).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomid_shortstatehash.") + }); } - Ok(new_state_hash) + self.stateid_shorteventid + .scan_prefix(&old_shortstatehash) + .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) + // Chop the old_shortstatehash out leaving behind the short state key + .map(|(k, v)| { + ( + k.subslice(old_shortstatehash.len(), k.len() - old_shortstatehash.len()), + v, + ) + }) + .collect::>() + } else { + HashMap::new() + }; + + if let Some(state_key) = &new_pdu.state_key { + let mut new_state: HashMap = old_state; + + let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec(); + new_state_key.push(0xff); + new_state_key.extend_from_slice(state_key.as_bytes()); + + let shortstatekey = match self.statekey_shortstatekey.get(&new_state_key)? 
{ + Some(shortstatekey) => shortstatekey.to_vec(), + None => { + let shortstatekey = globals.next_count()?; + self.statekey_shortstatekey + .insert(&new_state_key, &shortstatekey.to_be_bytes())?; + shortstatekey.to_be_bytes().to_vec() + } + }; + + let shorteventid = match self.eventid_shorteventid.get(new_pdu.event_id.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(new_pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), new_pdu.event_id.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; + + new_state.insert(shortstatekey.into(), shorteventid.into()); + + let new_state_hash = self.calculate_hash( + &new_state + .values() + .map(|event_id| &**event_id) + .collect::>(), + )?; + + let shortstatehash = match self.statehash_shortstatehash.get(&new_state_hash)? { + Some(shortstatehash) => { + warn!("state hash already existed?!"); + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("PDU has invalid count bytes."))? + } + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(&new_state_hash, &shortstatehash.to_be_bytes())?; + shortstatehash + } + }; + + for (shortstatekey, shorteventid) in new_state { + let mut state_id = shortstatehash.to_be_bytes().to_vec(); + state_id.extend_from_slice(&shortstatekey); + self.stateid_shorteventid.insert(&state_id, &shorteventid)?; + } + + Ok(shortstatehash) } else { Err(Error::bad_database( "Tried to insert non-state event into room without a state.", @@ -778,9 +872,9 @@ impl Rooms { } } - pub fn set_room_state(&self, room_id: &RoomId, state_hash: &StateHashId) -> Result<()> { - self.roomid_statehash - .insert(room_id.as_bytes(), state_hash)?; + pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } @@ -833,7 +927,7 @@ impl Rooms { }, }) }, - |(_, power_levels)| { + |power_levels| { Ok(serde_json::from_value::>( power_levels.content, ) @@ -844,18 +938,15 @@ impl Rooms { )?; let sender_membership = self .room_state_get(&room_id, &EventType::RoomMember, &sender.to_string())? - .map_or( - Ok::<_, Error>(member::MembershipState::Leave), - |(_, pdu)| { - Ok( - serde_json::from_value::>(pdu.content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership, - ) - }, - )?; + .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { + Ok( + serde_json::from_value::>(pdu.content) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid Member event in db."))? + .membership, + ) + })?; let sender_power = power_levels.users.get(&sender).map_or_else( || { @@ -936,7 +1027,7 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some((_, prev_pdu)) = self.room_state_get(&room_id, &event_type, &state_key)? { + if let Some(prev_pdu) = self.room_state_get(&room_id, &event_type, &state_key)? { unsigned.insert("prev_content".to_owned(), prev_pdu.content); unsigned.insert( "prev_sender".to_owned(), @@ -1014,7 +1105,7 @@ impl Rooms { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; + let statehashid = self.append_to_state(&pdu, &db.globals)?; // remove the self.append_pdu( @@ -1030,7 +1121,7 @@ impl Rooms { // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - self.set_room_state(&room_id, &statehashid)?; + self.set_room_state(&room_id, statehashid)?; for server in self .room_servers(room_id) @@ -1267,7 +1358,7 @@ impl Rooms { // Check if the room has a predecessor if let Some(predecessor) = self .room_state_get(&room_id, &EventType::RoomCreate, "")? - .and_then(|(_, create)| { + .and_then(|create| { serde_json::from_value::< Raw, >(create.content) diff --git a/src/server_server.rs b/src/server_server.rs index 919d12f..2f32b63 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1301,7 +1301,7 @@ pub(crate) async fn build_forward_extremity_snapshots( pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, ) -> Result>>> { - let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let current_shortstatehash = db.rooms.current_shortstatehash(pdu.room_id())?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); @@ -1309,39 +1309,37 @@ pub(crate) async fn build_forward_extremity_snapshots( if id == &pdu.event_id { continue; } - match db.rooms.get_pdu_id(id)? { + match db.rooms.get_pdu(id)? { // We can skip this because it is handled outside of this function // The current server state and incoming event state are built to be // the state after. // This would be the incoming state from the server. - Some(pduid) if db.rooms.get_pdu_from_id(&pduid)?.is_some() => { - let state_hash = db + Some(leave_pdu) => { + let pdu_shortstatehash = db .rooms - .pdu_state_hash(&pduid)? - .expect("found pdu with no statehash"); + .pdu_shortstatehash(&leave_pdu.event_id)? + .ok_or_else(|| Error::bad_database("Found pdu with no statehash in db."))?; - if current_hash.as_ref() == Some(&state_hash) { + if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { includes_current_state = true; } let mut state_before = db .rooms - .state_full(pdu.room_id(), &state_hash)? + .state_full(pdu.room_id(), pdu_shortstatehash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect::>(); // Now it's the state after - if let Some(pdu) = db.rooms.get_pdu_from_id(&pduid)? 
{ - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, Arc::new(pdu)); - } + let key = (leave_pdu.kind.clone(), leave_pdu.state_key.clone()); + state_before.insert(key, Arc::new(leave_pdu)); fork_states.insert(state_before); } _ => { - error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - return Err(Error::BadDatabase("Missing state snapshot.")); + error!("Missing state snapshot for {:?}", id); + return Err(Error::bad_database("Missing state snapshot.")); } } } @@ -1367,13 +1365,12 @@ pub(crate) fn update_resolved_state( if let Some(state) = state { let mut new_state = HashMap::new(); for ((ev_type, state_k), pdu) in state { - let long_id = db.rooms.get_long_id(&pdu.event_id)?; new_state.insert( ( ev_type, state_k.ok_or_else(|| Error::Conflict("State contained non state event"))?, ), - long_id, + pdu.event_id.clone(), ); } @@ -1396,7 +1393,6 @@ pub(crate) fn append_incoming_pdu( // We can tell if we need to do this based on wether state resolution took place or not let mut new_state = HashMap::new(); for ((ev_type, state_k), state_pdu) in state { - let long_id = db.rooms.get_long_id(state_pdu.event_id())?; new_state.insert( ( ev_type.clone(), @@ -1404,7 +1400,7 @@ pub(crate) fn append_incoming_pdu( .clone() .ok_or_else(|| Error::Conflict("State contained non state event"))?, ), - long_id.to_vec(), + state_pdu.event_id.clone(), ); } @@ -1418,7 +1414,7 @@ pub(crate) fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let state_hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let state_hash = db.rooms.append_to_state(&pdu, &db.globals)?; db.rooms.append_pdu( pdu, @@ -1429,7 +1425,7 @@ pub(crate) fn append_incoming_pdu( &db, )?; - db.rooms.set_room_state(pdu.room_id(), &state_hash)?; + db.rooms.set_room_state(pdu.room_id(), state_hash)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { From a77fcd106e5f3f0c7bb3ada59ebd86d548d56213 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 00:09:57 +0100 Subject: [PATCH 067/103] feat: implement /state_ids and fix federation stuff --- src/database.rs | 12 ++++----- src/database/rooms.rs | 16 +++--------- src/main.rs | 1 + src/server_server.rs | 57 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 68 insertions(+), 18 deletions(-) diff --git a/src/database.rs b/src/database.rs index f65d5e0..67f888f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -110,11 +110,6 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let db = Self { - globals: globals::Globals::load( - db.open_tree("global")?, - db.open_tree("servertimeout_signingkey")?, - config, - )?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, @@ -191,7 +186,7 @@ impl Database { sending: sending::Sending { servernamepduids: db.open_tree("servernamepduids")?, servercurrentpdus: db.open_tree("servercurrentpdus")?, - maximum_requests: Arc::new(Semaphore::new(10)), + maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), }, admin: admin::Admin { sender: admin_sender, @@ -201,6 +196,11 @@ impl Database { id_appserviceregistrations: db.open_tree("id_appserviceregistrations")?, }, pusher: pusher::PushData::new(&db)?, + globals: globals::Globals::load( + 
db.open_tree("global")?, + db.open_tree("servertimeout_signingkey")?, + config, + )?, _db: db, }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a342566..2450622 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -85,19 +85,10 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] - pub fn state_full_ids( - &self, - room_id: &RoomId, - state_hash: &StateHashId, - ) -> Result> { - let shortstatehash = self - .statehash_shortstatehash - .get(state_hash)? - .ok_or_else(|| Error::bad_database("Asked for statehash that does not exist."))?; - + pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { Ok(self .stateid_shorteventid - .scan_prefix(&shortstatehash) + .scan_prefix(&shortstatehash.to_be_bytes()) .values() .filter_map(|r| r.ok()) .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) @@ -895,7 +886,8 @@ impl Rooms { redacts, } = pdu_builder; // TODO: Make sure this isn't called twice in parallel - let prev_events = self.get_pdu_leaves(&room_id)?; + let mut prev_events = self.get_pdu_leaves(&room_id)?; + prev_events.truncate(20); let auth_events = self.get_auth_events( &room_id, diff --git a/src/main.rs b/src/main.rs index 893273f..1aa4d54 100644 --- a/src/main.rs +++ b/src/main.rs @@ -165,6 +165,7 @@ fn setup_rocket() -> (rocket::Rocket, Config) { server_server::get_public_rooms_filtered_route, server_server::send_transaction_message_route, server_server::get_missing_events_route, + server_server::get_room_state_ids_route, server_server::get_profile_information_route, ], ) diff --git a/src/server_server.rs b/src/server_server.rs index 2f32b63..da9928c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -6,6 +6,7 @@ use regex::Regex; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ + client::error::ErrorKind, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -1543,6 +1544,62 @@ pub fn get_missing_events_route<'a>( Ok(get_missing_events::v1::Response { events }.into()) } +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/federation/v1/state_ids/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn get_room_state_ids_route<'a>( + db: State<'a, Database>, + body: Ruma>, +) -> ConduitResult { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let shortstatehash = db + .rooms + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdu_ids = db.rooms.state_full_ids(shortstatehash)?; + + let mut auth_chain_ids = BTreeSet::::new(); + let mut todo = BTreeSet::new(); + todo.insert(body.event_id.clone()); + + loop { + if let Some(event_id) = todo.iter().next().cloned() { + if let Some(pdu) = db.rooms.get_pdu(&event_id)? 
{ + todo.extend( + pdu.auth_events + .clone() + .into_iter() + .collect::>() + .difference(&auth_chain_ids) + .cloned(), + ); + auth_chain_ids.extend(pdu.auth_events.into_iter()); + } else { + warn!("Could not find pdu mentioned in auth events."); + } + + todo.remove(&event_id); + } else { + break; + } + } + + Ok(get_room_state_ids::v1::Response { + auth_chain_ids: auth_chain_ids.into_iter().collect(), + pdu_ids, + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/query/profile", data = "") From a0fa0ee7a0da0cddf5471ededf19330bab56ee5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 12:03:15 +0100 Subject: [PATCH 068/103] fix: join appservice room with alias --- src/client_server/alias.rs | 20 ++++++++++++++++---- src/database/rooms.rs | 24 ++++++++++++++++-------- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 03d4909..07b4977 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -90,11 +90,23 @@ pub async fn get_alias_helper( let aliases = registration .get("namespaces") .and_then(|ns| ns.get("aliases")) - .and_then(|users| users.get("regex")) - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()); + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .map(|aliases| { + aliases + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); - if aliases.map_or(false, |aliases| aliases.is_match(room_alias.as_str())) + if aliases + .iter() + .any(|aliases| aliases.is_match(room_alias.as_str())) && db .sending .send_appservice_request( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2450622..e6c3b93 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1142,15 +1142,23 @@ impl Rooms { }); let aliases = namespaces .get("aliases") - .and_then(|users| users.get("regex")) - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()); + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .map(|aliases| { + aliases + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); let rooms = namespaces .get("rooms") .and_then(|rooms| rooms.as_sequence()); - let room_aliases = self.room_aliases(&room_id); - let bridge_user_id = appservice .1 .get("sender_localpart") @@ -1170,15 +1178,15 @@ impl Rooms { .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) }; - let matching_aliases = |aliases: Regex| { - room_aliases + let matching_aliases = |aliases: &Regex| { + self.room_aliases(&room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; if bridge_user_id.map_or(false, user_is_joined) || users.iter().any(matching_users) - || aliases.map_or(false, matching_aliases) + || aliases.iter().any(matching_aliases) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From 5cb15551f345843033e71d1ec1eaff5c0263a7aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 13:13:08 +0100 Subject: [PATCH 069/103] improvement: use db compression --- Cargo.lock | 55 ++++++++++++++++++++++++++++++++++++++++++++++++- Cargo.toml | 2 +- src/database.rs | 1 + 3 files changed, 
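// [Editorial sketch] The loop in get_room_state_ids_route above is a plain
// breadth-first closure over auth_events. The same idea over an in-memory map,
// with EventId simplified to String and get_pdu replaced by a HashMap lookup
// (both are assumptions for illustration only):
use std::collections::{BTreeSet, HashMap};

/// Collect every event reachable from `start` by repeatedly following
/// auth_events, deduplicating through the result set itself.
fn auth_chain(start: String, auth_events: &HashMap<String, Vec<String>>) -> BTreeSet<String> {
    let mut chain = BTreeSet::new();
    let mut todo = BTreeSet::new();
    todo.insert(start);

    while let Some(event_id) = todo.iter().next().cloned() {
        todo.remove(&event_id);
        if let Some(parents) = auth_events.get(&event_id) {
            for parent in parents {
                // Only queue events we have not already added to the chain.
                if chain.insert(parent.clone()) {
                    todo.insert(parent.clone());
                }
            }
        }
        // Unknown events are skipped, mirroring the warn! branch above.
    }
    chain
}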
56 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c70fa7e..d5010da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -149,6 +149,9 @@ name = "cc" version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +dependencies = [ + "jobserver", +] [[package]] name = "cfg-if" @@ -801,6 +804,15 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.10.0" @@ -816,6 +828,15 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +[[package]] +name = "jobserver" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" +dependencies = [ + "libc", +] + [[package]] name = "jpeg-decoder" version = "0.1.22" @@ -2023,6 +2044,7 @@ dependencies = [ "libc", "log", "parking_lot", + "zstd", ] [[package]] @@ -2067,7 +2089,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" name = "state-res" version = "0.1.0" dependencies = [ - "itertools", + "itertools 0.10.0", "log", "maplit", "ruma", @@ -2754,3 +2776,34 @@ name = "yansi" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" + +[[package]] +name = "zstd" +version = "0.5.4+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "2.0.6+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "1.4.18+zstd.1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +dependencies = [ + "cc", + "glob", + "itertools 0.9.0", + "libc", +] diff --git a/Cargo.toml b/Cargo.toml index ae0dd1d..6750d70 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-event # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" # Used for storing data permanently -sled = { version = "0.34.6", default-features = false } +sled = { version = "0.34.6", default-features = false, features = ["compression"] } # Used for emitting log entries log = "0.4.14" # Used for rocket<->ruma conversions diff --git a/src/database.rs b/src/database.rs index 67f888f..138efbe 100644 --- a/src/database.rs +++ b/src/database.rs @@ -103,6 +103,7 @@ impl Database { let db = sled::Config::default() .path(&config.database_path) .cache_capacity(config.cache_capacity as u64) + .use_compression(true) .open()?; info!("Opened sled database at {}", config.database_path); From 
7b3fe88345038938780c5a0e222f4ffa92e6e8ef Mon Sep 17 00:00:00 2001 From: Gabriel Souza Franco Date: Sat, 13 Mar 2021 22:31:41 -0300 Subject: [PATCH 070/103] Send proper Host header in federation requests --- src/database/globals.rs | 4 +--- src/server_server.rs | 19 +++++++------------ 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index c7e53ca..bad9c89 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -13,9 +13,7 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; -pub type DestinationCache = Arc, (String, Option)>>>; -type WellKnownMap = HashMap, (String, Option)>; - +type WellKnownMap = HashMap, (String, String)>; #[derive(Clone)] pub struct Globals { pub actual_destination_cache: Arc>, // actual_destination, host diff --git a/src/server_server.rs b/src/server_server.rs index da9928c..59befde 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -139,11 +139,9 @@ where } } - if let Some(host) = host { - http_request - .headers_mut() - .insert(HOST, HeaderValue::from_str(&host).unwrap()); - } + http_request + .headers_mut() + .insert(HOST, HeaderValue::from_str(&host).unwrap()); let mut reqwest_request = reqwest::Request::try_from(http_request) .expect("all http requests are valid reqwest requests"); @@ -231,10 +229,9 @@ fn add_port_to_hostname(destination_str: String) -> String { async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &'_ ServerName, -) -> (String, Option) { - let mut host = None; - +) -> (String, String) { let destination_str = destination.as_str().to_owned(); + let mut host = destination_str.clone(); let actual_destination = "https://".to_owned() + &match get_ip_with_port(destination_str.clone()) { Some(host_port) => { @@ -249,6 +246,7 @@ async fn find_actual_destination( match request_well_known(globals, &destination.as_str()).await { // 3: A .well-known file is available Some(delegated_hostname) => { + host = delegated_hostname.clone(); match get_ip_with_port(delegated_hostname.clone()) { Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file None => { @@ -270,10 +268,7 @@ async fn find_actual_destination( None => { match query_srv_record(globals, &destination_str).await { // 4: SRV record found - Some(hostname) => { - host = Some(destination_str.to_owned()); - hostname - } + Some(hostname) => hostname, // 5: No SRV record found None => add_port_to_hostname(destination_str.to_string()), } From f775c76d8a73df472e7b93bf7cb5988f89cbf5bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 18:33:43 +0100 Subject: [PATCH 071/103] chore: get rid of warnings --- rust-toolchain | 2 +- rustfmt.toml | 1 + src/client_server/read_marker.rs | 4 +--- src/client_server/sync.rs | 2 +- src/database/pusher.rs | 14 ++++++++------ src/database/rooms.rs | 2 +- src/database/sending.rs | 2 +- src/error.rs | 13 +------------ src/main.rs | 1 - src/ruma_wrapper.rs | 9 ++++----- src/server_server.rs | 7 +++++-- 11 files changed, 24 insertions(+), 33 deletions(-) diff --git a/rust-toolchain b/rust-toolchain index 21998d3..5a5c721 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.47.0 +1.50.0 diff --git a/rustfmt.toml b/rustfmt.toml index e86028b..739b454 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1 +1,2 @@ +unstable_features = true imports_granularity="Crate" diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 20464db..555b7e7 
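// [Editorial sketch] find_actual_destination above implements server discovery:
// IP literal, explicit port, .well-known delegation, SRV record, and finally the
// default port, while the Host header is now always sent and follows a .well-known
// delegation. A much-simplified sketch of that precedence; the lookup closures are
// placeholders, not Conduit APIs, and the real code also recurses into the
// delegated name for ports and SRV records:
fn find_destination(
    name: &str,
    well_known: impl Fn(&str) -> Option<String>,
    srv: impl Fn(&str) -> Option<String>,
) -> (String, String) {
    // Returned tuple is (actual_destination, host_header).
    let mut host = name.to_owned();

    let target = if name.contains(':') {
        // IP literal or explicit port: connect there directly.
        name.to_owned()
    } else if let Some(delegated) = well_known(name) {
        // .well-known delegation: the Host header follows the delegation.
        host = delegated.clone();
        delegated
    } else if let Some(srv_target) = srv(name) {
        // SRV record: connect to the SRV target, keep the original Host.
        srv_target
    } else {
        // No delegation found: fall back to the default federation port.
        format!("{}:8448", name)
    };

    (format!("https://{}", target), host)
}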
100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -3,9 +3,7 @@ use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::{ - capabilities::get_capabilities, read_marker::set_read_marker, receipt::create_receipt, - }, + r0::{read_marker::set_read_marker, receipt::create_receipt}, }, events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, }; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 280632b..da2ddf4 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -559,7 +559,7 @@ pub async fn sync_events_route( let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; let mut room_events = pdus .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .take_while(|(pdu_id, pdu)| &since_member.0 != pdu) + .take_while(|(_, pdu)| &since_member.0 != pdu) .map(|(_, pdu)| pdu.to_sync_room_event()) .collect::>(); room_events.push(since_member.0.to_sync_room_event()); diff --git a/src/database/pusher.rs b/src/database/pusher.rs index b6c6cf4..cc421db 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -9,17 +9,19 @@ use ruma::{ }, OutgoingRequest, }, - events::room::{ - member::{MemberEventContent, MembershipState}, - message::{MessageEventContent, MessageType, TextMessageEventContent}, - power_levels::PowerLevelsEventContent, + events::{ + room::{ + member::{MemberEventContent, MembershipState}, + message::{MessageEventContent, MessageType, TextMessageEventContent}, + power_levels::PowerLevelsEventContent, + }, + EventType, }, - events::EventType, push::{Action, PushCondition, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; -use std::{convert::TryFrom, fmt::Debug, time::Duration}; +use std::{convert::TryFrom, fmt::Debug}; #[derive(Debug, Clone)] pub struct PushData { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e6c3b93..d494d33 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::{debug, error, info, warn}; +use log::{error, warn}; use regex::Regex; use ring::digest; use ruma::{ diff --git a/src/database/sending.rs b/src/database/sending.rs index b35f7c5..50bbc8b 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -10,7 +10,7 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{debug, error, info, warn}; +use log::{error, info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ diff --git a/src/error.rs b/src/error.rs index 8a64e63..65c5b4f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,18 +1,7 @@ -use std::{ - collections::HashMap, - sync::RwLock, - time::{Duration, Instant}, -}; - use log::error; -use ruma::{ - api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}, - events::room::message, -}; +use ruma::api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}; use thiserror::Error; -use crate::{database::admin::AdminCommand, Database}; - #[cfg(feature = "conduit_bin")] use { crate::RumaResponse, diff --git a/src/main.rs b/src/main.rs index 1aa4d54..2ec3a42 100644 --- a/src/main.rs +++ b/src/main.rs @@ -19,7 +19,6 @@ pub use rocket::State; use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use log::LevelFilter; use rocket::{ catch, catchers, fairing::AdHoc, diff 
--git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 640771f..8c72529 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,13 +1,10 @@ use crate::Error; use ruma::{ - api::{AuthScheme, IncomingRequest, OutgoingRequest}, + api::OutgoingRequest, identifiers::{DeviceId, UserId}, Outgoing, }; -use std::{ - convert::{TryFrom, TryInto}, - ops::Deref, -}; +use std::{convert::TryInto, ops::Deref}; #[cfg(feature = "conduit_bin")] use { @@ -24,6 +21,8 @@ use { tokio::io::AsyncReadExt, Request, State, }, + ruma::api::{AuthScheme, IncomingRequest}, + std::convert::TryFrom, std::io::Cursor, }; diff --git a/src/server_server.rs b/src/server_server.rs index 59befde..d43588a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -3,7 +3,7 @@ use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{debug, error, info, warn}; use regex::Regex; -use rocket::{get, post, put, response::content::Json, State}; +use rocket::{response::content::Json, State}; use ruma::{ api::{ client::error::ErrorKind, @@ -28,7 +28,7 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, future::Future, net::{IpAddr, SocketAddr}, @@ -38,6 +38,9 @@ use std::{ time::{Duration, SystemTime}, }; +#[cfg(feature = "conduit_bin")] +use rocket::{get, post, put}; + #[tracing::instrument(skip(globals))] pub async fn send_request( globals: &crate::database::globals::Globals, From d4c76f4654cf453ee084c24f5955937d93754092 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 18 Mar 2021 19:38:08 +0100 Subject: [PATCH 072/103] chore: update ruma --- Cargo.lock | 18 ++++++++++++++++++ Cargo.toml | 10 +++++----- src/client_server/session.rs | 7 +++++-- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d5010da..adcc27b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1623,6 +1623,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "js_int", @@ -1642,6 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "http", "percent-encoding", @@ -1656,6 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1666,6 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1679,6 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "http", @@ -1697,6 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" +source = 
"git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "maplit", @@ -1709,6 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-common", @@ -1722,6 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1732,6 +1740,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1746,6 +1755,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "paste", "rand", @@ -1759,6 +1769,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro2", "quote", @@ -1769,10 +1780,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" [[package]] name = "ruma-identity-service-api" version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1785,6 +1798,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1799,6 +1813,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "form_urlencoded", "itoa", @@ -1811,6 +1826,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1821,6 +1837,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "base64 0.13.0", "ring", @@ -2088,6 +2105,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" +source = "git+https://github.com/ruma/state-res?rev=34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488#34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 6750d70..33f1d1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,16 
+18,16 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } -# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } +#ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } +#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature #state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } -# state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } -state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 7b3acfc..cb6442d 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -51,8 +51,11 @@ pub async fn login_route( // Validate login method // TODO: Other login methods let user_id = match &body.login_info { - login::IncomingLoginInfo::Password { password } => { - let username = if let login::IncomingUserInfo::MatrixId(matrix_id) = &body.user { + login::IncomingLoginInfo::Password { + identifier, + password, + } => { + let username = if let login::IncomingUserIdentifier::MatrixId(matrix_id) = identifier { matrix_id } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); From 363c629fafcaa202f296d0c1988cdb26950e40ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 22 Mar 2021 14:04:11 +0100 Subject: [PATCH 073/103] fix: signature key fetching, optimize push sending --- Cargo.lock | 18 --- Cargo.toml | 9 +- src/client_server/push.rs | 2 +- 
src/database/pusher.rs | 196 ++++++++++++++++------------- src/database/rooms.rs | 22 +++- src/database/sending.rs | 259 ++++++++++++++++++++------------------ src/server_server.rs | 226 ++++++++++++++++++--------------- 7 files changed, 396 insertions(+), 336 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index adcc27b..d5010da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1623,7 +1623,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "js_int", @@ -1643,7 +1642,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "http", "percent-encoding", @@ -1658,7 +1656,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1669,7 +1666,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1683,7 +1679,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "http", @@ -1702,7 +1697,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "maplit", @@ -1715,7 +1709,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-common", @@ -1729,7 +1722,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1740,7 +1732,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1755,7 +1746,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "paste", "rand", @@ -1769,7 +1759,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro2", "quote", @@ -1780,12 +1769,10 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = 
"git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1798,7 +1785,6 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1813,7 +1799,6 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "form_urlencoded", "itoa", @@ -1826,7 +1811,6 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1837,7 +1821,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "base64 0.13.0", "ring", @@ -2105,7 +2088,6 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488#34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 33f1d1e..1476200 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,16 +18,15 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } +#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } #ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -#state-res = { git = 
"https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } -state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/push.rs b/src/client_server/push.rs index a7ddbb6..9de8c16 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -689,7 +689,7 @@ pub async fn get_pushers_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::Response { - pushers: db.pusher.get_pusher(sender_user)?, + pushers: db.pusher.get_pushers(sender_user)?, } .into()) } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index cc421db..b0b9e1e 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -20,6 +20,7 @@ use ruma::{ push::{Action, PushCondition, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; +use sled::IVec; use std::{convert::TryFrom, fmt::Debug}; @@ -58,7 +59,17 @@ impl PushData { Ok(()) } - pub fn get_pusher(&self, sender: &UserId) -> Result> { + pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { + self.senderkey_pusher + .get(senderkey)? + .map(|push| { + Ok(serde_json::from_slice(&*push) + .map_err(|_| Error::bad_database("Invalid Pusher in db."))?) + }) + .transpose() + } + + pub fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); @@ -72,6 +83,16 @@ impl PushData { }) .collect() } + + pub fn get_pusher_senderkeys(&self, sender: &UserId) -> impl Iterator> { + let mut prefix = sender.as_bytes().to_vec(); + prefix.push(0xff); + + self.senderkey_pusher + .scan_prefix(prefix) + .keys() + .map(|r| Ok(r?)) + } } pub async fn send_request( @@ -155,7 +176,7 @@ where pub async fn send_push_notice( user: &UserId, unread: UInt, - pushers: &[Pusher], + pusher: &Pusher, ruleset: Ruleset, pdu: &PduEvent, db: &Database, @@ -194,7 +215,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -214,8 +235,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -246,7 +266,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) .await?; break; } @@ -272,7 +292,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) .await?; break; } @@ -289,7 +309,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, 
tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -326,7 +346,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) .await?; break; } @@ -352,7 +372,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()) + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) .await?; break; } @@ -369,7 +389,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -385,7 +405,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -401,7 +421,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -415,7 +435,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -429,7 +449,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pushers, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; break; } } @@ -442,98 +462,96 @@ pub async fn send_push_notice( async fn send_notice( unread: UInt, - pushers: &[Pusher], + pusher: &Pusher, tweaks: Vec, event: &PduEvent, db: &Database, name: &str, ) -> Result<()> { - let (http, _emails): (Vec<&Pusher>, _) = pushers - .iter() - .partition(|pusher| pusher.kind == Some(PusherKind::Http)); + // TODO: email + if pusher.kind == Some(PusherKind::Http) { + return Ok(()); + } // TODO: // Two problems with this // 1. if "event_id_only" is the only format kind it seems we should never add more info // 2. 
can pusher/devices have conflicting formats - for pusher in http { - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = pusher.data.url.as_ref() { - url - } else { - error!("Http Pusher must have URL specified."); - continue; - }; + let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); + let url = if let Some(url) = pusher.data.url.as_ref() { + url + } else { + error!("Http Pusher must have URL specified."); + return Ok(()); + }; - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = Some(data_minus_url); + let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); + let mut data_minus_url = pusher.data.clone(); + // The url must be stripped off according to spec + data_minus_url.url = None; + device.data = Some(data_minus_url); - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); + // Tweaks are only added if the format is NOT event_id_only + if !event_id_only { + device.tweaks = tweaks.clone(); + } + + let d = &[device]; + let mut notifi = Notification::new(d); + + notifi.prio = NotificationPriority::Low; + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); + + if event.kind == EventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + { + notifi.prio = NotificationPriority::High + } + + if event_id_only { + error!("SEND PUSH NOTICE `{}`", name); + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; + } else { + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + notifi.content = serde_json::value::to_raw_value(&event.content).ok(); + + if event.kind == EventType::RoomMember { + notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - let d = &[device]; - let mut notifi = Notification::new(d); + let user_name = db.users.displayname(&event.sender)?; + notifi.sender_display_name = user_name.as_deref(); + let room_name = db + .rooms + .room_state_get(&event.room_id, &EventType::RoomName, "")? 
+ .map(|pdu| match pdu.content.get("name") { + Some(serde_json::Value::String(s)) => Some(s.to_string()), + _ => None, + }) + .flatten(); + notifi.room_name = room_name.as_deref(); - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == EventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - error!("SEND PUSH NOTICE `{}`", name); - send_request( - &db.globals, - &url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - notifi.content = serde_json::value::to_raw_value(&event.content).ok(); - - if event.kind == EventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - let room_name = db - .rooms - .room_state_get(&event.room_id, &EventType::RoomName, "")? - .map(|pdu| match pdu.content.get("name") { - Some(serde_json::Value::String(s)) => Some(s.to_string()), - _ => None, - }) - .flatten(); - notifi.room_name = room_name.as_deref(); - - error!("SEND PUSH NOTICE Full `{}`", name); - send_request( - &db.globals, - &url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } + error!("SEND PUSH NOTICE Full `{}`", name); + send_request( + &db.globals, + &url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } // TODO: email - // for email in emails {} Ok(()) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d494d33..2e2d486 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -84,7 +84,6 @@ pub struct Rooms { impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
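// [Editorial sketch] send_notice above branches on the pusher's format: with
// PushFormat::EventIdOnly only the event/room ids and counts are sent and tweaks
// are dropped, otherwise sender, event type, content and room name are attached,
// and priority is raised for encrypted events or highlight/sound tweaks. A
// stand-in model using hand-rolled types instead of the ruma push-gateway structs:
#[derive(Default)]
struct NoticeSketch {
    event_id: String,
    room_id: String,
    high_priority: bool,
    // Only populated when the pusher is *not* in event_id_only mode:
    sender: Option<String>,
    event_type: Option<String>,
    room_name: Option<String>,
}

fn build_notice(
    event_id: &str,
    room_id: &str,
    event_id_only: bool,      // pusher.data.format == Some(PushFormat::EventIdOnly)
    encrypted_or_noisy: bool, // m.room.encrypted event, or Highlight/Sound tweak
    sender: &str,
    event_type: &str,
    room_name: Option<String>,
) -> NoticeSketch {
    let mut n = NoticeSketch {
        event_id: event_id.to_owned(),
        room_id: room_id.to_owned(),
        high_priority: encrypted_or_noisy,
        ..Default::default()
    };
    if !event_id_only {
        // The full format additionally carries sender, type and room name.
        n.sender = Some(sender.to_owned());
        n.event_type = Some(event_type.to_owned());
        n.room_name = room_name;
    }
    n
}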
- #[tracing::instrument(skip(self))] pub fn state_full_ids(&self, shortstatehash: u64) -> Result> { Ok(self .stateid_shorteventid @@ -107,7 +106,6 @@ impl Rooms { .collect()) } - #[tracing::instrument(skip(self))] pub fn state_full( &self, room_id: &RoomId, @@ -628,7 +626,25 @@ impl Rooms { .insert(pdu.event_id.as_bytes(), &*pdu_id)?; // See if the event matches any known pushers - db.sending.send_push_pdu(&*pdu_id)?; + for user in db + .users + .iter() + .filter_map(|r| r.ok()) + .filter(|user_id| db.rooms.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) + { + // Don't notify the user of their own events + if user == pdu.sender { + continue; + } + + for senderkey in db + .pusher + .get_pusher_senderkeys(&user) + .filter_map(|r| r.ok()) + { + db.sending.send_push_pdu(&*pdu_id, senderkey)?; + } + } match pdu.kind { EventType::RoomRedaction => { diff --git a/src/database/sending.rs b/src/database/sending.rs index 50bbc8b..9b74ed7 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,6 @@ use std::{ collections::HashMap, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, sync::Arc, time::{Duration, Instant, SystemTime}, @@ -14,9 +14,9 @@ use log::{error, info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ - api::{appservice, federation, OutgoingRequest}, + api::{appservice, client::r0::push::Pusher, federation, OutgoingRequest}, events::{push_rules, EventType}, - uint, ServerName, UInt, + uint, ServerName, UInt, UserId, }; use sled::IVec; use tokio::{select, sync::Semaphore}; @@ -24,14 +24,14 @@ use tokio::{select, sync::Semaphore}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(Box), - Push(Vec), + Push(Vec, Vec), // user and pushkey Normal(Box), } #[derive(Clone)] pub struct Sending { /// The state for a given state hash. - pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)ServerName / UserId + PduId + pub(super) servernamepduids: sled::Tree, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId pub(super) servercurrentpdus: sled::Tree, // ServerCurrentPdus = (+ / $)ServerName / UserId + PduId (pduid can be empty for reservation) pub(super) maximum_requests: Arc, } @@ -85,9 +85,11 @@ impl Sending { p.extend_from_slice(server.as_bytes()); p } - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&id); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); p } OutgoingKind::Normal(server) => { @@ -106,6 +108,7 @@ impl Sending { let mut subscriber = servernamepduids.watch_prefix(b""); loop { + println!("."); select! { Some(response) = futures.next() => { match response { @@ -116,9 +119,11 @@ impl Sending { p.extend_from_slice(server.as_bytes()); p } - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&id); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); p }, OutgoingKind::Normal(server) => { @@ -179,9 +184,11 @@ impl Sending { p.extend_from_slice(serv.as_bytes()); p }, - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&id); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); p }, OutgoingKind::Normal(serv) => { @@ -208,7 +215,6 @@ impl Sending { Some(event) = &mut subscriber => { if let sled::Event::Insert { key, .. 
} = event { let servernamepduid = key.clone(); - let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); let exponential_backoff = |(tries, instant): &(u32, Instant)| { // Fail if a request has failed recently (exponential backoff) @@ -219,33 +225,8 @@ impl Sending { instant.elapsed() < min_elapsed_duration }; - if let Some((outgoing_kind, pdu_id)) = utils::string_from_bytes( - parts - .next() - .expect("splitn will always return 1 or more elements"), - ) - .map_err(|_| Error::bad_database("[Utf8] ServerName in servernamepduid bytes are invalid.")) - .and_then(|ident_str| { - // Appservices start with a plus - Ok(if ident_str.starts_with('+') { - OutgoingKind::Appservice( - Box::::try_from(&ident_str[1..]) - .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))? - ) - } else if ident_str.starts_with('$') { - OutgoingKind::Push(ident_str[1..].as_bytes().to_vec()) - } else { - OutgoingKind::Normal( - Box::::try_from(ident_str) - .map_err(|_| Error::bad_database("ServerName in servernamepduid is invalid."))? - ) - }) - }) - .and_then(|outgoing_kind| parts - .next() - .ok_or_else(|| Error::bad_database("Invalid servernamepduid in db.")) - .map(|pdu_id| (outgoing_kind, pdu_id)) - ) + + if let Some((outgoing_kind, pdu_id)) = Self::parse_servercurrentpdus(&servernamepduid) .ok() .filter(|(outgoing_kind, _)| { if last_failed_try.get(outgoing_kind).map_or(false, exponential_backoff) { @@ -258,9 +239,11 @@ impl Sending { p.extend_from_slice(serv.as_bytes()); p }, - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(&id); + p.extend_from_slice(&user); + p.push(0xff); + p.extend_from_slice(&pushkey); p }, OutgoingKind::Normal(serv) => { @@ -279,6 +262,8 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); + dbg!("there is a future"); + futures.push( Self::handle_event( outgoing_kind, @@ -295,15 +280,9 @@ impl Sending { } #[tracing::instrument(skip(self))] - pub fn send_push_pdu(&self, pdu_id: &[u8]) -> Result<()> { - // Make sure we don't cause utf8 errors when parsing to a String... - let pduid = String::from_utf8_lossy(pdu_id).as_bytes().to_vec(); - - // these are valid ServerName chars - // (byte.is_ascii_alphanumeric() || byte == b'-' || byte == b'.') + pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: IVec) -> Result<()> { let mut key = b"$".to_vec(); - // keep each pdu push unique - key.extend_from_slice(pduid.as_slice()); + key.extend_from_slice(&senderkey); key.push(0xff); key.extend_from_slice(pdu_id); self.servernamepduids.insert(key, b"")?; @@ -313,6 +292,7 @@ impl Sending { #[tracing::instrument(skip(self))] pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { + dbg!(&server); let mut key = server.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu_id); @@ -369,6 +349,8 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); let permit = db.sending.maximum_requests.acquire().await; + + error!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = appservice_server::send_request( &db.globals, db.appservice @@ -391,17 +373,17 @@ impl Sending { response } - OutgoingKind::Push(id) => { + OutgoingKind::Push(user, pushkey) => { let pdus = pdu_ids .iter() .map(|pdu_id| { Ok::<_, (Vec, Error)>( db.rooms .get_pdu_from_id(pdu_id) - .map_err(|e| (id.clone(), e))? + .map_err(|e| (pushkey.clone(), e))? 
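// [Editorial sketch] The sending queues encode the destination into the sled key
// itself: a one-byte tag ('+' for appservices, '$' for pushers, no tag for ordinary
// servers) followed by 0xff-separated fields, ending with the pdu id. A round-trip
// sketch of that layout, independent of the sled/IVec types used above:
#[derive(Debug, PartialEq)]
enum Outgoing {
    Appservice(String),
    Push { user: Vec<u8>, pushkey: Vec<u8> },
    Normal(String),
}

fn encode(kind: &Outgoing, pdu_id: &[u8]) -> Vec<u8> {
    let mut key = Vec::new();
    match kind {
        Outgoing::Appservice(id) => {
            key.push(b'+');
            key.extend_from_slice(id.as_bytes());
        }
        Outgoing::Push { user, pushkey } => {
            key.push(b'$');
            key.extend_from_slice(user);
            key.push(0xff);
            key.extend_from_slice(pushkey);
        }
        Outgoing::Normal(server) => key.extend_from_slice(server.as_bytes()),
    }
    key.push(0xff);
    key.extend_from_slice(pdu_id);
    key
}

fn decode(key: &[u8]) -> Option<(Outgoing, Vec<u8>)> {
    // Mirrors parse_servercurrentpdus: dispatch on the tag byte, then split
    // the remaining 0xff-separated fields.
    if key.starts_with(b"$") {
        let mut parts = key[1..].splitn(3, |&b| b == 0xff);
        let user = parts.next()?.to_vec();
        let pushkey = parts.next()?.to_vec();
        let pdu = parts.next()?.to_vec();
        Some((Outgoing::Push { user, pushkey }, pdu))
    } else {
        let appservice = key.starts_with(b"+");
        let rest = if appservice { &key[1..] } else { key };
        let mut parts = rest.splitn(2, |&b| b == 0xff);
        let name = String::from_utf8(parts.next()?.to_vec()).ok()?;
        let pdu = parts.next()?.to_vec();
        let kind = if appservice {
            Outgoing::Appservice(name)
        } else {
            Outgoing::Normal(name)
        };
        Some((kind, pdu))
    }
}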
.ok_or_else(|| { ( - id.clone(), + pushkey.clone(), Error::bad_database( "[Push] Event in servernamepduids not found in db.", ), @@ -418,66 +400,80 @@ impl Sending { continue; } - for user in db.users.iter().filter_map(|r| r.ok()).filter(|user_id| { - db.rooms.is_joined(&user_id, &pdu.room_id).unwrap_or(false) - }) { - // Don't notify the user of their own events - if user == pdu.sender { - continue; - } - - let pushers = db - .pusher - .get_pusher(&user) - .map_err(|e| (OutgoingKind::Push(id.clone()), e))?; - - let rules_for_user = db - .account_data - .get::(None, &user, EventType::PushRules) - .map_err(|e| (OutgoingKind::Push(id.clone()), e))? - .map(|ev| ev.content.global) - .unwrap_or_else(|| crate::push_rules::default_pushrules(&user)); - - let unread: UInt = if let Some(last_read) = db - .rooms - .edus - .private_read_get(&pdu.room_id, &user) - .map_err(|e| (OutgoingKind::Push(id.clone()), e))? - { - (db.rooms - .pdus_since(&user, &pdu.room_id, last_read) - .map_err(|e| (OutgoingKind::Push(id.clone()), e))? - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .filter(|(_, pdu)| { - matches!( - pdu.kind.clone(), - EventType::RoomMessage | EventType::RoomEncrypted - ) - }) - .count() as u32) - .into() - } else { - // Just return zero unread messages - uint!(0) - }; - - let permit = db.sending.maximum_requests.acquire().await; - let _response = pusher::send_push_notice( - &user, - unread, - &pushers, - rules_for_user, - &pdu, - db, + let userid = UserId::try_from(utils::string_from_bytes(user).map_err(|e| { + ( + OutgoingKind::Push(user.clone(), pushkey.clone()), + Error::bad_database("Invalid push user string in db."), ) - .await - .map(|_response| kind.clone()) - .map_err(|e| (kind.clone(), e)); + })?) + .map_err(|e| { + ( + OutgoingKind::Push(user.clone(), pushkey.clone()), + Error::bad_database("Invalid push user id in db."), + ) + })?; - drop(permit); - } + let mut senderkey = user.clone(); + senderkey.push(0xff); + senderkey.extend_from_slice(pushkey); + + let pusher = match db + .pusher + .get_pusher(&senderkey) + .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + { + Some(pusher) => pusher, + None => continue, + }; + + let rules_for_user = db + .account_data + .get::(None, &userid, EventType::PushRules) + .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + .map(|ev| ev.content.global) + .unwrap_or_else(|| crate::push_rules::default_pushrules(&userid)); + + let unread: UInt = if let Some(last_read) = db + .rooms + .edus + .private_read_get(&pdu.room_id, &userid) + .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + { + (db.rooms + .pdus_since(&userid, &pdu.room_id, last_read) + .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? 
+ .filter_map(|pdu| pdu.ok()) // Filter out buggy events + .filter(|(_, pdu)| { + matches!( + pdu.kind.clone(), + EventType::RoomMessage | EventType::RoomEncrypted + ) + }) + .count() as u32) + .into() + } else { + // Just return zero unread messages + uint!(0) + }; + + let permit = db.sending.maximum_requests.acquire().await; + + error!("sending pdu to {}: {:#?}", userid, pdu); + let _response = pusher::send_push_notice( + &userid, + unread, + &pusher, + rules_for_user, + &pdu, + db, + ) + .await + .map(|_response| kind.clone()) + .map_err(|e| (kind.clone(), e)); + + drop(permit); } - Ok(OutgoingKind::Push(id.clone())) + Ok(OutgoingKind::Push(user.clone(), pushkey.clone())) } OutgoingKind::Normal(server) => { let pdu_jsons = pdu_ids @@ -540,30 +536,49 @@ impl Sending { } fn parse_servercurrentpdus(key: &IVec) -> Result<(OutgoingKind, IVec)> { - let mut parts = key.splitn(2, |&b| b == 0xff); - let server = parts.next().expect("splitn always returns one element"); - let pdu = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - - let server = utils::string_from_bytes(&server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - // Appservices start with a plus - Ok::<_, Error>(if server.starts_with('+') { + Ok::<_, Error>(if key.starts_with(b"+") { + let mut parts = key[1..].splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let pdu = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(&server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + ( OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), IVec::from(pdu), ) - } else if server.starts_with('$') { + } else if key.starts_with(b"$") { + let mut parts = key[1..].splitn(3, |&b| b == 0xff); + + let user = parts.next().expect("splitn always returns one element"); + let pushkey = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let pdu = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; ( - OutgoingKind::Push(server.as_bytes().to_vec()), + OutgoingKind::Push(user.to_vec(), pushkey.to_vec()), IVec::from(pdu), ) } else { + let mut parts = key.splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let pdu = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(&server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + ( OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") diff --git a/src/server_server.rs b/src/server_server.rs index d43588a..82c5f82 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -21,9 +21,10 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::EventType, + identifiers::{KeyId, KeyName}, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, SigningKeyAlgorithm, UserId, }; use state_res::{Event, EventMap, 
StateMap}; use std::{ @@ -600,7 +601,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { - debug!("Working on incoming pdu: {:?}", value); + info!("Working on incoming pdu: {:?}", value); let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -639,7 +640,7 @@ pub async fn send_transaction_message_route<'a>( // 6. persist the event as an outlier. db.rooms.add_pdu_outlier(&pdu)?; - debug!("Added pdu as outlier."); + info!("Added pdu as outlier."); // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -914,7 +915,7 @@ pub async fn send_transaction_message_route<'a>( // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - debug!("Appended incoming pdu."); + info!("Appended incoming pdu."); // Set the new room state to the resolved state update_resolved_state( @@ -961,21 +962,31 @@ fn validate_event<'a>( auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, (Arc, Option>)> { Box::pin(async move { - for signature_server in match value + for (signature_server, signature) in match value .get("signatures") .ok_or_else(|| "No signatures in server response pdu.".to_string())? { CanonicalJsonValue::Object(map) => map, _ => return Err("Invalid signatures object in server response pdu.".to_string()), - } - .keys() - { + } { + let signature_object = match signature { + CanonicalJsonValue::Object(map) => map, + _ => { + return Err( + "Invalid signatures content object in server response pdu.".to_string() + ) + } + }; + + let signature_ids = signature_object.keys().collect::>(); + debug!("Fetching signing keys for {}", signature_server); let keys = match fetch_signing_keys( &db, &Box::::try_from(&**signature_server).map_err(|_| { "Invalid servername in signatures of server response pdu.".to_string() })?, + signature_ids, ) .await { @@ -987,26 +998,29 @@ fn validate_event<'a>( } }; - pub_key_map.insert(signature_server.clone(), keys); + pub_key_map.insert(dbg!(signature_server.clone()), dbg!(keys)); } - let mut val = - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version5) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value + let mut val = match ruma::signatures::verify_event( + dbg!(&pub_key_map), + &value, + &RoomVersionId::Version5, + ) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), } + } else { + value } - Err(_e) => { - error!("{}", _e); - return Err("Signature verification failed".to_string()); - } - }; + } + Err(_e) => { + error!("{}", _e); + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -1116,7 +1130,7 @@ pub(crate) async fn fetch_events( Arc::new(pdu) } None => { - debug!("Fetching event over 
federation"); + debug!("Fetching event over federation: {:?}", id); match db .sending .send_federation_request( @@ -1159,78 +1173,93 @@ pub(crate) async fn fetch_events( pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, + signature_ids: Vec<&String>, ) -> Result> { - let mut result = BTreeMap::new(); + let contains_all_ids = |keys: &BTreeMap| { + signature_ids + .iter() + .all(|&id| dbg!(dbg!(&keys).contains_key(dbg!(id)))) + }; - match db.globals.signing_keys_for(origin)? { - keys if !keys.is_empty() => Ok(keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect()), - _ => { - match db - .sending - .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) - .await - { - Ok(keys) => { - db.globals.add_signing_key(origin, &keys.server_key)?; + let mut result = db + .globals + .signing_keys_for(origin)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); - result.extend( - keys.server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - keys.server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - return Ok(result); - } - _ => { - for server in db.globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Ok(keys) = db - .sending - .send_federation_request( - &db.globals, - &server, - get_remote_server_keys::v2::Request::new( - origin, - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ), - ) - .await - { - debug!("Got signing keys: {:?}", keys); - for k in keys.server_keys.into_iter() { - db.globals.add_signing_key(origin, &k)?; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - return Ok(result); - } - } - Err(Error::BadServerResponse( - "Failed to find public key for server", - )) - } + if contains_all_ids(&result) { + return Ok(result); + } + + if let Ok(get_keys_response) = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + { + db.globals + .add_signing_key(origin, &get_keys_response.server_key)?; + + result.extend( + get_keys_response + .server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + get_keys_response + .server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + + for server in db.globals.trusted_servers() { + debug!("Asking {} for {}'s signing key", server, origin); + if let Ok(keys) = db + .sending + .send_federation_request( + &db.globals, + &server, + get_remote_server_keys::v2::Request::new( + origin, + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ), + ) + .await + { + debug!("Got signing keys: {:?}", keys); + for k in keys.server_keys.into_iter() { + db.globals.add_signing_key(origin, &k)?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); } } } + + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) } /// Gather all state snapshots needed to resolve the current state of the room. 
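The refactored fetch_signing_keys above merges keys from progressively more expensive sources and stops as soon as every signature key id is covered: first the locally cached keys, then the origin's own /_matrix/key/v2/server endpoint, then each configured trusted server. A minimal, self-contained sketch of that control flow (the source functions below are hypothetical stand-ins, not Conduit APIs):

use std::collections::BTreeMap;

// Hypothetical stand-ins for the real sources; Conduit queries its local
// cache, then the origin's server-key endpoint, then every trusted server.
fn cached_keys() -> BTreeMap<String, String> { BTreeMap::new() }
fn origin_keys() -> BTreeMap<String, String> { BTreeMap::new() }
fn trusted_server_keys() -> BTreeMap<String, String> { BTreeMap::new() }

fn fetch_keys(wanted: &[&str]) -> Option<BTreeMap<String, String>> {
    let contains_all = |keys: &BTreeMap<String, String>| {
        wanted.iter().all(|id| keys.contains_key(*id))
    };
    let mut result = BTreeMap::new();
    let sources = [
        cached_keys as fn() -> BTreeMap<String, String>,
        origin_keys,
        trusted_server_keys,
    ];
    for source in sources {
        result.extend(source());
        if contains_all(&result) {
            return Some(result); // short-circuit once every requested key id is known
        }
    }
    None // the real code maps this case to Error::BadServerResponse
}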
@@ -1244,7 +1273,7 @@ pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, ) -> Result> { - let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + let mut current_leaves = dbg!(db.rooms.get_pdu_leaves(pdu.room_id())?); let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity @@ -1290,7 +1319,6 @@ pub(crate) async fn calculate_forward_extremities( /// /// This guarantees that the incoming event will be in the state sets (at least our servers /// and the sending server). -#[tracing::instrument(skip(db))] pub(crate) async fn build_forward_extremity_snapshots( db: &Database, pdu: Arc, @@ -1316,7 +1344,7 @@ pub(crate) async fn build_forward_extremity_snapshots( Some(leave_pdu) => { let pdu_shortstatehash = db .rooms - .pdu_shortstatehash(&leave_pdu.event_id)? + .pdu_shortstatehash(dbg!(&leave_pdu.event_id))? .ok_or_else(|| Error::bad_database("Found pdu with no statehash in db."))?; if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { @@ -1367,7 +1395,9 @@ pub(crate) fn update_resolved_state( new_state.insert( ( ev_type, - state_k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + state_k.ok_or_else(|| { + Error::Conflict("update_resolved_state: State contained non state event") + })?, ), pdu.event_id.clone(), ); @@ -1395,9 +1425,9 @@ pub(crate) fn append_incoming_pdu( new_state.insert( ( ev_type.clone(), - state_k - .clone() - .ok_or_else(|| Error::Conflict("State contained non state event"))?, + state_k.clone().ok_or_else(|| { + Error::Conflict("append_incoming_pdu: State contained non state event") + })?, ), state_pdu.event_id.clone(), ); From 3ea7d162dba75de25a598be8b3762f4b5e8a61ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 23 Mar 2021 12:59:27 +0100 Subject: [PATCH 074/103] fix: various improvements and fixes --- Cargo.lock | 18 ++++++++++++ Cargo.toml | 9 +++--- src/client_server/context.rs | 17 ++++++++---- src/database.rs | 12 ++++---- src/database/pusher.rs | 2 +- src/database/rooms.rs | 2 +- src/database/sending.rs | 9 +----- src/ruma_wrapper.rs | 1 + src/server_server.rs | 54 +++++++++++++++--------------------- 9 files changed, 67 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d5010da..adcc27b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1623,6 +1623,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "js_int", @@ -1642,6 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "http", "percent-encoding", @@ -1656,6 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1666,6 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1679,6 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" 
+source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "assign", "http", @@ -1697,6 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "maplit", @@ -1709,6 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-common", @@ -1722,6 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1732,6 +1740,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1746,6 +1755,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "paste", "rand", @@ -1759,6 +1769,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro2", "quote", @@ -1769,10 +1780,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" [[package]] name = "ruma-identity-service-api" version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "ruma-api", "ruma-common", @@ -1785,6 +1798,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "js_int", "ruma-api", @@ -1799,6 +1813,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "form_urlencoded", "itoa", @@ -1811,6 +1826,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1821,6 +1837,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" dependencies = [ "base64 0.13.0", "ring", @@ -2088,6 +2105,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" +source = 
"git+https://github.com/ruma/state-res?rev=34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488#34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 1476200..33f1d1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,15 +18,16 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } #ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -#state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } -state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/context.rs b/src/client_server/context.rs index cb9aaf9..1fee2f2 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -24,20 +24,25 @@ pub async fn get_context_route( )); } + let base_pdu_id = db + .rooms + .get_pdu_id(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Base event id not found.", + ))?; + + let base_token = db.rooms.pdu_count(&base_pdu_id)?; + let base_event = db .rooms - .get_pdu(&body.event_id)? + .get_pdu_from_id(&base_pdu_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Base event not found.", ))? .to_room_event(); - let base_token = db - .rooms - .get_pdu_count(&body.event_id)? 
- .expect("event still exists"); - let events_before = db .rooms .pdus_until(&sender_user, &body.room_id, base_token) diff --git a/src/database.rs b/src/database.rs index 138efbe..47cee0d 100644 --- a/src/database.rs +++ b/src/database.rs @@ -120,7 +120,7 @@ impl Database { token_userdeviceid: db.open_tree("token_userdeviceid")?, onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys")?, userid_lastonetimekeyupdate: db.open_tree("userid_lastonetimekeyupdate")?, - keychangeid_userid: db.open_tree("devicekeychangeid_userid")?, + keychangeid_userid: db.open_tree("keychangeid_userid")?, keyid_key: db.open_tree("keyid_key")?, userid_masterkeyid: db.open_tree("userid_masterkeyid")?, userid_selfsigningkeyid: db.open_tree("userid_selfsigningkeyid")?, @@ -135,7 +135,7 @@ impl Database { readreceiptid_readreceipt: db.open_tree("readreceiptid_readreceipt")?, roomuserid_privateread: db.open_tree("roomuserid_privateread")?, // "Private" read receipt roomuserid_lastprivatereadupdate: db - .open_tree("roomid_lastprivatereadupdate")?, + .open_tree("roomuserid_lastprivatereadupdate")?, typingid_userid: db.open_tree("typingid_userid")?, roomid_lasttypingupdate: db.open_tree("roomid_lasttypingupdate")?, presenceid_presence: db.open_tree("presenceid_presence")?, @@ -146,7 +146,7 @@ impl Database { roomid_pduleaves: db.open_tree("roomid_pduleaves")?, alias_roomid: db.open_tree("alias_roomid")?, - aliasid_alias: db.open_tree("alias_roomid")?, + aliasid_alias: db.open_tree("aliasid_alias")?, publicroomids: db.open_tree("publicroomids")?, tokenids: db.open_tree("tokenids")?, @@ -163,11 +163,11 @@ impl Database { stateid_shorteventid: db.open_tree("stateid_shorteventid")?, eventid_shorteventid: db.open_tree("eventid_shorteventid")?, shorteventid_eventid: db.open_tree("shorteventid_eventid")?, - shorteventid_shortstatehash: db.open_tree("eventid_shortstatehash")?, + shorteventid_shortstatehash: db.open_tree("shorteventid_shortstatehash")?, roomid_shortstatehash: db.open_tree("roomid_shortstatehash")?, statehash_shortstatehash: db.open_tree("statehash_shortstatehash")?, - eventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, }, account_data: account_data::AccountData { @@ -179,7 +179,7 @@ impl Database { key_backups: key_backups::KeyBackups { backupid_algorithm: db.open_tree("backupid_algorithm")?, backupid_etag: db.open_tree("backupid_etag")?, - backupkeyid_backup: db.open_tree("backupkeyid_backupmetadata")?, + backupkeyid_backup: db.open_tree("backupkeyid_backup")?, }, transaction_ids: transaction_ids::TransactionIds { userdevicetxnid_response: db.open_tree("userdevicetxnid_response")?, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index b0b9e1e..f4b35f2 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -469,7 +469,7 @@ async fn send_notice( name: &str, ) -> Result<()> { // TODO: email - if pusher.kind == Some(PusherKind::Http) { + if pusher.kind == Some(PusherKind::Email) { return Ok(()); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2e2d486..2144340 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1524,7 +1524,7 @@ impl Rooms { let mut aliasid = room_id.as_bytes().to_vec(); aliasid.push(0xff); aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(aliasid, &*alias.alias())?; + self.aliasid_alias.insert(aliasid, &*alias.as_bytes())?; } else { // room_id=None means remove alias 
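The aliasid key above follows the same composite-key convention used by the other sled trees in this series (servernamepduids, the push sender keys, and so on): segments are joined with a 0xff separator, a byte that never appears in valid UTF-8, so the textual parts of a key can be recovered with splitn as in parse_servercurrentpdus earlier. A generic illustration of that convention (the helper name is made up for this sketch):

// Illustrative only: join key segments with the 0xff separator byte.
fn composite_key(segments: &[&[u8]]) -> Vec<u8> {
    let mut key = Vec::new();
    for (i, segment) in segments.iter().enumerate() {
        if i > 0 {
            key.push(0xff); // separator; never a valid UTF-8 byte
        }
        key.extend_from_slice(segment);
    }
    key
}

// For example, an alias id like the one built above would be roughly
// composite_key(&[room_id.as_bytes(), &count.to_be_bytes()]).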
let room_id = self diff --git a/src/database/sending.rs b/src/database/sending.rs index 9b74ed7..a9204c5 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -108,7 +108,6 @@ impl Sending { let mut subscriber = servernamepduids.watch_prefix(b""); loop { - println!("."); select! { Some(response) = futures.next() => { match response { @@ -262,8 +261,6 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); - dbg!("there is a future"); - futures.push( Self::handle_event( outgoing_kind, @@ -292,7 +289,6 @@ impl Sending { #[tracing::instrument(skip(self))] pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { - dbg!(&server); let mut key = server.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu_id); @@ -350,7 +346,6 @@ impl Sending { .collect::>(); let permit = db.sending.maximum_requests.acquire().await; - error!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = appservice_server::send_request( &db.globals, db.appservice @@ -458,7 +453,6 @@ impl Sending { let permit = db.sending.maximum_requests.acquire().await; - error!("sending pdu to {}: {:#?}", userid, pdu); let _response = pusher::send_push_notice( &userid, unread, @@ -506,7 +500,6 @@ impl Sending { let permit = db.sending.maximum_requests.acquire().await; - error!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = server_server::send_request( &db.globals, &*server, @@ -523,7 +516,7 @@ impl Sending { ) .await .map(|response| { - error!("server response: {:?}", response); + info!("server response: {:?}", response); kind.clone() }) .map_err(|e| (kind, e)); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8c72529..9787e2d 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -219,6 +219,7 @@ where "Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Authorization", ); + response.raw_header("Access-Control-Max-Age", "86400"); response.ok() } Err(_) => Err(Status::InternalServerError), diff --git a/src/server_server.rs b/src/server_server.rs index 82c5f82..3c364db 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -863,8 +863,6 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - debug!("auth events: {:?}", auth_cache); - let res = match state_res::StateResolution::resolve( pdu.room_id(), &RoomVersionId::Version6, @@ -952,7 +950,7 @@ type AsyncRecursiveResult<'a, T> = Pin( db: &'a Database, value: CanonicalJsonObject, @@ -998,29 +996,26 @@ fn validate_event<'a>( } }; - pub_key_map.insert(dbg!(signature_server.clone()), dbg!(keys)); + pub_key_map.insert(signature_server.clone(), keys); } - let mut val = match ruma::signatures::verify_event( - dbg!(&pub_key_map), - &value, - &RoomVersionId::Version5, - ) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), + let mut val = + match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version5) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value } - } else { - value } - } - Err(_e) => { - error!("{}", _e); - return Err("Signature verification failed".to_string()); - } - }; + Err(e) => { + error!("{:?}: 
{}", value, e); + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -1085,7 +1080,7 @@ fn validate_event<'a>( }) } -#[tracing::instrument(skip(db))] +#[tracing::instrument(skip(db, key_map, auth_cache))] async fn fetch_check_auth_events( db: &Database, origin: &ServerName, @@ -1108,7 +1103,7 @@ async fn fetch_check_auth_events( /// /// If the event is unknown to the `auth_cache` it is added. This guarantees that any /// event we need to know of will be present. -#[tracing::instrument(skip(db))] +//#[tracing::instrument(skip(db, key_map, auth_cache))] pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, @@ -1175,11 +1170,8 @@ pub(crate) async fn fetch_signing_keys( origin: &ServerName, signature_ids: Vec<&String>, ) -> Result> { - let contains_all_ids = |keys: &BTreeMap| { - signature_ids - .iter() - .all(|&id| dbg!(dbg!(&keys).contains_key(dbg!(id)))) - }; + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|&id| keys.contains_key(id)); let mut result = db .globals @@ -1273,7 +1265,7 @@ pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, ) -> Result> { - let mut current_leaves = dbg!(db.rooms.get_pdu_leaves(pdu.room_id())?); + let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity @@ -1344,7 +1336,7 @@ pub(crate) async fn build_forward_extremity_snapshots( Some(leave_pdu) => { let pdu_shortstatehash = db .rooms - .pdu_shortstatehash(dbg!(&leave_pdu.event_id))? + .pdu_shortstatehash(&leave_pdu.event_id)? 
.ok_or_else(|| Error::bad_database("Found pdu with no statehash in db."))?; if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { From 46d8f36a2ca6181b8cc8643dee3a19fc32877681 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 23 Mar 2021 19:46:54 +0100 Subject: [PATCH 075/103] fix: media thumbnail calculation and appservice detection --- src/client_server/membership.rs | 8 +++++--- src/database/media.rs | 9 +++++---- src/database/rooms.rs | 5 ++++- src/pdu.rs | 10 +++++++--- src/server_server.rs | 10 ++++++++-- 5 files changed, 29 insertions(+), 13 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d571eaa..71be6ac 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,7 @@ use crate::{ pdu::{PduBuilder, PduEvent}, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::{info, warn}; +use log::{error, info, warn}; use ruma::{ api::{ client::{ @@ -544,8 +544,10 @@ async fn join_room_by_id_helper( .await?; let add_event_id = |pdu: &Raw| -> Result<(EventId, CanonicalJsonObject)> { - let mut value = serde_json::from_str(pdu.json().get()) - .expect("converting raw jsons to values always works"); + let mut value = serde_json::from_str(pdu.json().get()).map_err(|e| { + error!("{:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; let event_id = EventId::try_from(&*format!( "${}", ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) diff --git a/src/database/media.rs b/src/database/media.rs index 448d071..f958dc8 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -226,16 +226,17 @@ impl Media { } let thumbnail = if crop { - image.resize_to_fill(width, height, FilterType::Triangle) + image.resize_to_fill(width, height, FilterType::CatmullRom) } else { let (exact_width, exact_height) = { // Copied from image::dynimage::resize_dimensions let ratio = u64::from(original_width) * u64::from(height); let nratio = u64::from(width) * u64::from(original_height); - let use_width = nratio > ratio; + let use_width = nratio <= ratio; let intermediate = if use_width { - u64::from(original_height) * u64::from(width) / u64::from(width) + u64::from(original_height) * u64::from(width) + / u64::from(original_width) } else { u64::from(original_width) * u64::from(height) / u64::from(original_height) @@ -261,7 +262,7 @@ impl Media { } }; - image.thumbnail_exact(exact_width, exact_height) + image.thumbnail_exact(dbg!(exact_width), dbg!(exact_height)) }; let mut thumbnail_bytes = Vec::new(); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2144340..3bf72d0 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1193,6 +1193,9 @@ impl Rooms { .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) + || db.rooms.room_members(&room_id).any(|userid| { + userid.map_or(false, |userid| users.is_match(userid.as_str())) + }) }; let matching_aliases = |aliases: &Regex| { self.room_aliases(&room_id) @@ -1201,9 +1204,9 @@ impl Rooms { }; if bridge_user_id.map_or(false, user_is_joined) - || users.iter().any(matching_users) || aliases.iter().any(matching_aliases) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || users.iter().any(matching_users) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } diff --git a/src/pdu.rs b/src/pdu.rs index 6085581..009fde6 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,4 +1,5 @@ use crate::Error; +use log::error; use ruma::{ 
events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, @@ -322,8 +323,11 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &Raw, -) -> (EventId, CanonicalJsonObject) { - let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); +) -> crate::Result<(EventId, CanonicalJsonObject)> { + let value = serde_json::from_str(pdu.json().get()).map_err(|e| { + error!("{:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; let event_id = EventId::try_from(&*format!( "${}", @@ -332,7 +336,7 @@ pub(crate) fn gen_event_id_canonical_json( )) .expect("ruma's reference hashes are valid event ids"); - (event_id, value) + Ok((event_id, value)) } /// Build the start of a PDU in order to add it to the `Database`. diff --git a/src/server_server.rs b/src/server_server.rs index 3c364db..fa5706d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -556,7 +556,13 @@ pub async fn send_transaction_message_route<'a>( // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return None; + } + }; // If we have no idea about this room skip the PDU let room_id = match value @@ -1138,7 +1144,7 @@ pub(crate) async fn fetch_events( Ok(res) => { debug!("Got event over federation: {:?}", res); let (event_id, value) = - crate::pdu::gen_event_id_canonical_json(&res.pdu); + crate::pdu::gen_event_id_canonical_json(&res.pdu)?; let (pdu, _) = validate_event(db, value, event_id, key_map, origin, auth_cache) .await From 1d00a8c41f92c0df5dac40299bf53134bcfa31b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 23 Mar 2021 22:01:14 +0100 Subject: [PATCH 076/103] improvement: better logging Use CONDUIT_LOG or the log setting in the config --- conduit-example.toml | 1 + src/client_server/thirdparty.rs | 2 +- src/database.rs | 6 ++++++ src/main.rs | 3 ++- src/server_server.rs | 13 ++++++++----- 5 files changed, 18 insertions(+), 7 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 3aca538..fea84bd 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -35,6 +35,7 @@ max_request_size = 20_000_000 # in bytes #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time +#log = "info,rocket=off,_=off,sled=off" #workers = 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 3c07699..fe5b784 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -12,7 +12,7 @@ use std::collections::BTreeMap; )] #[tracing::instrument] pub async fn get_protocols_route() -> ConduitResult { - warn!("TODO: get_protocols_route"); + // TODO Ok(get_protocols::Response { protocols: BTreeMap::new(), } diff --git a/src/database.rs b/src/database.rs index 47cee0d..b14a912 100644 --- a/src/database.rs +++ b/src/database.rs @@ -46,6 +46,8 @@ pub struct Config { jwt_secret: 
Option, #[serde(default = "Vec::new")] trusted_servers: Vec>, + #[serde(default = "default_log")] + pub log: String, } fn false_fn() -> bool { @@ -68,6 +70,10 @@ fn default_max_concurrent_requests() -> u16 { 4 } +fn default_log() -> String { + "info,rocket=off,_=off,sled=off".to_owned() +} + #[derive(Clone)] pub struct Database { pub globals: globals::Globals, diff --git a/src/main.rs b/src/main.rs index 2ec3a42..327aefa 100644 --- a/src/main.rs +++ b/src/main.rs @@ -205,7 +205,8 @@ async fn main() { rocket.launch().await.unwrap(); } else { - pretty_env_logger::init(); + std::env::set_var("CONDUIT_LOG", config.log); + pretty_env_logger::init_custom_env("CONDUIT_LOG"); let root = span!(tracing::Level::INFO, "app_start", work_units = 2); let _enter = root.enter(); diff --git a/src/server_server.rs b/src/server_server.rs index fa5706d..4912878 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1339,11 +1339,14 @@ pub(crate) async fn build_forward_extremity_snapshots( // The current server state and incoming event state are built to be // the state after. // This would be the incoming state from the server. - Some(leave_pdu) => { + Some(leaf_pdu) => { let pdu_shortstatehash = db .rooms - .pdu_shortstatehash(&leave_pdu.event_id)? - .ok_or_else(|| Error::bad_database("Found pdu with no statehash in db."))?; + .pdu_shortstatehash(&leaf_pdu.event_id)? + .ok_or_else(|| { + warn!("Leaf pdu: {:?}", leaf_pdu); + Error::bad_database("Found pdu with no statehash in db.") + })?; if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { includes_current_state = true; @@ -1357,8 +1360,8 @@ pub(crate) async fn build_forward_extremity_snapshots( .collect::>(); // Now it's the state after - let key = (leave_pdu.kind.clone(), leave_pdu.state_key.clone()); - state_before.insert(key, Arc::new(leave_pdu)); + let key = (leaf_pdu.kind.clone(), leaf_pdu.state_key.clone()); + state_before.insert(key, Arc::new(leaf_pdu)); fork_states.insert(state_before); } From e305889b7250a97a7e83c96f98d4e65a570be35b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 24 Mar 2021 08:48:28 +0100 Subject: [PATCH 077/103] feat: room_account_data endpoints --- src/client_server/config.rs | 54 ++++++++++++++++++++++++++++++++++++- src/main.rs | 2 ++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/src/client_server/config.rs b/src/client_server/config.rs index a53b7cd..6abcba2 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -3,7 +3,7 @@ use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::config::{get_global_account_data, set_global_account_data}, + r0::config::{get_room_account_data, get_global_account_data, set_room_account_data, set_global_account_data}, }, events::{custom::CustomEventContent, BasicEvent}, serde::Raw, @@ -43,6 +43,37 @@ pub async fn set_global_account_data_route( Ok(set_global_account_data::Response.into()) } +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn set_room_account_data_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let data = serde_json::from_str(body.data.get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; + + let event_type = body.event_type.to_string(); + + db.account_data.update( + Some(&body.room_id), + 
sender_user, + event_type.clone().into(), + &BasicEvent { + content: CustomEventContent { event_type, data }, + }, + &db.globals, + )?; + + db.flush().await?; + + Ok(set_room_account_data::Response.into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") @@ -63,3 +94,24 @@ pub async fn get_global_account_data_route( Ok(get_global_account_data::Response { account_data: data }.into()) } + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn get_room_account_data_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let data = db + .account_data + .get::>(Some(&body.room_id), sender_user, body.event_type.clone().into())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + + db.flush().await?; + + Ok(get_room_account_data::Response { account_data: data }.into()) +} diff --git a/src/main.rs b/src/main.rs index 327aefa..696ce5c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -77,7 +77,9 @@ fn setup_rocket() -> (rocket::Rocket, Config) { client_server::get_filter_route, client_server::create_filter_route, client_server::set_global_account_data_route, + client_server::set_room_account_data_route, client_server::get_global_account_data_route, + client_server::get_room_account_data_route, client_server::set_displayname_route, client_server::get_displayname_route, client_server::set_avatar_url_route, From e50f2864ded296d0143f10782f1b60cf41189514 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 23 Mar 2021 22:31:19 +0100 Subject: [PATCH 078/103] improvement: save state for send_join pdu --- src/client_server/membership.rs | 8 ++++++++ src/database/rooms.rs | 1 - 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 71be6ac..36bbced 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -650,6 +650,10 @@ async fn join_room_by_id_helper( db.rooms.add_pdu_outlier(&pdu)?; } + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. + let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; + db.rooms.append_pdu( &pdu, utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), @@ -658,6 +662,10 @@ async fn join_room_by_id_helper( &[pdu.event_id.clone()], db, )?; + + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + db.rooms.set_room_state(&room_id, statehashid)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3bf72d0..91f468f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1115,7 +1115,6 @@ impl Rooms { // pdu without it's state. This is okay because append_pdu can't fail. 
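The ordering in the join handler above is deliberate: the state snapshot is written first, the PDU second, and the room's current-state pointer is only advanced at the end, so there is never a moment where a stored event has no state or where the current state references a missing event. A compressed illustration of that invariant (the trait below is hypothetical, not Conduit's actual storage API):

// Hypothetical storage interface, used only to show the write ordering.
trait RoomStore {
    fn append_to_state(&self, event_id: &str) -> u64; // returns a state hash id
    fn append_pdu(&self, event_id: &str);
    fn set_room_state(&self, state_hash: u64);
}

fn persist_incoming_event(store: &impl RoomStore, event_id: &str) {
    // 1. State first: appending the PDU cannot fail afterwards, so no stored
    //    event ever lacks an associated state snapshot.
    let state_hash = store.append_to_state(event_id);
    // 2. Then the event itself.
    store.append_pdu(event_id);
    // 3. Only now advance the room's current state, so it never points at
    //    events that have not been written yet.
    store.set_room_state(state_hash);
}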
let statehashid = self.append_to_state(&pdu, &db.globals)?; - // remove the self.append_pdu( &pdu, pdu_json, From 16eed1d8c251748087dabcb42ab48b676c776224 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 24 Mar 2021 11:52:10 +0100 Subject: [PATCH 079/103] chore: get rid of warnings --- src/client_server/backup.rs | 10 ++--- src/client_server/config.rs | 21 +++++++-- src/client_server/sync.rs | 11 +---- src/client_server/thirdparty.rs | 1 - src/database/appservice.rs | 4 +- src/database/media.rs | 2 +- src/database/rooms.rs | 16 +++---- src/database/sending.rs | 33 +++++++------- src/server_server.rs | 80 ++++++++++++++------------------- 9 files changed, 82 insertions(+), 96 deletions(-) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index f33d0de..12f3bfd 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -267,12 +267,10 @@ pub async fn get_backup_key_session_route( let key_data = db .key_backups .get_session(&sender_user, &body.version, &body.room_id, &body.session_id)? - .ok_or_else(|| { - Error::BadRequest( - ErrorKind::NotFound, - "Backup key not found for this user's session.", - ) - })?; + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Backup key not found for this user's session.", + ))?; Ok(get_backup_key_session::Response { key_data }.into()) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 6abcba2..68cd2e0 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -3,7 +3,10 @@ use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::config::{get_room_account_data, get_global_account_data, set_room_account_data, set_global_account_data}, + r0::config::{ + get_global_account_data, get_room_account_data, set_global_account_data, + set_room_account_data, + }, }, events::{custom::CustomEventContent, BasicEvent}, serde::Raw, @@ -45,7 +48,10 @@ pub async fn set_global_account_data_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", data = "") + put( + "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", + data = "" + ) )] #[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( @@ -97,7 +103,10 @@ pub async fn get_global_account_data_route( #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", data = "") + get( + "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", + data = "" + ) )] #[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( @@ -108,7 +117,11 @@ pub async fn get_room_account_data_route( let data = db .account_data - .get::>(Some(&body.room_id), sender_user, body.event_type.clone().into())? + .get::>( + Some(&body.room_id), + sender_user, + body.event_type.clone().into(), + )? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; db.flush().await?; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index da2ddf4..d38699c 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -136,9 +136,7 @@ pub async fn sync_events_route( .map(|since_shortstatehash| { Ok::<_, Error>( since_shortstatehash - .map(|since_shortstatehash| { - db.rooms.state_full(&room_id, since_shortstatehash) - }) + .map(|since_shortstatehash| db.rooms.state_full(since_shortstatehash)) .transpose()?, ) }) @@ -512,12 +510,7 @@ pub async fn sync_events_route( }) .and_then(|shortstatehash| { db.rooms - .state_get( - &room_id, - shortstatehash, - &EventType::RoomMember, - sender_user.as_str(), - ) + .state_get(shortstatehash, &EventType::RoomMember, sender_user.as_str()) .ok()? .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) .ok() diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index fe5b784..5d3c540 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -1,7 +1,6 @@ use crate::ConduitResult; use ruma::api::client::r0::thirdparty::get_protocols; -use log::warn; #[cfg(feature = "conduit_bin")] use rocket::get; use std::collections::BTreeMap; diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 764291d..222eb18 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -55,9 +55,7 @@ impl Appservice { }) } - pub fn iter_all<'a>( - &'a self, - ) -> impl Iterator> + 'a { + pub fn iter_all(&self) -> impl Iterator> + '_ { self.iter_ids().filter_map(|id| id.ok()).map(move |id| { Ok(( id.clone(), diff --git a/src/database/media.rs b/src/database/media.rs index f958dc8..37fcb74 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -262,7 +262,7 @@ impl Media { } }; - image.thumbnail_exact(dbg!(exact_width), dbg!(exact_height)) + image.thumbnail_exact(exact_width, exact_height) }; let mut thumbnail_bytes = Vec::new(); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 91f468f..175d4ac 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -108,7 +108,6 @@ impl Rooms { pub fn state_full( &self, - room_id: &RoomId, shortstatehash: u64, ) -> Result> { Ok(self @@ -151,7 +150,6 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn state_get( &self, - room_id: &RoomId, shortstatehash: u64, event_type: &EventType, state_key: &str, @@ -257,11 +255,11 @@ impl Rooms { /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> Result { + fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { // We only hash the pdu's event ids, not the whole pdu let bytes = bytes_list.join(&0xff); let hash = digest::digest(&digest::SHA256, &bytes); - Ok(hash.as_ref().into()) + hash.as_ref().into() } /// Checks if a room exists. @@ -291,7 +289,7 @@ impl Rooms { .values() .map(|event_id| event_id.as_bytes()) .collect::>(), - )?; + ); let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? { Some(shortstatehash) => { @@ -353,7 +351,7 @@ impl Rooms { room_id: &RoomId, ) -> Result> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(&room_id, current_shortstatehash) + self.state_full(current_shortstatehash) } else { Ok(BTreeMap::new()) } @@ -368,7 +366,7 @@ impl Rooms { state_key: &str, ) -> Result> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(&room_id, current_shortstatehash, event_type, state_key) + self.state_get(current_shortstatehash, event_type, state_key) } else { Ok(None) } @@ -582,7 +580,7 @@ impl Rooms { { if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { if let Some(prev_state) = self - .state_get(&pdu.room_id, shortstatehash, &pdu.kind, &state_key) + .state_get(shortstatehash, &pdu.kind, &state_key) .unwrap() { unsigned.insert( @@ -849,7 +847,7 @@ impl Rooms { .values() .map(|event_id| &**event_id) .collect::>(), - )?; + ); let shortstatehash = match self.statehash_shortstatehash.get(&new_state_hash)? { Some(shortstatehash) => { diff --git a/src/database/sending.rs b/src/database/sending.rs index a9204c5..1cc2f91 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,6 @@ use std::{ collections::HashMap, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, sync::Arc, time::{Duration, Instant, SystemTime}, @@ -10,11 +10,11 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{error, info, warn}; +use log::{info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ - api::{appservice, client::r0::push::Pusher, federation, OutgoingRequest}, + api::{appservice, federation, OutgoingRequest}, events::{push_rules, EventType}, uint, ServerName, UInt, UserId, }; @@ -264,7 +264,7 @@ impl Sending { futures.push( Self::handle_event( outgoing_kind, - vec![pdu_id.into()], + vec![pdu_id], &db, ) ); @@ -395,18 +395,19 @@ impl Sending { continue; } - let userid = UserId::try_from(utils::string_from_bytes(user).map_err(|e| { - ( - OutgoingKind::Push(user.clone(), pushkey.clone()), - Error::bad_database("Invalid push user string in db."), - ) - })?) - .map_err(|e| { - ( - OutgoingKind::Push(user.clone(), pushkey.clone()), - Error::bad_database("Invalid push user id in db."), - ) - })?; + let userid = + UserId::try_from(utils::string_from_bytes(user).map_err(|_| { + ( + OutgoingKind::Push(user.clone(), pushkey.clone()), + Error::bad_database("Invalid push user string in db."), + ) + })?) 
+ .map_err(|_| { + ( + OutgoingKind::Push(user.clone(), pushkey.clone()), + Error::bad_database("Invalid push user id in db."), + ) + })?; let mut senderkey = user.clone(); senderkey.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index 4912878..8babc89 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -21,10 +21,9 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::EventType, - identifiers::{KeyId, KeyName}, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, SigningKeyAlgorithm, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ @@ -788,23 +787,17 @@ pub async fn send_transaction_message_route<'a>( // This will create the state after any state snapshot it builds // So current_state will have the incoming event inserted to it - let mut fork_states = match build_forward_extremity_snapshots( - &db, - pdu.clone(), - server_name, - current_state, - &extremities, - &pub_key_map, - &mut auth_cache, - ) - .await - { - Ok(states) => states, - Err(_) => { - resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); - continue; - } - }; + let mut fork_states = + match build_forward_extremity_snapshots(&db, pdu.clone(), current_state, &extremities) + .await + { + Ok(states) => states, + Err(_) => { + resolved_map + .insert(event_id, Err("Failed to gather forward extremities".into())); + continue; + } + }; // Make this the state after. let mut state_after = state_at_event.clone(); @@ -1320,11 +1313,8 @@ pub(crate) async fn calculate_forward_extremities( pub(crate) async fn build_forward_extremity_snapshots( db: &Database, pdu: Arc, - origin: &ServerName, mut current_state: StateMap>, current_leaves: &[EventId], - pub_key_map: &PublicKeyMap, - auth_cache: &mut EventMap>, ) -> Result>>> { let current_shortstatehash = db.rooms.current_shortstatehash(pdu.room_id())?; @@ -1354,7 +1344,7 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut state_before = db .rooms - .state_full(pdu.room_id(), pdu_shortstatehash)? + .state_full(pdu_shortstatehash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect::>(); @@ -1396,9 +1386,9 @@ pub(crate) fn update_resolved_state( new_state.insert( ( ev_type, - state_k.ok_or_else(|| { - Error::Conflict("update_resolved_state: State contained non state event") - })?, + state_k.ok_or(Error::Conflict( + "update_resolved_state: State contained non state event", + ))?, ), pdu.event_id.clone(), ); @@ -1426,9 +1416,9 @@ pub(crate) fn append_incoming_pdu( new_state.insert( ( ev_type.clone(), - state_k.clone().ok_or_else(|| { - Error::Conflict("append_incoming_pdu: State contained non state event") - })?, + state_k.clone().ok_or(Error::Conflict( + "append_incoming_pdu: State contained non state event", + ))?, ), state_pdu.event_id.clone(), ); @@ -1600,26 +1590,22 @@ pub fn get_room_state_ids_route<'a>( let mut todo = BTreeSet::new(); todo.insert(body.event_id.clone()); - loop { - if let Some(event_id) = todo.iter().next().cloned() { - if let Some(pdu) = db.rooms.get_pdu(&event_id)? 
{ - todo.extend( - pdu.auth_events - .clone() - .into_iter() - .collect::>() - .difference(&auth_chain_ids) - .cloned(), - ); - auth_chain_ids.extend(pdu.auth_events.into_iter()); - } else { - warn!("Could not find pdu mentioned in auth events."); - } - - todo.remove(&event_id); + while let Some(event_id) = todo.iter().next().cloned() { + if let Some(pdu) = db.rooms.get_pdu(&event_id)? { + todo.extend( + pdu.auth_events + .clone() + .into_iter() + .collect::>() + .difference(&auth_chain_ids) + .cloned(), + ); + auth_chain_ids.extend(pdu.auth_events.into_iter()); } else { - break; + warn!("Could not find pdu mentioned in auth events."); } + + todo.remove(&event_id); } Ok(get_room_state_ids::v1::Response { From c213769d9f8448b21ae414db62a68ea0044701cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 25 Mar 2021 23:55:40 +0100 Subject: [PATCH 080/103] improvement: refactor state res and fix a few bugs in the process --- Cargo.lock | 2 +- Cargo.toml | 4 +- src/client_server/membership.rs | 8 +- src/client_server/sync.rs | 6 +- src/database/pusher.rs | 31 +- src/database/rooms.rs | 302 +++++---- src/database/sending.rs | 7 +- src/server_server.rs | 1124 +++++++++++++------------------ 8 files changed, 651 insertions(+), 833 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index adcc27b..9580942 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2105,7 +2105,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488#34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488" +source = "git+https://github.com/timokoesters/state-res?branch=improvements#1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 33f1d1e..453bc8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,10 +23,10 @@ ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-a #ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution -# state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/timokoesters/state-res", branch = "improvements", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature #state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } -state-res = { git = "https://github.com/ruma/state-res", rev = "34cd1cb4dcdd5fb84b5df9e48e63b2e4669a2488", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", rev = "1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0", features = ["unstable-pre-spec", "gen-eventid"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 36bbced..6a64ea4 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,11 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use std::{ - collections::{BTreeMap, HashMap}, - convert::TryFrom, - 
sync::Arc, -}; +use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -573,7 +569,7 @@ async fn join_room_by_id_helper( let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; - let mut state = HashMap::new(); + let mut state = BTreeMap::new(); for pdu in send_join_response .room_state diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index d38699c..bd7046d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,5 +1,6 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; +use log::error; use ruma::{ api::client::r0::sync::sync_events, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, @@ -505,7 +506,10 @@ pub async fn sync_events_route( db.rooms .pdu_shortstatehash(&pdu.1.event_id) .ok()? - .ok_or_else(|| Error::bad_database("Pdu in db doesn't have a state hash.")) + .ok_or_else(|| { + error!("{:?}", pdu.1); + Error::bad_database("Pdu in db doesn't have a state hash.") + }) .ok() }) .and_then(|shortstatehash| { diff --git a/src/database/pusher.rs b/src/database/pusher.rs index f4b35f2..8e9b24e 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -215,7 +215,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -235,7 +235,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -266,8 +266,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -292,8 +291,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -309,7 +307,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -346,8 +344,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -372,8 +369,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()) - .await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -389,7 +385,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -405,7 +401,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -421,7 +417,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; 
break; } } @@ -435,7 +431,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -449,7 +445,7 @@ pub async fn send_push_notice( _ => None, }) .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db, rule.rule_id.as_str()).await?; + send_notice(unread, pusher, tweaks, pdu, db).await?; break; } } @@ -466,7 +462,6 @@ async fn send_notice( tweaks: Vec, event: &PduEvent, db: &Database, - name: &str, ) -> Result<()> { // TODO: email if pusher.kind == Some(PusherKind::Email) { @@ -514,7 +509,6 @@ async fn send_notice( } if event_id_only { - error!("SEND PUSH NOTICE `{}`", name); send_request( &db.globals, &url, @@ -542,7 +536,6 @@ async fn send_notice( .flatten(); notifi.room_name = room_name.as_deref(); - error!("SEND PUSH NOTICE Full `{}`", name); send_request( &db.globals, &url, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 175d4ac..e1e97b4 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,27 +3,24 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::{error, warn}; +use log::{debug, error, warn}; use regex::Regex; use ring::digest; use ruma::{ api::client::error::ErrorKind, events::{ ignored_user_list, - room::{ - member, message, - power_levels::{self, PowerLevelsEventContent}, - }, + room::{create::CreateEventContent, member, message}, EventType, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Event, StateMap}; +use state_res::{Event, StateMap}; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, mem, sync::Arc, @@ -227,26 +224,24 @@ impl Rooms { sender: &UserId, state_key: Option<&str>, content: serde_json::Value, - ) -> Result> { + ) -> Result>> { let auth_events = state_res::auth_types_for_event( kind, sender, state_key.map(|s| s.to_string()), - content, + content.clone(), ); let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some(pdu) = self.room_state_get( - room_id, - &event_type, - &state_key - .as_deref() - .ok_or_else(|| Error::bad_database("Saved auth event with no state key."))?, - )? { - events.insert((event_type, state_key), pdu); + if let Some(pdu) = self.room_state_get(room_id, &event_type, &state_key)? { + events.insert((event_type, state_key), Arc::new(pdu)); } else { - warn!("Could not find {} {:?} in state", event_type, state_key); + // This is okay because when creating a new room some events were not created yet + debug!( + "{:?}: Could not find {} {:?} in state", + content, event_type, state_key + ); } } Ok(events) @@ -281,7 +276,7 @@ impl Rooms { pub fn force_state( &self, room_id: &RoomId, - state: HashMap<(EventType, String), EventId>, + state: BTreeMap<(EventType, String), EventId>, globals: &super::globals::Globals, ) -> Result<()> { let state_hash = self.calculate_hash( @@ -293,8 +288,10 @@ impl Rooms { let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? 
{ Some(shortstatehash) => { - warn!("state hash already existed?!"); - shortstatehash.to_vec() + // State already existed in db + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &*shortstatehash)?; + return Ok(()); } None => { let shortstatehash = globals.next_count()?; @@ -483,14 +480,11 @@ impl Rooms { } /// Returns the leaf pdus of a room. - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { + pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - let mut events = Vec::new(); - - for event in self - .roomid_pduleaves + self.roomid_pduleaves .scan_prefix(prefix) .values() .map(|bytes| { @@ -501,11 +495,7 @@ impl Rooms { .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?, ) }) - { - events.push(event?); - } - - Ok(events) + .collect() } /// Replace the leaves of a room. @@ -761,6 +751,90 @@ impl Rooms { Ok(()) } + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. + pub fn set_event_state( + &self, + event_id: &EventId, + state: &StateMap>, + globals: &super::globals::Globals, + ) -> Result<()> { + let shorteventid = match self.eventid_shorteventid.get(event_id.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; + + let state_hash = self.calculate_hash( + &state + .values() + .map(|pdu| pdu.event_id.as_bytes()) + .collect::>(), + ); + + let shortstatehash = match self.statehash_shortstatehash.get(&state_hash)? { + Some(shortstatehash) => { + // State already existed in db + self.shorteventid_shortstatehash + .insert(shorteventid, &*shortstatehash)?; + return Ok(()); + } + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(&state_hash, &shortstatehash.to_be_bytes())?; + shortstatehash.to_be_bytes().to_vec() + } + }; + + for ((event_type, state_key), pdu) in state { + let mut statekey = event_type.as_ref().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(&state_key.as_bytes()); + + let shortstatekey = match self.statekey_shortstatekey.get(&statekey)? { + Some(shortstatekey) => shortstatekey.to_vec(), + None => { + let shortstatekey = globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + shortstatekey.to_be_bytes().to_vec() + } + }; + + let shorteventid = match self.eventid_shorteventid.get(pdu.event_id.as_bytes())? { + Some(shorteventid) => shorteventid.to_vec(), + None => { + let shorteventid = globals.next_count()?; + self.eventid_shorteventid + .insert(pdu.event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), pdu.event_id.as_bytes())?; + shorteventid.to_be_bytes().to_vec() + } + }; + + let mut state_id = shortstatehash.clone(); + state_id.extend_from_slice(&shortstatekey); + + self.stateid_shorteventid + .insert(&*state_id, &*shorteventid)?; + } + + self.shorteventid_shortstatehash + .insert(shorteventid, &*shortstatehash)?; + + Ok(()) + } + /// Generates a new StateHash and associates it with the incoming event. 
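// Aside: the new `set_event_state` above applies one pattern several times — look up a
// long key (an event id, a state key, a hash of the room state) in a tree and, if it is
// not present yet, assign it the next value of a global counter so that later keys can
// be built from fixed-width "short" ids. A minimal, self-contained sketch of that idea
// follows. It uses an in-memory map and a plain u64 counter instead of Conduit's sled
// trees and `globals.next_count()`; the names are illustrative, not the real API.
use std::collections::HashMap;

struct ShortIdTable {
    forward: HashMap<Vec<u8>, u64>,  // long key -> short id
    backward: HashMap<u64, Vec<u8>>, // short id -> long key
    counter: u64,
}

impl ShortIdTable {
    fn new() -> Self {
        Self {
            forward: HashMap::new(),
            backward: HashMap::new(),
            counter: 0,
        }
    }

    /// Returns the existing short id for `key`, or assigns the next counter value.
    fn get_or_create(&mut self, key: &[u8]) -> u64 {
        if let Some(&short) = self.forward.get(key) {
            return short;
        }
        self.counter += 1;
        let short = self.counter;
        self.forward.insert(key.to_vec(), short);
        self.backward.insert(short, key.to_vec());
        short
    }
}

fn main() {
    let mut table = ShortIdTable::new();
    let a = table.get_or_create(b"$some_event_id:example.org");
    let b = table.get_or_create(b"$some_event_id:example.org");
    assert_eq!(a, b); // interning the same key twice yields the same short id
}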
/// /// This adds all current state events (not including the incoming event) @@ -900,8 +974,37 @@ impl Rooms { redacts, } = pdu_builder; // TODO: Make sure this isn't called twice in parallel - let mut prev_events = self.get_pdu_leaves(&room_id)?; - prev_events.truncate(20); + let prev_events = self + .get_pdu_leaves(&room_id)? + .into_iter() + .take(20) + .collect::>(); + + let create_event = self.room_state_get(&room_id, &EventType::RoomCreate, "")?; + + let create_event_content = create_event + .as_ref() + .map(|create_event| { + Ok::<_, Error>( + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?, + ) + }) + .transpose()?; + + let create_prev_event = if prev_events.len() == 1 + && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) + { + create_event.map(Arc::new) + } else { + None + }; + + let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { + create_event.room_version + }); let auth_events = self.get_auth_events( &room_id, @@ -911,118 +1014,6 @@ impl Rooms { content.clone(), )?; - // Is the event authorized? - if let Some(state_key) = &state_key { - let power_levels = self - .room_state_get(&room_id, &EventType::RoomPowerLevels, "")? - .map_or_else( - || { - Ok::<_, Error>(power_levels::PowerLevelsEventContent { - ban: 50.into(), - events: BTreeMap::new(), - events_default: 0.into(), - invite: 50.into(), - kick: 50.into(), - redact: 50.into(), - state_default: 0.into(), - users: BTreeMap::new(), - users_default: 0.into(), - notifications: - ruma::events::room::power_levels::NotificationPowerLevels { - room: 50.into(), - }, - }) - }, - |power_levels| { - Ok(serde_json::from_value::>( - power_levels.content, - ) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?) - }, - )?; - let sender_membership = self - .room_state_get(&room_id, &EventType::RoomMember, &sender.to_string())? - .map_or(Ok::<_, Error>(member::MembershipState::Leave), |pdu| { - Ok( - serde_json::from_value::>(pdu.content) - .expect("Raw::from_value always works.") - .deserialize() - .map_err(|_| Error::bad_database("Invalid Member event in db."))? - .membership, - ) - })?; - - let sender_power = power_levels.users.get(&sender).map_or_else( - || { - if sender_membership != member::MembershipState::Join { - None - } else { - Some(&power_levels.users_default) - } - }, - // If it's okay, wrap with Some(_) - Some, - ); - - // Is the event allowed? - #[allow(clippy::blocks_in_if_conditions)] - if !match event_type { - EventType::RoomEncryption => { - // Only allow encryption events if it's allowed in the config - db.globals.allow_encryption() - } - EventType::RoomMember => { - let prev_event = self - .get_pdu(prev_events.get(0).ok_or(Error::BadRequest( - ErrorKind::Unknown, - "Membership can't be the first event", - ))?)? - .map(Arc::new); - event_auth::valid_membership_change( - Some(state_key.as_str()), - &sender, - content.clone(), - prev_event, - None, // TODO: third party invite - &auth_events - .iter() - .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) - }) - .collect::>>()?, - ) - .map_err(|e| { - log::error!("{}", e); - Error::Conflict("Found incoming PDU with invalid data.") - })? - } - EventType::RoomCreate => prev_events.is_empty(), - // Not allow any of the following events if the sender is not joined. 
- _ if sender_membership != member::MembershipState::Join => false, - _ => { - // TODO - sender_power.unwrap_or(&power_levels.users_default) - >= &power_levels.state_default - } - } { - error!("Unauthorized {}", event_type); - // Not authorized - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized", - )); - } - } else if !self.is_joined(&sender, &room_id)? { - // TODO: auth rules apply to all events, not only those with a state key - error!("Unauthorized {}", event_type); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized", - )); - } - // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() @@ -1057,8 +1048,8 @@ impl Rooms { .try_into() .map_err(|_| Error::bad_database("Depth is invalid"))?, auth_events: auth_events - .into_iter() - .map(|(_, pdu)| pdu.event_id) + .iter() + .map(|(_, pdu)| pdu.event_id.clone()) .collect(), redacts, unsigned, @@ -1068,6 +1059,23 @@ impl Rooms { signatures: BTreeMap::new(), }; + if !state_res::auth_check( + &room_version, + &Arc::new(pdu.clone()), + create_prev_event, + &auth_events, + None, // TODO: third_party_invite + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })? { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event is not authorized.", + )); + } + // Hash and sign let mut pdu_json = utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); diff --git a/src/database/sending.rs b/src/database/sending.rs index 1cc2f91..b792479 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -175,8 +175,7 @@ impl Sending { // servercurrentpdus with the prefix should be empty now } } - Err((outgoing_kind, e)) => { - info!("Couldn't send transaction to {:?}\n{}", outgoing_kind, e); + Err((outgoing_kind, _)) => { let mut prefix = match &outgoing_kind { OutgoingKind::Appservice(serv) => { let mut p = b"+".to_vec(); @@ -217,7 +216,7 @@ impl Sending { let exponential_backoff = |(tries, instant): &(u32, Instant)| { // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(60) * (*tries) * (*tries); + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60*60*24) { min_elapsed_duration = Duration::from_secs(60*60*24); } @@ -261,6 +260,8 @@ impl Sending { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); + last_failed_try.remove(&outgoing_kind); + futures.push( Self::handle_event( outgoing_kind, diff --git a/src/server_server.rs b/src/server_server.rs index 8babc89..e461b5a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -22,12 +22,12 @@ use ruma::{ directory::{IncomingFilter, IncomingRoomNetwork}, events::EventType, serde::to_canonical_value, - signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, + signatures::CanonicalJsonValue, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{btree_map::Entry, BTreeMap, BTreeSet, HashSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -180,13 +180,7 @@ where .collect::>(); if status != 200 { - info!( - "Server returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - utils::string_from_bytes(&body) - ); + info!("{} {}:\n{}", url, status, String::from_utf8_lossy(&body),); } let response = 
T::IncomingResponse::try_from( @@ -194,13 +188,7 @@ where .body(body) .expect("reqwest body is valid http body"), ); - response.map_err(|_| { - info!( - "Server returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Server returned bad response.") - }) + response.map_err(|_| Error::BadServerResponse("Server returned bad response.")) } Err(e) => Err(e.into()), } @@ -508,8 +496,6 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - info!("Incoming PDUs: {:?}", &body.pdus); - for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { Ok(edu) => match edu.edu_type.as_str() { @@ -548,385 +534,19 @@ pub async fn send_transaction_message_route<'a>( let mut resolved_map = BTreeMap::new(); - let pdus_to_resolve = body - .pdus - .iter() - .filter_map(|pdu| { - // 1. Is a valid event, otherwise it is dropped. - // Ruma/PduEvent/StateEvent satisfies this - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return None; - } - }; - - // If we have no idea about this room skip the PDU - let room_id = match value - .get("room_id") - .map(|id| match id { - CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), - _ => None, - }) - .flatten() - { - Some(id) => id, - None => { - resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); - return None; - } - }; - - // 1. check the server is in the room (optional) - match db.rooms.exists(&room_id) { - Ok(true) => {} - _ => { - resolved_map - .insert(event_id, Err("Room is unknown to this server".to_string())); - return None; - } - } - - // If we know of this pdu we don't need to continue processing it - if let Ok(Some(_)) = db.rooms.get_pdu_id(&event_id) { - return None; - } - - Some((event_id, room_id, value)) - }) - .collect::>(); - - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere? - // SPEC: - // Servers MUST strictly enforce the JSON format specified in the appendices. - // This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of - // events over federation. For example, the Federation API's /send endpoint would - // discard the event whereas the Client Server API's /send/{eventType} endpoint - // would return a M_BAD_JSON error. - 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { - info!("Working on incoming pdu: {:?}", value); - let server_name = &body.body.origin; - let mut pub_key_map = BTreeMap::new(); - - // TODO: make this persist but not a DB Tree... - // This is all the auth_events that have been recursively fetched so they don't have to be - // deserialized over and over again. This could potentially also be some sort of trie (suffix tree) - // like structure so that once an auth event is known it would know (using indexes maybe) all of - // the auth events that it references. - let mut auth_cache = EventMap::new(); - - // 2. check content hash, redact if doesn't match - // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events - // 7. 
if not timeline event: stop - // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous_create): (Arc, Option>) = match validate_event( - &db, - value, - event_id.clone(), - &mut pub_key_map, - server_name, - // All the auth events gathered will be here - &mut auth_cache, - ) - .await - { - Ok(pdu) => pdu, - Err(e) => { - resolved_map.insert(event_id, Err(e)); - continue; - } - }; - debug!("Validated event."); - - // 6. persist the event as an outlier. - db.rooms.add_pdu_outlier(&pdu)?; - info!("Added pdu as outlier."); - - // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all - // the checks in this list starting at 1. These are not timeline events. - // - // Step 10. check the auth of the event passes based on the calculated state of the event - // - // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event - debug!("Requesting state at event."); - let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = - match db - .sending - .send_federation_request( - &db.globals, - server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - debug!("Fetching state events at event."); - let state = match fetch_events( - &db, - server_name, - &mut pub_key_map, - &res.pdu_ids, - &mut auth_cache, - ) - .await - { - Ok(state) => state, - Err(_) => continue, - }; - - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - error!("Server sent us an invalid state"); - continue; - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - let incoming_auth_events = match fetch_events( - &db, - server_name, - &mut pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await - { - Ok(state) => state, - Err(_) => continue, - }; - - debug!("Fetching auth events of state events at event."); - (state, incoming_auth_events) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; - - // 10. This is the actual auth check for state at the event - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous_create.clone(), - &state_at_event, - None, // TODO: third party invite - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Event failed auth with state_at - resolved_map.insert( - event_id, - Err("Event has failed auth check with state at the event".into()), - ); - continue; - } - debug!("Auth check succeeded."); - // End of step 10. - - // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it - let current_state = db - .rooms - .room_state_full(pdu.room_id())? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); - - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous_create, - ¤t_state, - None, - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Soft fail, we add the event as an outlier. 
- resolved_map.insert( - pdu.event_id().clone(), - Err("Event has been soft failed".into()), - ); - continue; - }; - debug!("Auth check with current state succeeded."); - - // Step 11. Ensure that the state is derived from the previous current state (i.e. we calculated by doing state res - // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) - // - // calculate_forward_extremities takes care of adding the current state if not already in the state sets - // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let extremities = match calculate_forward_extremities(&db, &pdu).await { - Ok(fork_ids) => { - debug!("Calculated new forward extremities: {:?}", fork_ids); - fork_ids - } + for pdu in body.pdus.iter() { + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { + Ok(t) => t, Err(_) => { - resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + // Event could not be converted to canonical json continue; } }; - // This will create the state after any state snapshot it builds - // So current_state will have the incoming event inserted to it - let mut fork_states = - match build_forward_extremity_snapshots(&db, pdu.clone(), current_state, &extremities) - .await - { - Ok(states) => states, - Err(_) => { - resolved_map - .insert(event_id, Err("Failed to gather forward extremities".into())); - continue; - } - }; - - // Make this the state after. - let mut state_after = state_at_event.clone(); - state_after.insert((pdu.kind(), pdu.state_key()), pdu.clone()); - // Add the incoming event to the mix of state snapshots - // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets - fork_states.insert(state_after.clone()); - - let fork_states = fork_states.into_iter().collect::>(); - - let mut update_state = false; - // 13. 
start state-res with all previous forward extremities minus the ones that are in - // the prev_events of this event plus the new one created by this event and use - // the result as the new room state - let state_at_forks = if fork_states.is_empty() { - // State is empty - Default::default() - } else if fork_states.len() == 1 { - fork_states[0].clone() - } else { - // We do need to force an update to this rooms state - update_state = true; - - let mut auth_events = vec![]; - for map in &fork_states { - let mut state_auth = vec![]; - for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { - match fetch_events( - &db, - server_name, - &mut pub_key_map, - &[auth_id.clone()], - &mut auth_cache, - ) - .await - { - // This should always contain exactly one element when Ok - Ok(events) => state_auth.push(events[0].clone()), - Err(e) => { - debug!("Event was not present: {}", e); - } - } - } - auth_events.push(state_auth); - } - - // Add everything we will need to event_map - auth_cache.extend( - auth_events - .iter() - .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) - .flatten(), - ); - auth_cache.extend( - incoming_auth_events - .into_iter() - .map(|pdu| (pdu.event_id().clone(), pdu)), - ); - auth_cache.extend( - state_after - .into_iter() - .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), - ); - - let res = match state_res::StateResolution::resolve( - pdu.room_id(), - &RoomVersionId::Version6, - &fork_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(), - auth_events - .into_iter() - .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) - .collect(), - &mut auth_cache, - ) { - Ok(res) => res, - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("State resolution failed, either an event could not be found or deserialization".into()), - ); - continue 'main_pdu_loop; - } - }; - - let mut resolved = BTreeMap::new(); - for (k, id) in res { - // We should know of the event but just incase - let pdu = match auth_cache.get(&id) { - Some(pdu) => pdu.clone(), - None => { - error!("Event was not present in auth_cache {}", id); - resolved_map.insert( - event_id.clone(), - Err("Event was not present in auth cache".into()), - ); - continue 'main_pdu_loop; - } - }; - resolved.insert(k, pdu); - } - resolved - }; - - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - info!("Appended incoming pdu."); - - // Set the new room state to the resolved state - update_resolved_state( - &db, - pdu.room_id(), - if update_state { - Some(state_at_forks) - } else { - None - }, - )?; - debug!("Updated resolved state"); - - // Event has passed all auth/stateres checks + if let Err(e) = handle_incoming_pdu(&body.origin, &event_id, value, true, &db).await { + resolved_map.insert(event_id, Err(e)); + } } if !resolved_map.is_empty() { @@ -939,26 +559,80 @@ pub async fn send_transaction_message_route<'a>( /// An async function that can recursively calls itself. type AsyncRecursiveResult<'a, T> = Pin> + 'a + Send>>; -/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events -/// Validate any event that is given to us by another server. -/// -/// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). -/// 2. 
check content hash, redact if doesn't match -/// 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events -/// 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" -/// 5. reject "due to auth events" if the event doesn't pass auth based on the auth events -/// 7. if not timeline event: stop -/// 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events -#[tracing::instrument(skip(db, pub_key_map, auth_cache))] -fn validate_event<'a>( - db: &'a Database, - value: CanonicalJsonObject, - event_id: EventId, - pub_key_map: &'a mut PublicKeyMap, +/// When receiving an event one needs to: +/// 0. Skip the PDU if we already know about it +/// 1. Check the server is in the room +/// 2. Check signatures, otherwise drop +/// 3. Check content hash, redact if doesn't match +/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not +/// timeline events +/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are +/// also rejected "due to auth events" +/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events +/// 7. Persist this event as an outlier +/// 8. If not timeline event: stop +/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline +/// events +/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities +/// doing all the checks in this list starting at 1. These are not timeline events +/// 11. Check the auth of the event passes based on the state of the event +/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by +/// doing state res where one of the inputs was a previously trusted set of state, don't just +/// trust a set of state we got from a remote) +/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" +/// it +/// 14. Use state resolution to find new room state +// We use some AsyncRecursiveResult hacks here so we can call this async funtion recursively +fn handle_incoming_pdu<'a>( origin: &'a ServerName, - auth_cache: &'a mut EventMap>, -) -> AsyncRecursiveResult<'a, (Arc, Option>)> { + event_id: &'a EventId, + value: BTreeMap, + is_timeline_event: bool, + db: &'a Database, +) -> AsyncRecursiveResult<'a, Arc> { Box::pin(async move { + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // 0. Skip the PDU if we already know about it + if let Ok(Some(pdu)) = db.rooms.get_pdu(&event_id) { + return Ok(Arc::new(pdu)); + } + + // 1. Check the server is in the room + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + // Event is invalid + return Err("Event needs a valid RoomId".to_string()); + } + }; + + match db.rooms.exists(&room_id) { + Ok(true) => {} + _ => { + return Err("Room is unknown to this server".to_string()); + } + } + + let mut pub_key_map = BTreeMap::new(); + + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. + // TODO: make this persist across requests but not in a DB Tree (in globals?) 
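// Aside: the `AsyncRecursiveResult` alias and the `Box::pin(async move { ... })` body
// above are the standard workaround for recursion in async Rust. `handle_incoming_pdu`
// ends up calling itself indirectly (missing auth events go through
// `fetch_and_handle_events`, which feeds unknown events back into this routine), and a
// plain `async fn` cannot do that: its generated future type would have to contain
// itself. Returning a pinned, boxed trait object breaks that cycle. A stripped-down
// sketch of the same pattern (a toy countdown, not the Matrix logic) looks like this:
use std::future::Future;
use std::pin::Pin;

type BoxedResult<'a, T> = Pin<Box<dyn Future<Output = Result<T, String>> + Send + 'a>>;

fn countdown(n: u32) -> BoxedResult<'static, u32> {
    Box::pin(async move {
        if n == 0 {
            return Ok(0);
        }
        // The recursive call goes through the boxed future, so the type stays finite.
        let rest = countdown(n - 1).await?;
        Ok(rest + 1)
    })
}

fn main() {
    // Driving the future needs an executor (Conduit uses tokio via Rocket); here we only
    // construct it to show that the types line up.
    let _future = countdown(3);
}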
+ // TODO: This could potentially also be some sort of trie (suffix tree) like structure so + // that once an auth event is known it would know (using indexes maybe) all of the auth + // events that it references. + let mut auth_cache = EventMap::new(); + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys for (signature_server, signature) in match value .get("signatures") .ok_or_else(|| "No signatures in server response pdu.".to_string())? @@ -998,132 +672,443 @@ fn validate_event<'a>( pub_key_map.insert(signature_server.clone(), keys); } + // 2. Check signatures, otherwise drop + // 3. check content hash, redact if doesn't match let mut val = match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version5) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value - } - } Err(e) => { + // Drop error!("{:?}: {}", value, e); return Err("Signature verification failed".to_string()); } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } + Ok(ruma::signatures::Verified::All) => value, }; // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above + // to our PduEvent type val.insert( "event_id".to_owned(), to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); - let pdu = serde_json::from_value::( + let incoming_pdu = serde_json::from_value::( serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), ) - .map_err(|_| "Event is not a valid PDU".to_string())?; + .map_err(|_| "Event is not a valid PDU.".to_string())?; + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" debug!("Fetching auth events."); - fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) + fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, &mut auth_cache) .await .map_err(|e| e.to_string())?; - let pdu = Arc::new(pdu.clone()); + // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + debug!("Checking auth."); - /* - // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - debug!("Fetching prev events."); - let previous = fetch_events(&db, origin, pub_key_map, &pdu.prev_events, auth_cache) - .await - .map_err(|e| e.to_string())?; - */ + // Build map of auth events + let mut auth_events = BTreeMap::new(); + for id in incoming_pdu.auth_events.iter() { + let auth_event = auth_cache.get(id).ok_or_else(|| { + "Auth event not found, event failed recursive auth checks.".to_string() + })?; - // if the previous event was the create event special rules apply - let previous_create = if pdu.auth_events.len() == 1 && pdu.prev_events == pdu.auth_events { - auth_cache.get(&pdu.auth_events[0]).cloned() + match auth_events.entry(( + auth_event.kind.clone(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + Entry::Vacant(v) => { + v.insert(auth_event.clone()); + } + Entry::Occupied(_) => { + return Err( + "Auth event's type and state_key combination exists multiple times." + .to_owned(), + ) + } + } + } + + let create_event = db + .rooms + .room_state_get(&incoming_pdu.room_id, &EventType::RoomCreate, "") + .map_err(|_| "Failed to ask database for event.")? + .ok_or_else(|| "Failed to find create event in db.")?; + + // The original create event must be in the auth events + if auth_events + .get(&(EventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(&create_event) + { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + // If the previous event was the create event special rules apply + let previous_create = if incoming_pdu.auth_events.len() == 1 + && incoming_pdu.prev_events == incoming_pdu.auth_events + { + auth_cache + .get(&incoming_pdu.auth_events[0]) + .cloned() + .filter(|maybe_create| **maybe_create == create_event) } else { None }; - // Check that the event passes auth based on the auth_events - debug!("Checking auth."); - let is_authed = state_res::event_auth::auth_check( + let incoming_pdu = Arc::new(incoming_pdu.clone()); + + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &pdu, + &incoming_pdu, previous_create.clone(), - &pdu.auth_events - .iter() - .map(|id| { - auth_cache - .get(id) - .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) - .ok_or_else(|| { - "Auth event not found, event failed recursive auth checks.".to_string() - }) - }) - .collect::, _>>()?, + &auth_events, None, // TODO: third party invite ) - .map_err(|_e| "Auth check failed".to_string())?; - - if !is_authed { - return Err("Event has failed auth check with auth events".to_string()); + .map_err(|_e| "Auth check failed".to_string())? + { + return Err("Event has failed auth check with auth events.".to_string()); } debug!("Validation successful."); - Ok((pdu, previous_create)) - }) -} -#[tracing::instrument(skip(db, key_map, auth_cache))] -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &mut PublicKeyMap, - event_ids: &[EventId], - auth_cache: &mut EventMap>, -) -> Result<()> { - fetch_events(db, origin, key_map, event_ids, auth_cache).await?; - Ok(()) + // 7. Persist the event as an outlier. + db.rooms + .add_pdu_outlier(&incoming_pdu) + .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; + debug!("Added pdu as outlier."); + + // 8. if not timeline event: stop + if !is_timeline_event { + return Ok(incoming_pdu); + } + + // TODO: 9. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + + // 10. 
Fetch missing state and auth chain events by calling /state_ids at backwards extremities + // doing all the checks in this list starting at 1. These are not timeline events. + + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event + + debug!("Requesting state at event."); + let (state_at_incoming_event, incoming_auth_events): (StateMap>, Vec>) = + // Call /state_ids to find out what the state at this pdu is. We trust the server's + // response to some extend, but we still do a lot of checks on the events + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id: &incoming_pdu.room_id, + event_id: &incoming_pdu.event_id, + }, + ) + .await + { + Ok(res) => { + debug!("Fetching state events at event."); + let state_vec = match fetch_and_handle_events( + &db, + origin, + &res.pdu_ids, + &mut auth_cache, + ) + .await + { + Ok(state) => state, + Err(_) => return Err("Failed to fetch state events.".to_owned()), + }; + + let mut state = BTreeMap::new(); + for pdu in state_vec.into_iter() { + match state.entry((pdu.kind.clone(), pdu.state_key.clone().ok_or_else(|| "Found non-state pdu in state events.".to_owned())?)) { + Entry::Vacant(v) => { + v.insert(pdu); + } + Entry::Occupied(_) => { + return Err( + "State event's type and state_key combination exists multiple times.".to_owned(), + ) + } + } + } + + // The original create event must still be in the state + if state.get(&(EventType::RoomCreate, "".to_owned())).map(|a| a.as_ref()) != Some(&create_event) { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + debug!("Fetching auth chain events at event."); + let incoming_auth_events = match fetch_and_handle_events( + &db, + origin, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await + { + Ok(state) => state, + Err(_) => return Err("Failed to fetch auth chain.".to_owned()), + }; + + (state, incoming_auth_events) + } + Err(_) => { + return Err("Fetching state for event failed".into()); + } + }; + + // 11. Check the auth of the event passes based on the state of the event + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &incoming_pdu, + previous_create.clone(), + &state_at_incoming_event, + None, // TODO: third party invite + ) + .map_err(|_e| "Auth check failed.".to_owned())? + { + return Err("Event has failed auth check with state at the event.".into()); + } + debug!("Auth check succeeded."); + + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let current_state = db + .rooms + .room_state_full(incoming_pdu.room_id()) + .map_err(|_| "Failed to load room state.".to_owned())? + .into_iter() + .map(|(k, v)| (k, Arc::new(v))) + .collect(); + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, // TODO: Use correct room version + &incoming_pdu, + previous_create, + ¤t_state, + None, + ) + .map_err(|_e| "Auth check failed.".to_owned())? + { + // Soft fail, we leave the event as an outlier but don't add it to the timeline + return Err("Event has been soft failed".into()); + }; + debug!("Auth check with current state succeeded."); + + // Now we calculate the set of extremities this room has after the incoming event has been + // applied. 
We start with the previous extremities (aka leaves) + let mut extremities = db + .rooms + .get_pdu_leaves(&incoming_pdu.room_id) + .map_err(|_| "Failed to load room leaves".to_owned())?; + + // Remove any forward extremities that are referenced by this incoming event's prev_events + for prev_event in &incoming_pdu.prev_events { + if extremities.contains(prev_event) { + extremities.remove(prev_event); + } + } + + let mut fork_states = BTreeSet::new(); + for id in &extremities { + match db.rooms.get_pdu(&id).map_err(|_| "Failed to ask db for pdu.".to_owned())? { + Some(leaf_pdu) => { + let pdu_shortstatehash = db + .rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?; + + let mut leaf_state = db + .rooms + .state_full(pdu_shortstatehash) + .map_err(|_| "Failed to ask db for room state.".to_owned())? + .into_iter() + .map(|(k, v)| (k, Arc::new(v))) + .collect::>(); + + if let Some(state_key) = &leaf_pdu.state_key { + // Now it's the state after + let key = (leaf_pdu.kind.clone(), state_key.clone()); + leaf_state.insert(key, Arc::new(leaf_pdu)); + } + + fork_states.insert(leaf_state); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err("Missing state snapshot.".to_owned()); + } + } + } + + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). + + // We do this by adding the current state to the list of fork states + fork_states.insert(current_state); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + state_after.insert( + (incoming_pdu.kind.clone(), state_key.clone()), + incoming_pdu.clone(), + ); + } + fork_states.insert(state_after.clone()); + + let fork_states = fork_states.into_iter().collect::>(); + + let mut update_state = false; + // 14. 
Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + return Err("State is empty.".to_owned()); + } else if fork_states.len() == 1 { + // There was only one state, so it has to be the room's current state (because that is + // always included) + info!("Skipping stateres because there is no new state."); + fork_states[0] + .iter() + .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) + .collect() + } else { + // We do need to force an update to this room's state + update_state = true; + + let mut auth_events = vec![]; + for map in &fork_states { + let mut state_auth = vec![]; + for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { + match fetch_and_handle_events(&db, origin, &[auth_id.clone()], &mut auth_cache) + .await + { + // This should always contain exactly one element when Ok + Ok(events) => state_auth.push(events[0].clone()), + Err(e) => { + debug!("Event was not present: {}", e); + } + } + } + auth_events.push(state_auth); + } + + // Add everything we will need to event_map + auth_cache.extend( + auth_events + .iter() + .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) + .flatten(), + ); + auth_cache.extend( + incoming_auth_events + .into_iter() + .map(|pdu| (pdu.event_id().clone(), pdu)), + ); + auth_cache.extend( + state_after + .into_iter() + .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), + ); + + match state_res::StateResolution::resolve( + &incoming_pdu.room_id, + &RoomVersionId::Version6, + &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id.clone())) + .collect::>() + }) + .collect::>(), + auth_events + .into_iter() + .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) + .collect(), + &mut auth_cache, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err("State resolution failed, either an event could not be found or deserialization".into()); + } + } + }; + + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + append_incoming_pdu(&db, &incoming_pdu, extremities, &state_at_incoming_event) + .map_err(|_| "Failed to add pdu to db.".to_owned())?; + debug!("Appended incoming pdu."); + + // Set the new room state to the resolved state + if update_state { + db.rooms + .force_state(&room_id, new_room_state, &db.globals) + .map_err(|_| "Failed to set new room state.".to_owned())?; + } + debug!("Updated resolved state"); + + // Event has passed all auth/stateres checks + Ok(incoming_pdu) + }) } /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// -/// 0. Look in the auth_cache -/// 1. Look in the main timeline (pduid_pdu tree) -/// 2. Look at outlier pdu tree -/// 3. Ask origin server over federation -/// 4. TODO: Ask other servers over federation? +/// a. Look in the auth_cache +/// b. Look in the main timeline (pduid_pdu tree) +/// c. Look at outlier pdu tree +/// d. Ask origin server over federation +/// e. TODO: Ask other servers over federation? /// /// If the event is unknown to the `auth_cache` it is added. This guarantees that any /// event we need to know of will be present. 
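// Aside: `fetch_and_handle_events` below resolves every event id through a fallback
// chain — the in-memory `auth_cache`, then the local database (which covers both the
// timeline and the outlier tree), and only then a federation request to the origin
// server, whose result still has to pass `handle_incoming_pdu` before it is trusted.
// The sketch below mirrors that lookup order with the storage and network layers
// stubbed out as plain closures; the names and types are illustrative only.
use std::collections::HashMap;
use std::sync::Arc;

#[derive(Clone, Debug, PartialEq)]
struct Pdu(String); // stand-in for PduEvent

fn fetch_one(
    id: &str,
    cache: &mut HashMap<String, Arc<Pdu>>,
    load_local: impl Fn(&str) -> Option<Pdu>,
    fetch_remote: impl Fn(&str) -> Result<Pdu, String>,
) -> Result<Arc<Pdu>, String> {
    // a. in-memory cache of events already seen during this request
    if let Some(pdu) = cache.get(id) {
        return Ok(pdu.clone());
    }
    // b./c. local database: timeline or outlier tree
    let pdu = if let Some(pdu) = load_local(id) {
        Arc::new(pdu)
    } else {
        // d. ask the origin server over federation; in Conduit the response must still
        // pass the full validation before being used
        Arc::new(fetch_remote(id)?)
    };
    // Whichever path produced the event, remember it so later lookups hit the cache.
    cache.insert(id.to_owned(), pdu.clone());
    Ok(pdu)
}

fn main() {
    let mut cache = HashMap::new();
    let load_local = |_: &str| None; // pretend nothing is stored locally
    let fetch_remote = |id: &str| Ok(Pdu(format!("fetched {}", id)));

    let first = fetch_one("$event:example.org", &mut cache, load_local, fetch_remote).unwrap();
    let second = fetch_one("$event:example.org", &mut cache, load_local, fetch_remote).unwrap();
    assert_eq!(first, second); // second lookup is served from the cache
    assert_eq!(cache.len(), 1);
}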
//#[tracing::instrument(skip(db, key_map, auth_cache))] -pub(crate) async fn fetch_events( +pub(crate) async fn fetch_and_handle_events( db: &Database, origin: &ServerName, - key_map: &mut PublicKeyMap, events: &[EventId], auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; for id in events { + // a. Look at auth cache let pdu = match auth_cache.get(id) { Some(pdu) => { debug!("Event found in cache"); pdu.clone() } - // `get_pdu` checks the outliers tree for us + // b. Look in the main timeline (pduid_pdu tree) + // c. Look at outlier pdu tree + // (get_pdu checks both) None => match db.rooms.get_pdu(&id)? { Some(pdu) => { debug!("Event found in outliers"); Arc::new(pdu) } None => { + // d. Ask origin server over federation debug!("Fetching event over federation: {:?}", id); match db .sending @@ -1138,16 +1123,13 @@ pub(crate) async fn fetch_events( debug!("Got event over federation: {:?}", res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - let (pdu, _) = - validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|e| { - error!("ERROR: {:?}", e); - Error::Conflict("Authentication of event failed") - })?; + let pdu = handle_incoming_pdu(origin, &event_id, value, false, db) + .await + .map_err(|e| { + error!("Error: {:?}", e); + Error::Conflict("Authentication of event failed") + })?; - debug!("Added fetched pdu as outlier."); - db.rooms.add_pdu_outlier(&pdu)?; pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), @@ -1253,180 +1235,15 @@ pub(crate) async fn fetch_signing_keys( )) } -/// Gather all state snapshots needed to resolve the current state of the room. -/// -/// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res -/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). -/// -/// The state snapshot of the incoming event __needs__ to be added to the resulting list. -#[tracing::instrument(skip(db))] -pub(crate) async fn calculate_forward_extremities( - db: &Database, - pdu: &PduEvent, -) -> Result> { - let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; - - let mut is_incoming_leaf = true; - // Make sure the incoming event is not already a forward extremity - // FIXME: I think this could happen if different servers send us the same event?? - if current_leaves.contains(pdu.event_id()) { - error!("The incoming event is already present in get_pdu_leaves BUG"); - is_incoming_leaf = false; - // Not sure what to do here - } - - // If the incoming event is already referenced by an existing event - // then do nothing - it's not a candidate to be a new extremity if - // it has been referenced. - if db.rooms.is_pdu_referenced(pdu)? { - is_incoming_leaf = false; - } - - // TODO: - // [dendrite] Checks if any other leaves have been referenced and removes them - // but as long as we update the pdu leaves here and for events on our server this - // should not be possible. 
- - // Remove any forward extremities that are referenced by this incoming events prev_events - for incoming_leaf in &pdu.prev_events { - if current_leaves.contains(incoming_leaf) { - if let Some(pos) = current_leaves.iter().position(|x| *x == *incoming_leaf) { - current_leaves.remove(pos); - } - } - } - - // Add the incoming event only if it is a leaf, we do this after fetching all the - // state since we know we have already fetched the state of the incoming event so lets - // not do it again! - if is_incoming_leaf { - current_leaves.push(pdu.event_id().clone()); - } - - Ok(current_leaves) -} - -/// This should always be called after the incoming event has been appended to the DB. -/// -/// This guarantees that the incoming event will be in the state sets (at least our servers -/// and the sending server). -pub(crate) async fn build_forward_extremity_snapshots( - db: &Database, - pdu: Arc, - mut current_state: StateMap>, - current_leaves: &[EventId], -) -> Result>>> { - let current_shortstatehash = db.rooms.current_shortstatehash(pdu.room_id())?; - - let mut includes_current_state = false; - let mut fork_states = BTreeSet::new(); - for id in current_leaves { - if id == &pdu.event_id { - continue; - } - match db.rooms.get_pdu(id)? { - // We can skip this because it is handled outside of this function - // The current server state and incoming event state are built to be - // the state after. - // This would be the incoming state from the server. - Some(leaf_pdu) => { - let pdu_shortstatehash = db - .rooms - .pdu_shortstatehash(&leaf_pdu.event_id)? - .ok_or_else(|| { - warn!("Leaf pdu: {:?}", leaf_pdu); - Error::bad_database("Found pdu with no statehash in db.") - })?; - - if current_shortstatehash.as_ref() == Some(&pdu_shortstatehash) { - includes_current_state = true; - } - - let mut state_before = db - .rooms - .state_full(pdu_shortstatehash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect::>(); - - // Now it's the state after - let key = (leaf_pdu.kind.clone(), leaf_pdu.state_key.clone()); - state_before.insert(key, Arc::new(leaf_pdu)); - - fork_states.insert(state_before); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err(Error::bad_database("Missing state snapshot.")); - } - } - } - - // This guarantees that our current room state is included - if !includes_current_state { - current_state.insert((pdu.kind(), pdu.state_key()), pdu); - - fork_states.insert(current_state); - } - - Ok(fork_states) -} - -#[tracing::instrument(skip(db))] -pub(crate) fn update_resolved_state( - db: &Database, - room_id: &RoomId, - state: Option>>, -) -> Result<()> { - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - if let Some(state) = state { - let mut new_state = HashMap::new(); - for ((ev_type, state_k), pdu) in state { - new_state.insert( - ( - ev_type, - state_k.ok_or(Error::Conflict( - "update_resolved_state: State contained non state event", - ))?, - ), - pdu.event_id.clone(), - ); - } - - db.rooms.force_state(room_id, new_state, &db.globals)?; - } - - Ok(()) -} - /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. 
#[tracing::instrument(skip(db))] pub(crate) fn append_incoming_pdu( db: &Database, pdu: &PduEvent, - new_room_leaves: &[EventId], + new_room_leaves: HashSet, state: &StateMap>, ) -> Result<()> { - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - let mut new_state = HashMap::new(); - for ((ev_type, state_k), state_pdu) in state { - new_state.insert( - ( - ev_type.clone(), - state_k.clone().ok_or(Error::Conflict( - "append_incoming_pdu: State contained non state event", - ))?, - ), - state_pdu.event_id.clone(), - ); - } - - db.rooms - .force_state(pdu.room_id(), new_state, &db.globals)?; - let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1434,19 +1251,18 @@ pub(crate) fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let state_hash = db.rooms.append_to_state(&pdu, &db.globals)?; + db.rooms + .set_event_state(&pdu.event_id, state, &db.globals)?; db.rooms.append_pdu( pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &new_room_leaves, + &new_room_leaves.into_iter().collect::>(), &db, )?; - db.rooms.set_room_state(pdu.room_id(), state_hash)?; - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces From 5a13f6f7e063966ca14004e2b8fe287b34771d62 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Wed, 24 Mar 2021 18:25:21 +0100 Subject: [PATCH 081/103] Remove build container and use the always present build artifact directly Proper chmod caddy and conduit binaries --- tests/Complement.Dockerfile | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 370db7c..abae3fb 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -1,39 +1,34 @@ -FROM valkum/docker-rust-ci:latest as builder -WORKDIR /workdir - -ARG RUSTC_WRAPPER -ARG AWS_ACCESS_KEY_ID -ARG AWS_SECRET_ACCESS_KEY -ARG SCCACHE_BUCKET -ARG SCCACHE_ENDPOINT -ARG SCCACHE_S3_USE_SSL - -COPY . . -RUN test -e cached_target/release/conduit || cargo build --release - +# For use in our CI only. 
This requires a build artifact created by a previous run pipline stage to be placed in cached_target/release/conduit FROM valkum/docker-rust-ci:latest WORKDIR /workdir RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.2.1/caddy_2.2.1_linux_amd64.tar.gz" RUN tar xzf caddy_2.2.1_linux_amd64.tar.gz -COPY --from=builder /workdir/target/debug/conduit /workdir/conduit +COPY cached_target/release/conduit /workdir/conduit +RUN chmod +x /workdir/conduit +RUN chmod +x /workdir/caddy -COPY Rocket-example.toml Rocket.toml +COPY conduit-example.toml conduit.toml ENV SERVER_NAME=localhost ENV ROCKET_LOG=normal +ENV CONDUIT_CONFIG=/workdir/conduit.toml -RUN sed -i "s/port = 14004/port = 8008/g" Rocket.toml -RUN echo "federation_enabled = true" >> Rocket.toml +RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml +RUN echo "allow_federation = true" >> conduit.toml +RUN echo "allow_encryption = true" >> conduit.toml +RUN echo "allow_registration = true" >> conduit.toml +RUN echo "log = \"info,rocket=info,_=off,sled=off\"" >> conduit.toml +RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml # Enabled Caddy auto cert generation for complement provided CA. -RUN echo '{"apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"localhost:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json +RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json EXPOSE 8008 8448 CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && true) || \ - sed -i "s/server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" Rocket.toml && \ + sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ /workdir/caddy start --config caddy.json > /dev/null && \ /workdir/conduit From cd4dc137310f8cb7b8df97df549b3da95a4d68af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 26 Mar 2021 11:10:45 +0100 Subject: [PATCH 082/103] fix: use correct room versions --- Cargo.lock | 141 ++++++++++++----------- conduit-example.toml | 2 +- src/client_server/capabilities.rs | 1 - src/client_server/membership.rs | 21 ++-- 
src/client_server/room.rs | 5 +- src/database.rs | 2 +- src/database/rooms.rs | 26 ++++- src/database/sending.rs | 8 +- src/server_server.rs | 185 +++++++++++++++++++----------- 9 files changed, 236 insertions(+), 155 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9580942..2a80291 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,7 +229,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.25", + "time 0.2.26", "version_check", ] @@ -580,9 +580,9 @@ dependencies = [ [[package]] name = "gif" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02efba560f227847cb41463a7395c514d127d4f74fff12ef0137fff1b84b96c4" +checksum = "5a668f699973d0f573d15749b7002a9ac9e1f9c6b220e7b165601334c173d8de" dependencies = [ "color_quant", "weezl", @@ -596,9 +596,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" +checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" dependencies = [ "bytes", "fnv", @@ -661,12 +661,13 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" dependencies = [ "bytes", "http", + "pin-project-lite", ] [[package]] @@ -845,9 +846,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.48" +version = "0.3.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc9f84f9b115ce7843d60706df1422a916680bfdfcbdb0447c5614ff9d7e4d78" +checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" dependencies = [ "wasm-bindgen", ] @@ -883,9 +884,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.88" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b07a082330a35e43f63177cc01689da34fbffa0105e1246cf0311472cac73a" +checksum = "8916b1f6ca17130ec6568feccee27c156ad12037880833a3b842a823236502e7" [[package]] name = "linked-hash-map" @@ -955,9 +956,9 @@ checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" +checksum = "cc14fc54a812b4472b4113facc3e44d099fbc0ea2ce0551fa5c703f8edfbfd38" dependencies = [ "autocfg", ] @@ -979,9 +980,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.9" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5dede4e2065b3842b8b0af444119f3aa331cc7cc2dd20388bfb0f5d5a38823a" +checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" dependencies = [ "libc", "log", @@ -992,11 +993,10 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "socket2", "winapi", ] @@ -1097,15 +1097,15 @@ checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" [[package]] name = "openssl" -version = "0.10.32" +version = "0.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" +checksum = "a61075b62a23fef5a29815de7536d940aa35ce96d18ce0cc5076272db678a577" dependencies = [ "bitflags", "cfg-if", "foreign-types", - "lazy_static", "libc", + "once_cell", "openssl-sys", ] @@ -1117,18 +1117,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.14.0+1.1.1j" +version = "111.15.0+1.1.1k" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b569b5bd7e5462a1700f595c7c7d487691d73b5ce064176af7f9f0cbb80a9" +checksum = "b1a5f6ae2ac04393b217ea9f700cd04fa9bf3d93fae2872069f3d15d908af70a" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.60" +version = "0.9.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" +checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" dependencies = [ "autocfg", "cc", @@ -1203,9 +1203,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" +checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" [[package]] name = "pear" @@ -1249,18 +1249,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" +checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" +checksum = "a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5" dependencies = [ "proc-macro2", "quote", @@ -1459,9 +1459,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.4" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54fd1046a3107eb58f42de31d656fee6853e5d276c455fd943742dce89fc3dd3" +checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" dependencies = [ "aho-corasick", "memchr", @@ -1574,7 +1574,7 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.25", + "time 0.2.26", "tokio", "ubyte", "version_check", @@ -1612,7 +1612,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.25", + "time 0.2.26", "tokio", "tokio-rustls", "uncased", @@ -1953,18 +1953,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bd761ff957cb2a45fbb9ab3da6512de9de55872866160b23c25f1a841e99d29f" +checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1800f7693e94e186f5e25a28291ae1570da908aff7d97a095dec1e56ff99069b" +checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" dependencies = [ "proc-macro2", "quote", @@ -2089,9 +2089,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" +checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" dependencies = [ "version_check", ] @@ -2105,7 +2105,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=improvements#1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0" +source = "git+https://github.com/timokoesters/state-res?branch=improvements#625c37cb776b381a83ab7ee58b13e32506849648" dependencies = [ "itertools 0.10.0", "log", @@ -2167,9 +2167,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd9bc7ccc2688b3344c2f48b9b546648b25ce0b20fc717ee7fa7981a8ca9717" +checksum = "3fd9d1e9976102a03c542daa2eff1b43f9d72306342f3f8b3ed5fb8908195d6f" dependencies = [ "proc-macro2", "quote", @@ -2262,9 +2262,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.25" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" +checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" dependencies = [ "const_fn", "libc", @@ -2315,9 +2315,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d56477f6ed99e10225f38f9f75f872f29b8b8bd8c0b946f63345bb144e9eeda" +checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" dependencies = [ "autocfg", "bytes", @@ -2366,9 +2366,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec31e5cc6b46e653cf57762f36f71d5e6386391d88a72fd6db4508f8f676fb29" +checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" dependencies = [ "bytes", "futures-core", @@ -2483,9 +2483,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" +checksum = "8d57e219ba600dd96c2f6d82eb79645068e14edbc5c7e27514af40436b88150c" dependencies = [ "async-trait", "cfg-if", @@ -2501,15 +2501,16 @@ dependencies = [ "rand", "smallvec", "thiserror", + "tinyvec", "tokio", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.20.0" +version = "0.20.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" +checksum = "b0437eea3a6da51acc1e946545ff53d5b8fb2611ff1c3bed58522dde100536ae" dependencies = [ "cfg-if", "futures-util", @@ -2605,9 +2606,9 @@ checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "want" @@ -2633,9 +2634,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" +checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" dependencies = [ "cfg-if", "serde", @@ -2645,9 +2646,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b7d8b6942b8bb3a9b0e73fc79b98095a27de6fa247615e59d096754a3bc2aa8" +checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" dependencies = [ "bumpalo", "lazy_static", @@ -2660,9 +2661,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e67a5806118af01f0d9045915676b22aaebecf4178ae7021bc171dab0b897ab" +checksum = "73157efb9af26fb564bb59a009afd1c7c334a44db171d280690d0c3faaec3468" dependencies = [ "cfg-if", "js-sys", @@ -2672,9 +2673,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ac38da8ef716661f0f36c0d8320b89028efe10c7c0afde65baffb496ce0d3b" +checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2682,9 +2683,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc053ec74d454df287b9374ee8abb36ffd5acb95ba87da3ba5b7d3fe20eb401e" +checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" dependencies = [ "proc-macro2", "quote", @@ -2695,15 +2696,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.71" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6f8ec44822dd71f5f221a5847fb34acd9060535c1211b70a05844c0f6383b1" +checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" [[package]] name = "web-sys" -version = "0.3.48" +version = "0.3.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec600b26223b2948cedfde2a0aa6756dcf1fef616f43d7b3097aaf53a6c4d92b" +checksum = "59fe19d70f5dacc03f6e46777213facae5ac3801575d56ca6cbd4c93dcd12310" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/conduit-example.toml b/conduit-example.toml index fea84bd..87f959d 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -35,7 +35,7 @@ max_request_size = 20_000_000 # in bytes 
#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time -#log = "info,rocket=off,_=off,sled=off" +#log = "info,state_res=warn,rocket=off,_=off,sled=off" #workers = 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index a3c0db6..ddc213d 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -17,7 +17,6 @@ use rocket::get; #[tracing::instrument] pub async fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); - available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); let mut capabilities = Capabilities::new(); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 6a64ea4..5d630b4 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,7 @@ use crate::{ pdu::{PduBuilder, PduEvent}, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::{error, info, warn}; +use log::{error, warn}; use ruma::{ api::{ client::{ @@ -455,7 +455,7 @@ async fn join_room_by_id_helper( federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, - ver: &[RoomVersionId::Version5, RoomVersionId::Version6], + ver: &[RoomVersionId::Version6], }, ) .await; @@ -469,6 +469,11 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; + let room_version = match make_join_response.room_version { + Some(room_version) if room_version == RoomVersionId::Version6 => room_version, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + let mut join_event_stub = serde_json::from_str::(make_join_response.event.json().get()) .map_err(|_| { @@ -505,14 +510,14 @@ async fn join_room_by_id_helper( db.globals.server_name().as_str(), db.globals.keypair(), &mut join_event_stub, - &RoomVersionId::Version6, + &room_version, ) .expect("event is valid, we just created it"); // Generate event id let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&join_event_stub, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&join_event_stub, &room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -546,7 +551,7 @@ async fn join_room_by_id_helper( })?; let event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&value, &room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -586,8 +591,7 @@ async fn join_room_by_id_helper( }) }) { - let (id, pdu) = pdu?; - info!("adding {} to outliers: {:#?}", id, pdu); + let (_id, pdu) = pdu?; db.rooms.add_pdu_outlier(&pdu)?; if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { @@ -641,8 +645,7 @@ async fn join_room_by_id_helper( }) }) { - let (id, pdu) = pdu?; - info!("adding {} to outliers: {:#?}", id, pdu); + let (_id, pdu) = pdu?; db.rooms.add_pdu_outlier(&pdu)?; } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 399677f..f8d980b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ 
-343,10 +343,7 @@ pub async fn upgrade_room_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !matches!( - body.new_version, - RoomVersionId::Version5 | RoomVersionId::Version6 - ) { + if !matches!(body.new_version, RoomVersionId::Version6) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", diff --git a/src/database.rs b/src/database.rs index b14a912..d8734b5 100644 --- a/src/database.rs +++ b/src/database.rs @@ -71,7 +71,7 @@ fn default_max_concurrent_requests() -> u16 { } fn default_log() -> String { - "info,rocket=off,_=off,sled=off".to_owned() + "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() } #[derive(Clone)] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index e1e97b4..676ac07 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -420,6 +420,27 @@ impl Rooms { .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map_or_else::, _, _>( + || Ok(None), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + Ok(serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?) + }) + .transpose() + } + /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. @@ -1002,6 +1023,7 @@ impl Rooms { None }; + // If there was no create event yet, assume we are creating a version 6 room right now let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| { create_event.room_version }); @@ -1093,14 +1115,14 @@ impl Rooms { db.globals.server_name().as_str(), db.globals.keypair(), &mut pdu_json, - &RoomVersionId::Version6, + &room_version, ) .expect("event is valid, we just created it"); // Generate event id pdu.event_id = EventId::try_from(&*format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&pdu_json, &room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); diff --git a/src/database/sending.rs b/src/database/sending.rs index b792479..82d2cdd 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -10,7 +10,7 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; -use log::{info, warn}; +use log::warn; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ @@ -518,7 +518,11 @@ impl Sending { ) .await .map(|response| { - info!("server response: {:?}", response); + for pdu in response.pdus { + if pdu.1.is_err() { + warn!("Failed to send to {}: {:?}", server, pdu); + } + } kind.clone() }) .map_err(|e| (kind, e)); diff --git a/src/server_server.rs b/src/server_server.rs index e461b5a..28540eb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,10 +20,10 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::EventType, - serde::to_canonical_value, + events::{room::create::CreateEventContent, EventType}, + serde::{to_canonical_value, Raw}, signatures::CanonicalJsonValue, - EventId, RoomId, 
RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ @@ -180,7 +180,12 @@ where .collect::>(); if status != 200 { - info!("{} {}:\n{}", url, status, String::from_utf8_lossy(&body),); + info!( + "{} {}: {}", + url, + status, + String::from_utf8_lossy(&body).lines().collect::>().join(" ") + ); } let response = T::IncomingResponse::try_from( @@ -534,6 +539,16 @@ pub async fn send_transaction_message_route<'a>( let mut resolved_map = BTreeMap::new(); + let mut pub_key_map = BTreeMap::new(); + + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. + // TODO: make this persist across requests but not in a DB Tree (in globals?) + // TODO: This could potentially also be some sort of trie (suffix tree) like structure so + // that once an auth event is known it would know (using indexes maybe) all of the auth + // events that it references. + let mut auth_cache = EventMap::new(); + for pdu in body.pdus.iter() { // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { @@ -544,13 +559,27 @@ pub async fn send_transaction_message_route<'a>( } }; - if let Err(e) = handle_incoming_pdu(&body.origin, &event_id, value, true, &db).await { + if let Err(e) = handle_incoming_pdu( + &body.origin, + &event_id, + value, + true, + &db, + &mut pub_key_map, + &mut auth_cache, + ) + .await + { resolved_map.insert(event_id, Err(e)); } } - if !resolved_map.is_empty() { - warn!("These PDU's failed {:?}", resolved_map); + for pdu in &resolved_map { + if let Err(e) = pdu.1 { + if e != "Room is unknown to this server." { + warn!("Incoming PDU failed {:?}", pdu); + } + } } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) @@ -589,12 +618,14 @@ fn handle_incoming_pdu<'a>( value: BTreeMap, is_timeline_event: bool, db: &'a Database, + pub_key_map: &'a mut BTreeMap>, + auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, Arc> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // 0. Skip the PDU if we already know about it - if let Ok(Some(pdu)) = db.rooms.get_pdu(&event_id) { + if let Ok(Some(pdu)) = db.rooms.get_non_outlier_pdu(&event_id) { return Ok(Arc::new(pdu)); } @@ -610,27 +641,17 @@ fn handle_incoming_pdu<'a>( Some(id) => id, None => { // Event is invalid - return Err("Event needs a valid RoomId".to_string()); + return Err("Event needs a valid RoomId.".to_string()); } }; match db.rooms.exists(&room_id) { Ok(true) => {} _ => { - return Err("Room is unknown to this server".to_string()); + return Err("Room is unknown to this server.".to_string()); } } - let mut pub_key_map = BTreeMap::new(); - - // This is all the auth_events that have been recursively fetched so they don't have to be - // deserialized over and over again. - // TODO: make this persist across requests but not in a DB Tree (in globals?) - // TODO: This could potentially also be some sort of trie (suffix tree) like structure so - // that once an auth event is known it would know (using indexes maybe) all of the auth - // events that it references. 
- let mut auth_cache = EventMap::new(); - // We go through all the signatures we see on the value and fetch the corresponding signing // keys for (signature_server, signature) in match value @@ -674,22 +695,35 @@ fn handle_incoming_pdu<'a>( // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match - let mut val = - match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version5) { - Err(e) => { - // Drop - error!("{:?}: {}", value, e); - return Err("Signature verification failed".to_string()); + let create_event = db + .rooms + .room_state_get(&room_id, &EventType::RoomCreate, "") + .map_err(|_| "Failed to ask database for event.")? + .ok_or_else(|| "Failed to find create event in db.")?; + + let create_event_content = + serde_json::from_value::>(create_event.content.clone()) + .expect("Raw::from_value always works.") + .deserialize() + .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; + + let room_version = create_event_content.room_version; + + let mut val = match ruma::signatures::verify_event(&pub_key_map, &value, &room_version) { + Err(e) => { + // Drop + error!("{:?}: {}", value, e); + return Err("Signature verification failed".to_string()); + } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + match ruma::signatures::redact(&value, &room_version) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), } - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } - Ok(ruma::signatures::Verified::All) => value, - }; + } + Ok(ruma::signatures::Verified::All) => value, + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type @@ -705,9 +739,15 @@ fn handle_incoming_pdu<'a>( // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" debug!("Fetching auth events."); - fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, &mut auth_cache) - .await - .map_err(|e| e.to_string())?; + fetch_and_handle_events( + db, + origin, + &incoming_pdu.auth_events, + pub_key_map, + auth_cache, + ) + .await + .map_err(|e| e.to_string())?; // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events debug!("Checking auth."); @@ -738,12 +778,6 @@ fn handle_incoming_pdu<'a>( } } - let create_event = db - .rooms - .room_state_get(&incoming_pdu.room_id, &EventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.")? 
- .ok_or_else(|| "Failed to find create event in db.")?; - // The original create event must be in the auth events if auth_events .get(&(EventType::RoomCreate, "".to_owned())) @@ -768,7 +802,7 @@ fn handle_incoming_pdu<'a>( let incoming_pdu = Arc::new(incoming_pdu.clone()); if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + &room_version, &incoming_pdu, previous_create.clone(), &auth_events, @@ -810,7 +844,7 @@ fn handle_incoming_pdu<'a>( &db.globals, origin, get_room_state_ids::v1::Request { - room_id: &incoming_pdu.room_id, + room_id: &room_id, event_id: &incoming_pdu.event_id, }, ) @@ -822,7 +856,8 @@ fn handle_incoming_pdu<'a>( &db, origin, &res.pdu_ids, - &mut auth_cache, + pub_key_map, + auth_cache, ) .await { @@ -854,7 +889,8 @@ fn handle_incoming_pdu<'a>( &db, origin, &res.auth_chain_ids, - &mut auth_cache, + pub_key_map, + auth_cache, ) .await { @@ -871,7 +907,7 @@ fn handle_incoming_pdu<'a>( // 11. Check the auth of the event passes based on the state of the event if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + &room_version, &incoming_pdu, previous_create.clone(), &state_at_incoming_event, @@ -886,14 +922,14 @@ fn handle_incoming_pdu<'a>( // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it let current_state = db .rooms - .room_state_full(incoming_pdu.room_id()) + .room_state_full(&room_id) .map_err(|_| "Failed to load room state.".to_owned())? .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(); if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, // TODO: Use correct room version + &room_version, &incoming_pdu, previous_create, ¤t_state, @@ -910,7 +946,7 @@ fn handle_incoming_pdu<'a>( // applied. We start with the previous extremities (aka leaves) let mut extremities = db .rooms - .get_pdu_leaves(&incoming_pdu.room_id) + .get_pdu_leaves(&room_id) .map_err(|_| "Failed to load room leaves".to_owned())?; // Remove any forward extremities that are referenced by this incoming event's prev_events @@ -922,7 +958,11 @@ fn handle_incoming_pdu<'a>( let mut fork_states = BTreeSet::new(); for id in &extremities { - match db.rooms.get_pdu(&id).map_err(|_| "Failed to ask db for pdu.".to_owned())? { + match db + .rooms + .get_pdu(&id) + .map_err(|_| "Failed to ask db for pdu.".to_owned())? 
+ { Some(leaf_pdu) => { let pdu_shortstatehash = db .rooms @@ -985,7 +1025,7 @@ fn handle_incoming_pdu<'a>( } else if fork_states.len() == 1 { // There was only one state, so it has to be the room's current state (because that is // always included) - info!("Skipping stateres because there is no new state."); + debug!("Skipping stateres because there is no new state."); fork_states[0] .iter() .map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) @@ -998,8 +1038,14 @@ fn handle_incoming_pdu<'a>( for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { - match fetch_and_handle_events(&db, origin, &[auth_id.clone()], &mut auth_cache) - .await + match fetch_and_handle_events( + &db, + origin, + &[auth_id.clone()], + pub_key_map, + auth_cache, + ) + .await { // This should always contain exactly one element when Ok Ok(events) => state_auth.push(events[0].clone()), @@ -1030,8 +1076,8 @@ fn handle_incoming_pdu<'a>( ); match state_res::StateResolution::resolve( - &incoming_pdu.room_id, - &RoomVersionId::Version6, + &room_id, + &room_version, &fork_states .into_iter() .map(|map| { @@ -1044,7 +1090,7 @@ fn handle_incoming_pdu<'a>( .into_iter() .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) .collect(), - &mut auth_cache, + auth_cache, ) { Ok(new_state) => new_state, Err(_) => { @@ -1089,6 +1135,7 @@ pub(crate) async fn fetch_and_handle_events( db: &Database, origin: &ServerName, events: &[EventId], + pub_key_map: &mut BTreeMap>, auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; @@ -1123,12 +1170,20 @@ pub(crate) async fn fetch_and_handle_events( debug!("Got event over federation: {:?}", res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - let pdu = handle_incoming_pdu(origin, &event_id, value, false, db) - .await - .map_err(|e| { - error!("Error: {:?}", e); - Error::Conflict("Authentication of event failed") - })?; + let pdu = handle_incoming_pdu( + origin, + &event_id, + value, + false, + db, + pub_key_map, + auth_cache, + ) + .await + .map_err(|e| { + error!("Error: {:?}", e); + Error::Conflict("Authentication of event failed") + })?; pdu } From db7044a950bad897ed16725d7b18b9b47b767342 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 26 Mar 2021 13:41:05 +0100 Subject: [PATCH 083/103] sytest test --- src/database/rooms.rs | 6 +- src/server_server.rs | 9 +- tests/sytest/sytest-whitelist | 384 ++++++++++++++++++++++++++++++++++ 3 files changed, 394 insertions(+), 5 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 676ac07..5d43626 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1081,7 +1081,7 @@ impl Rooms { signatures: BTreeMap::new(), }; - if !state_res::auth_check( + let auth_check = state_res::auth_check( &room_version, &Arc::new(pdu.clone()), create_prev_event, @@ -1091,7 +1091,9 @@ impl Rooms { .map_err(|e| { error!("{:?}", e); Error::bad_database("Auth check failed.") - })? 
{ + })?; + + if !auth_check { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Event is not authorized.", diff --git a/src/server_server.rs b/src/server_server.rs index 28540eb..bb0b9af 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -184,7 +184,10 @@ where "{} {}: {}", url, status, - String::from_utf8_lossy(&body).lines().collect::>().join(" ") + String::from_utf8_lossy(&body) + .lines() + .collect::>() + .join(" ") ); } @@ -698,8 +701,8 @@ fn handle_incoming_pdu<'a>( let create_event = db .rooms .room_state_get(&room_id, &EventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.")? - .ok_or_else(|| "Failed to find create event in db.")?; + .map_err(|_| "Failed to ask database for event.".to_owned())? + .ok_or_else(|| "Failed to find create event in db.".to_owned())?; let create_event_content = serde_json::from_value::>(create_event.content.clone()) diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist index d3271dd..eda851a 100644 --- a/tests/sytest/sytest-whitelist +++ b/tests/sytest/sytest-whitelist @@ -1,71 +1,335 @@ +/event/ does not allow access to events before the user joined +/event/ on joined room works +/event/ on non world readable room does not work /joined_members return joined members /joined_rooms returns only joined rooms +/whois 3pid invite join valid signature but revoked keys are rejected 3pid invite join valid signature but unreachable ID server are rejected 3pid invite join with wrong but valid signature are rejected +A change to displayname should appear in incremental /sync +A full_state incremental update returns all state +A full_state incremental update returns only recent timeline +A message sent after an initial sync appears in the timeline of an incremental sync. 
+A next_batch token can be used in the v1 messages API +A pair of events which redact each other should be ignored +A pair of servers can establish a join in a v2 room +A prev_batch token can be used in the v1 messages API +AS can create a user +AS can create a user with an underscore +AS can create a user with inhibit_login +AS can set avatar for ghosted users +AS can set displayname for ghosted users +AS can't set displayname for random users AS cannot create users outside its own namespace +AS user (not ghost) can join room without registering +AS user (not ghost) can join room without registering, with user_id query param +After changing password, a different session no longer works by default +After changing password, can log in with new password +After changing password, can't log in with old password +After changing password, different sessions can optionally be kept +After changing password, existing session still works After deactivating account, can't log in with an email +After deactivating account, can't log in with password Alias creators can delete alias with no ops Alias creators can delete canonical alias with no ops Alternative server names do not cause a routing loop +An event which redacts an event in a different room should be ignored +An event which redacts itself should be ignored +Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list +Backfill checks the events requested belong to the room +Backfill works correctly with history visibility set to joined +Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination +Banned servers cannot /event_auth +Banned servers cannot /invite +Banned servers cannot /make_join +Banned servers cannot /make_leave +Banned servers cannot /send_join +Banned servers cannot /send_leave +Banned servers cannot backfill +Banned servers cannot get missing events +Banned servers cannot get room state +Banned servers cannot get room state ids +Banned servers cannot send events +Banned user is kicked and may not rejoin until unbanned Both GET and PUT work +Can /sync newly created room Can add account data +Can add account data to room Can add tag +Can claim one time key using POST +Can claim remote one time key using POST Can create filter +Can deactivate account Can delete canonical alias +Can download file 'ascii' +Can download file 'name with spaces' +Can download file 'name;with;semicolons' +Can download filter +Can download specifying a different ASCII file name +Can download specifying a different Unicode file name +Can download with Unicode file name locally +Can download with Unicode file name over federation +Can download without a file name locally +Can download without a file name over federation +Can forget room you've been kicked from +Can get 'm.room.name' state for a departed room (SPEC-216) +Can get account data without syncing +Can get remote public room list +Can get room account data without syncing +Can get rooms/{roomId}/members +Can get rooms/{roomId}/members for a departed room (SPEC-216) +Can get rooms/{roomId}/state for a departed room (SPEC-216) Can invite users to invite-only rooms Can list tags for a room Can logout all devices Can logout current device +Can paginate public room list +Can pass a JSON filter as a query parameter +Can query device keys using POST +Can query remote device keys using POST +Can query specific device keys using POST Can re-join room if re-invited Can read configuration endpoint +Can receive redactions from 
regular users over federation in room version 1 +Can receive redactions from regular users over federation in room version 2 +Can receive redactions from regular users over federation in room version 3 +Can receive redactions from regular users over federation in room version 4 +Can receive redactions from regular users over federation in room version 5 +Can receive redactions from regular users over federation in room version 6 Can recv a device message using /sync +Can recv a device message using /sync +Can recv device messages over federation Can recv device messages until they are acknowledged +Can recv device messages until they are acknowledged +Can reject invites over federation for rooms with version 1 +Can reject invites over federation for rooms with version 2 +Can reject invites over federation for rooms with version 3 +Can reject invites over federation for rooms with version 4 +Can reject invites over federation for rooms with version 5 +Can reject invites over federation for rooms with version 6 Can remove tag +Can search public room list Can send a message directly to a device using PUT /sendToDevice +Can send a message directly to a device using PUT /sendToDevice Can send a to-device message to two users which both receive it using /sync +Can send image in room message Can send messages with a wildcard device id +Can send messages with a wildcard device id Can send messages with a wildcard device id to two devices +Can send messages with a wildcard device id to two devices Can sync +Can sync a joined room +Can sync a room with a message with a transaction id +Can sync a room with a single message +Can upload device keys Can upload with ASCII file name Can upload with Unicode file name Can upload without a file name +Can't deactivate account with wrong password +Can't forget room you're still in +Changes to state are included in an gapped incremental sync +Changes to state are included in an incremental sync Changing the actions of an unknown default rule fails with 404 Changing the actions of an unknown rule fails with 404 Checking local federation server +Creators can delete alias Current state appears in timeline in private history Current state appears in timeline in private history with many messages before +DELETE /device/{deviceId} +DELETE /device/{deviceId} requires UI auth user to match device owner +DELETE /device/{deviceId} with no body gives a 401 Deleted tags appear in an incremental v2 /sync Deleting a non-existent alias should return a 404 +Device list doesn't change if remote server is down +Device messages over federation wake up /sync Device messages wake up /sync +Device messages wake up /sync Device messages with the same txn_id are deduplicated +Device messages with the same txn_id are deduplicated +Enabling an unknown default rule fails with 404 +Event size limits +Event with an invalid signature in the send_join response should not cause room join to fail Events come down the correct room +Events whose auth_events are in the wrong room do not mess up the room state +Existing members see new members' join events +Federation key API allows unsigned requests for keys +Federation key API can act as a notary server via a GET request +Federation key API can act as a notary server via a POST request +Federation rejects inbound events where the prev_events cannot be found +Fetching eventstream a second time doesn't yield the message again +Forgetting room does not show up in v2 /sync +Full state sync includes joined rooms +GET /capabilities is present and well 
formed for registered user GET /device/{deviceId} GET /device/{deviceId} gives a 404 for unknown devices GET /devices +GET /directory/room/:room_alias yields room ID +GET /events initially GET /events with negative 'limit' GET /events with non-numeric 'limit' GET /events with non-numeric 'timeout' +GET /initialSync initially GET /joined_rooms lists newly-created room GET /login yields a set of flows GET /media/r0/download can fetch the value again GET /profile/:user_id/avatar_url publicly accessible GET /profile/:user_id/displayname publicly accessible +GET /publicRooms includes avatar URLs GET /publicRooms lists newly-created room +GET /publicRooms lists rooms +GET /r0/capabilities is not public GET /register yields a set of flows +GET /rooms/:room_id/joined_members fetches my membership +GET /rooms/:room_id/messages returns a message GET /rooms/:room_id/state fetches entire room state GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership +GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event +GET /rooms/:room_id/state/m.room.name gets name +GET /rooms/:room_id/state/m.room.power_levels can fetch levels +GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels +GET /rooms/:room_id/state/m.room.topic gets topic +Get left notifs for other users in sync and /keys/changes when user leaves +Getting messages going forward is limited for a departed room (SPEC-216) Getting push rules doesn't corrupt the cache SYN-390 +Getting state IDs checks the events requested belong to the room +Getting state checks the events requested belong to the room +Ghost user must register before joining room +Guest non-joined user cannot call /events on default room +Guest non-joined user cannot call /events on invited room +Guest non-joined user cannot call /events on joined room +Guest non-joined user cannot call /events on shared room +Guest non-joined users can get individual state for world_readable rooms +Guest non-joined users can get individual state for world_readable rooms after leaving +Guest non-joined users can get state for world_readable rooms +Guest non-joined users cannot room initalSync for non-world_readable rooms +Guest non-joined users cannot send messages to guest_access rooms if not joined +Guest user can set display names +Guest user cannot call /events globally +Guest user cannot upgrade other users +Guest users can accept invites to private rooms over federation +Guest users can join guest_access rooms +Guest users can send messages to guest_access rooms if joined +If a device list update goes missing, the server resyncs on the next one +If remote user leaves room we no longer receive device updates +If remote user leaves room, changes device and rejoins we see update in /keys/changes +If remote user leaves room, changes device and rejoins we see update in sync +Inbound /make_join rejects attempts to join rooms where all users have left +Inbound /v1/make_join rejects remote attempts to join local users to rooms +Inbound /v1/send_join rejects incorrectly-signed joins +Inbound /v1/send_join rejects joins from other servers +Inbound /v1/send_leave rejects leaves from other servers +Inbound federation accepts a second soft-failed event +Inbound federation accepts attempts to join v2 rooms from servers with support +Inbound federation can backfill events +Inbound federation can get public room list +Inbound federation can get state for a room +Inbound federation can get state_ids for a room +Inbound federation can query profile data 
+Inbound federation can query room alias directory +Inbound federation can receive events +Inbound federation can receive invites via v1 API +Inbound federation can receive invites via v2 API +Inbound federation can receive redacted events +Inbound federation can receive v1 /send_join +Inbound federation can receive v2 /send_join +Inbound federation can return events +Inbound federation can return missing events for invite visibility +Inbound federation can return missing events for world_readable visibility +Inbound federation correctly soft fails events +Inbound federation of state requires event_id as a mandatory paramater +Inbound federation of state_ids requires event_id as a mandatory paramater +Inbound federation rejects attempts to join v1 rooms from servers without v1 support +Inbound federation rejects attempts to join v2 rooms from servers lacking version support +Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 +Inbound federation rejects invite rejections which include invalid JSON for room version 6 +Inbound federation rejects invites which include invalid JSON for room version 6 +Inbound federation rejects receipts from wrong remote +Inbound federation rejects remote attempts to join local users to rooms +Inbound federation rejects remote attempts to kick local users to rooms +Inbound federation rejects typing notifications from wrong remote +Inbound: send_join rejects invalid JSON for room version 6 +Invalid JSON floats +Invalid JSON integers +Invalid JSON special values +Invited user can reject invite +Invited user can reject invite over federation +Invited user can reject invite over federation for empty room +Invited user can reject invite over federation several times +Invited user can see room metadata +Inviting an AS-hosted user asks the AS server +Lazy loading parameters in the filter are strictly boolean +Left rooms appear in the leave section of full state sync +Local delete device changes appear in v2 /sync +Local device key changes appear in /keys/changes +Local device key changes appear in v2 /sync +Local device key changes get to remote servers +Local new device changes appear in v2 /sync +Local non-members don't see posted message events +Local room members can get room messages +Local room members see posted message events +Local update device changes appear in v2 /sync +Local users can peek by room alias +Local users can peek into world_readable rooms by room ID +Message history can be paginated +Message history can be paginated over federation +Name/topic keys are correct +New account data appears in incremental v2 /sync +New read receipts appear in incremental v2 /sync +New room members see their own join event +New users appear in /keys/changes +Newly banned rooms appear in the leave section of incremental sync +Newly joined room is included in an incremental sync +Newly joined room is included in an incremental sync after invite +Newly left rooms appear in the leave section of gapped sync +Newly left rooms appear in the leave section of incremental sync Newly updated tags appear in an incremental v2 /sync +Non-numeric ports in server names are rejected +Outbound federation can backfill events +Outbound federation can query profile data +Outbound federation can query room alias directory +Outbound federation can query v1 /send_join +Outbound federation can query v2 /send_join +Outbound federation can request missing events +Outbound federation can send events +Outbound federation can send invites via v1 API +Outbound 
federation can send invites via v2 API +Outbound federation can send room-join requests +Outbound federation correctly handles unsupported room versions +Outbound federation passes make_join failures through to the client +Outbound federation rejects backfill containing invalid JSON for events in room version 6 +Outbound federation rejects m.room.create events with an unknown room version +Outbound federation rejects send_join responses with no m.room.create event +Outbound federation sends receipts +Outbound federation will ignore a missing event with bad JSON for room version 6 +POST /createRoom creates a room with the given version +POST /createRoom ignores attempts to set the room version via creation_content POST /createRoom makes a private room POST /createRoom makes a private room with invites +POST /createRoom makes a public room +POST /createRoom makes a room with a name +POST /createRoom makes a room with a topic +POST /createRoom rejects attempts to create rooms with numeric versions +POST /createRoom rejects attempts to create rooms with unknown versions +POST /createRoom with creation content +POST /join/:room_alias can join a room +POST /join/:room_alias can join a room with custom content POST /join/:room_id can join a room +POST /join/:room_id can join a room with custom content POST /login as non-existing user is rejected POST /login can log in as a user POST /login can log in as a user with just the local part of the id POST /login returns the same device_id as that in the request POST /login wrong password is rejected POST /media/r0/upload can create an upload +POST /redact disallows redaction of event in different room +POST /register allows registration of usernames with '-' +POST /register allows registration of usernames with '.' +POST /register allows registration of usernames with '/' +POST /register allows registration of usernames with '3' +POST /register allows registration of usernames with '=' +POST /register allows registration of usernames with '_' +POST /register allows registration of usernames with 'q' POST /register can create a user POST /register downcases capitals in usernames POST /register rejects registration of usernames with '!' 
@@ -88,41 +352,161 @@ POST /rooms/:room_id/ban can ban a user POST /rooms/:room_id/invite can send an invite POST /rooms/:room_id/join can join a room POST /rooms/:room_id/leave can leave a room +POST /rooms/:room_id/read_markers can create read marker +POST /rooms/:room_id/receipt can create receipts +POST /rooms/:room_id/redact/:event_id as original message sender redacts message +POST /rooms/:room_id/redact/:event_id as power user redacts message +POST /rooms/:room_id/redact/:event_id as random user does not redact message +POST /rooms/:room_id/send/:event_type sends a message POST /rooms/:room_id/state/m.room.name sets name POST /rooms/:room_id/state/m.room.topic sets topic POST /rooms/:room_id/upgrade can upgrade a room version +POST rejects invalid utf-8 in JSON POSTed media can be thumbnailed PUT /device/{deviceId} gives a 404 for unknown devices PUT /device/{deviceId} updates device fields PUT /directory/room/:room_alias creates alias PUT /profile/:user_id/avatar_url sets my avatar PUT /profile/:user_id/displayname sets my name +PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id +PUT /rooms/:room_id/send/:event_type/:txn_id sends a message PUT /rooms/:room_id/state/m.room.power_levels can set levels +PUT /rooms/:room_id/typing/:user_id sets typing notification PUT power_levels should not explode if the old power levels were empty +Peeked rooms only turn up in the sync for the device who peeked them +Previously left rooms don't appear in the leave section of sync Push rules come down in an initial /sync Read markers appear in incremental v2 /sync Read markers appear in initial v2 /sync Read markers can be updated +Read receipts appear in initial v2 /sync +Real non-joined user cannot call /events on default room +Real non-joined user cannot call /events on invited room +Real non-joined user cannot call /events on joined room +Real non-joined user cannot call /events on shared room +Real non-joined users can get individual state for world_readable rooms +Real non-joined users can get individual state for world_readable rooms after leaving +Real non-joined users can get state for world_readable rooms +Real non-joined users cannot room initalSync for non-world_readable rooms +Real non-joined users cannot send messages to guest_access rooms if not joined +Receipts must be m.read +Redaction of a redaction redacts the redaction reason Regular users can add and delete aliases in the default room configuration +Regular users can add and delete aliases when m.room.aliases is restricted +Regular users cannot create room aliases within the AS namespace +Regular users cannot register within the AS namespace +Remote media can be thumbnailed +Remote room alias queries can handle Unicode +Remote room members also see posted message events +Remote room members can get room messages +Remote user can backfill in a room with version 1 +Remote user can backfill in a room with version 2 +Remote user can backfill in a room with version 3 +Remote user can backfill in a room with version 4 +Remote user can backfill in a room with version 5 +Remote user can backfill in a room with version 6 +Remote users can join room by alias +Remote users may not join unfederated rooms +Request to logout with invalid an access token is rejected +Request to logout without an access token is rejected +Room aliases can contain Unicode Room creation reports m.room.create to myself Room creation reports m.room.member to myself +Room members can join a room with an overridden displayname +Room members can 
override their displayname on a room-specific basis +Room state at a rejected message event is the same as its predecessor +Room state at a rejected state event is the same as its predecessor Rooms a user is invited to appear in an incremental sync Rooms a user is invited to appear in an initial sync +Rooms can be created with an initial invite list (SYN-205) +Server correctly handles incoming m.device_list_update +Server correctly handles transactions that break edu limits +Server correctly resyncs when client query keys and there is no remote cache +Server correctly resyncs when server leaves and rejoins a room +Server rejects invalid JSON in a version 6 room Setting room topic reports m.room.topic to myself +Should not be able to take over the room by pretending there is no PL event Should reject keys claiming to belong to a different user +State from remote users is included in the state in the initial sync +State from remote users is included in the timeline in an incremental sync +State is included in the timeline in the initial sync +Sync can be polled for updates +Sync is woken up for leaves +Syncing a new room with a large timeline limit isn't limited Tags appear in an initial v2 /sync Trying to get push rules with unknown rule_id fails with 404 +Typing can be explicitly stopped Typing events appear in gapped sync Typing events appear in incremental sync Typing events appear in initial sync +Typing notification sent to local room members +Typing notifications also sent to remote room members +Typing notifications don't leak Uninvited users cannot join the room +Unprivileged users can set m.room.topic if it only needs level 0 User appears in user directory +User can create and send/receive messages in a room with version 1 +User can create and send/receive messages in a room with version 2 +User can create and send/receive messages in a room with version 3 +User can create and send/receive messages in a room with version 4 +User can create and send/receive messages in a room with version 5 +User can create and send/receive messages in a room with version 6 +User can invite local user to room with version 1 +User can invite local user to room with version 2 +User can invite local user to room with version 3 +User can invite local user to room with version 4 +User can invite local user to room with version 5 +User can invite local user to room with version 6 +User can invite remote user to room with version 1 +User can invite remote user to room with version 2 +User can invite remote user to room with version 3 +User can invite remote user to room with version 4 +User can invite remote user to room with version 5 +User can invite remote user to room with version 6 User directory correctly update on display name change User in dir while user still shares private rooms User in shared private room does appear in user directory User is offline if they set_presence=offline in their sync +User signups are forbidden from starting with '_' +Users can't delete other's aliases +Users cannot invite a user that is already in the room +Users cannot invite themselves to a room +Users cannot kick users from a room they are not in +Users cannot kick users who have already left a room +Users cannot set ban powerlevel higher than their own +Users cannot set kick powerlevel higher than their own +Users cannot set notifications powerlevel higher than their own +Users cannot set redact powerlevel higher than their own +Users receive device_list updates for their own devices Users with sufficient 
power-level can delete other's aliases Version responds 200 OK with valid structure +We can't peek into rooms with invited history_visibility +We can't peek into rooms with joined history_visibility +We can't peek into rooms with shared history_visibility +We don't send redundant membership state across incremental syncs by default We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) +We should see our own leave event, even if history_visibility is restricted (SYN-662) +Wildcard device messages over federation wake up /sync Wildcard device messages wake up /sync +Wildcard device messages wake up /sync +avatar_url updates affect room member events +displayname updates affect room member events +local user can join room with version 1 +local user can join room with version 2 +local user can join room with version 3 +local user can join room with version 4 +local user can join room with version 5 +local user can join room with version 6 +m.room.history_visibility == "joined" allows/forbids appropriately for Guest users +m.room.history_visibility == "joined" allows/forbids appropriately for Real users +m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users +m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users query for user with no keys returns empty key dict +remote user can join room with version 1 +remote user can join room with version 2 +remote user can join room with version 3 +remote user can join room with version 4 +remote user can join room with version 5 +remote user can join room with version 6 +setting 'm.room.name' respects room powerlevel +setting 'm.room.power_levels' respects room powerlevel From fe744c856f9df7e27cd13956c950f35966377d44 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 5 Apr 2021 21:25:10 +0200 Subject: [PATCH 084/103] Upgrade ruma MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … and refactor push rule code along the way. 
--- Cargo.lock | 159 +++++++++-------- Cargo.toml | 4 +- src/client_server/account.rs | 4 +- src/client_server/media.rs | 2 +- src/client_server/push.rs | 315 ++++++++++------------------------ src/client_server/state.rs | 25 ++- src/database/pusher.rs | 323 ++++++----------------------------- src/database/sending.rs | 4 +- src/database/users.rs | 16 +- src/lib.rs | 1 - src/main.rs | 1 - src/push_rules.rs | 256 --------------------------- src/ruma_wrapper.rs | 13 +- 13 files changed, 261 insertions(+), 862 deletions(-) delete mode 100644 src/push_rules.rs diff --git a/Cargo.lock b/Cargo.lock index 2a80291..c6c1769 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" +checksum = "076a6803b0dacd6a88cfe64deba628b01533ff5ef265687e6938280c1afd0a28" [[package]] name = "constant_time_eq" @@ -356,9 +356,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" [[package]] name = "either" @@ -402,9 +402,9 @@ dependencies = [ [[package]] name = "figment" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c38799b106530aa30f774f7fca6d8f7e5f6234a79f427c4fad3c975eaf678931" +checksum = "0ca029e813a72b7526d28273d25f3e4a2f365d1b7a1018a6f93ec9053a119763" dependencies = [ "atomic", "pear", @@ -693,9 +693,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" +checksum = "8bf09f61b52cfcf4c00de50df88ae423d6c02354e385a86341133b5338630ad1" dependencies = [ "bytes", "futures-channel", @@ -708,7 +708,7 @@ dependencies = [ "httpdate", "itoa", "pin-project", - "socket2", + "socket2 0.4.0", "tokio", "tower-service", "tracing", @@ -764,6 +764,7 @@ checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" dependencies = [ "autocfg", "hashbrown", + "serde", ] [[package]] @@ -793,7 +794,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ - "socket2", + "socket2 0.3.19", "widestring", "winapi", "winreg 0.6.2", @@ -846,9 +847,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.49" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" dependencies = [ "wasm-bindgen", ] @@ -884,9 +885,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8916b1f6ca17130ec6568feccee27c156ad12037880833a3b842a823236502e7" +checksum = 
"56d855069fafbb9b344c0f962150cd2c1187975cb1c22c1522c240d8c4986714" [[package]] name = "linked-hash-map" @@ -896,9 +897,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" dependencies = [ "scopeguard", ] @@ -956,9 +957,9 @@ checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc14fc54a812b4472b4113facc3e44d099fbc0ea2ce0551fa5c703f8edfbfd38" +checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" dependencies = [ "autocfg", ] @@ -1315,10 +1316,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "0.1.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" dependencies = [ + "thiserror", "toml", ] @@ -1336,9 +1338,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" dependencies = [ "unicode-xid", ] @@ -1623,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "assign", "js_int", @@ -1643,7 +1645,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "http", "percent-encoding", @@ -1658,7 +1660,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1669,7 +1671,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "ruma-api", "ruma-common", @@ -1683,7 +1685,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = 
"git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "assign", "http", @@ -1701,21 +1703,24 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.3.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ + "indexmap", "js_int", "maplit", "ruma-identifiers", "ruma-serde", "serde", "serde_json", + "tracing", + "wildmatch", ] [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "js_int", "ruma-common", @@ -1729,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1740,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "js_int", "ruma-api", @@ -1754,8 +1759,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.18.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "paste", "rand", @@ -1768,8 +1773,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.18.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "proc-macro2", "quote", @@ -1779,13 +1784,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.2.2" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = 
"git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "ruma-api", "ruma-common", @@ -1798,7 +1803,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "js_int", "ruma-api", @@ -1812,8 +1817,8 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.3.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "form_urlencoded", "itoa", @@ -1825,8 +1830,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.3.1" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1836,8 +1841,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=f196f5b6f164973d6b343af31ab4e0457f743675#f196f5b6f164973d6b343af31ab4e0457f743675" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" dependencies = [ "base64 0.13.0", "ring", @@ -1915,9 +1920,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d493c5f39e02dfb062cd8f33301f90f9b13b650e8c1b1d0fd75c19dd64bff69d" +checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" dependencies = [ "bitflags", "core-foundation", @@ -1928,9 +1933,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee48cdde5ed250b0d3252818f646e174ab414036edb884dde62d80a3ac6082d" +checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" dependencies = [ "core-foundation-sys", "libc", @@ -2081,6 +2086,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "spin" version = "0.5.2" @@ -2105,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?branch=improvements#625c37cb776b381a83ab7ee58b13e32506849648" +source = "git+https://github.com/ruma/state-res?rev=af450d0fe2b0e1c890284d0bc3b9d6d4008ac475#af450d0fe2b0e1c890284d0bc3b9d6d4008ac475" dependencies = [ "itertools 0.10.0", "log", @@ -2167,9 +2182,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.64" +version 
= "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fd9d1e9976102a03c542daa2eff1b43f9d72306342f3f8b3ed5fb8908195d6f" +checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" dependencies = [ "proc-macro2", "quote", @@ -2300,9 +2315,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" dependencies = [ "tinyvec_macros", ] @@ -2634,9 +2649,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" dependencies = [ "cfg-if", "serde", @@ -2646,9 +2661,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" dependencies = [ "bumpalo", "lazy_static", @@ -2661,9 +2676,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73157efb9af26fb564bb59a009afd1c7c334a44db171d280690d0c3faaec3468" +checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" dependencies = [ "cfg-if", "js-sys", @@ -2673,9 +2688,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2683,9 +2698,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" dependencies = [ "proc-macro2", "quote", @@ -2696,15 +2711,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.72" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" [[package]] name = "web-sys" -version = "0.3.49" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fe19d70f5dacc03f6e46777213facae5ac3801575d56ca6cbd4c93dcd12310" +checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" dependencies = [ "js-sys", "wasm-bindgen", @@ -2732,6 +2747,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" +[[package]] +name = "wildmatch" +version = "2.0.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07ae7ce410f81ba679081aac1d4874f3b1c328535b630209aa5b4cdaaf895e20" + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 453bc8e..7bd1557 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "f196f5b6f164973d6b343af31ab4e0457f743675" } +ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570d5a86c1468e8a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } #ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", branch = "improvements", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "af450d0fe2b0e1c890284d0bc3b9d6d4008ac475", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature #state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } #state-res = { git = "https://github.com/ruma/state-res", rev = "1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0", features = ["unstable-pre-spec", "gen-eventid"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 10f5d75..4c5b60c 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -21,7 +21,7 @@ use ruma::{ }, EventType, }, - RoomAliasId, RoomId, RoomVersionId, UserId, + push, RoomAliasId, RoomId, RoomVersionId, UserId, }; use register::RegistrationKind; @@ -181,7 +181,7 @@ pub async fn register_route( EventType::PushRules, &ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { - global: crate::push_rules::default_pushrules(&user_id), + global: push::Ruleset::server_default(&user_id), }, }, &db.globals, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 57fc2b0..f9350e0 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -46,7 +46,7 @@ pub async fn create_content_route( db.flush().await?; Ok(create_content::Response { - content_uri: mxc, + content_uri: mxc.try_into().expect("Invalid mxc:// URI"), blurhash: None, } .into()) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 9de8c16..e37e660 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -10,10 +10,7 @@ use ruma::{ }, }, events::{push_rules, EventType}, - push::{ - ConditionalPushRuleInit, ContentPushRule, OverridePushRule, PatternedPushRuleInit, - RoomPushRule, SenderPushRule, SimplePushRuleInit, 
UnderridePushRule, - }, + push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; #[cfg(feature = "conduit_bin")] @@ -67,29 +64,24 @@ pub async fn get_pushrule_route( let rule = match body.kind { RuleKind::Override => global .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::Underride => global .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::Sender => global .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::Room => global .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::Content => global .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.clone().into()), + .get(body.rule_id.as_str()) + .map(|rule| rule.clone().into()), RuleKind::_Custom(_) => None, }; @@ -105,14 +97,15 @@ pub async fn get_pushrule_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] -#[tracing::instrument(skip(db, body))] +#[tracing::instrument(skip(db, req))] pub async fn set_pushrule_route( db: State<'_, Database>, - body: Ruma>, + req: Ruma>, ) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = req.sender_user.as_ref().expect("user is authenticated"); + let body = req.body; if body.scope != "global" { return Err(Error::BadRequest( @@ -132,107 +125,62 @@ pub async fn set_pushrule_route( let global = &mut event.content.global; match body.kind { RuleKind::Override => { - if let Some(rule) = global - .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.override_.remove(&rule); - } - - global.override_.insert(OverridePushRule( + global.override_.replace( ConditionalPushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), - conditions: body.conditions.clone(), + rule_id: body.rule_id, + conditions: body.conditions, } .into(), - )); + ); } RuleKind::Underride => { - if let Some(rule) = global - .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.underride.remove(&rule); - } - - global.underride.insert(UnderridePushRule( + global.underride.replace( ConditionalPushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), - conditions: body.conditions.clone(), + rule_id: body.rule_id, + conditions: body.conditions, } .into(), - )); + ); } RuleKind::Sender => { - if let Some(rule) = global - .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.sender.remove(&rule); - } - - global.sender.insert(SenderPushRule( + global.sender.replace( SimplePushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), + rule_id: body.rule_id, } .into(), - )); + ); } RuleKind::Room => { - if let Some(rule) = global - .room - .iter() - .find(|rule| 
rule.0.rule_id == body.rule_id) - .cloned() - { - global.room.remove(&rule); - } - - global.room.insert(RoomPushRule( + global.room.replace( SimplePushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), + rule_id: body.rule_id, } .into(), - )); + ); } RuleKind::Content => { - if let Some(rule) = global - .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.content.remove(&rule); - } - - global.content.insert(ContentPushRule( + global.content.replace( PatternedPushRuleInit { - actions: body.actions.clone(), + actions: body.actions, default: false, enabled: true, - rule_id: body.rule_id.clone(), - pattern: body.pattern.clone().unwrap_or_default(), + rule_id: body.rule_id, + pattern: body.pattern.unwrap_or_default(), } .into(), - )); + ); } RuleKind::_Custom(_) => {} } @@ -280,29 +228,24 @@ pub async fn get_pushrule_actions_route( let actions = match body.kind { RuleKind::Override => global .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::Underride => global .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::Sender => global .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::Room => global .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::Content => global .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map(|rule| rule.0.actions.clone()), + .get(body.rule_id.as_str()) + .map(|rule| rule.actions.clone()), RuleKind::_Custom(_) => None, }; @@ -343,63 +286,33 @@ pub async fn set_pushrule_actions_route( let global = &mut event.content.global; match body.kind { RuleKind::Override => { - if let Some(mut rule) = global - .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.override_.remove(&rule); - rule.0.actions = body.actions.clone(); - global.override_.insert(rule); + if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.override_.replace(rule); } } RuleKind::Underride => { - if let Some(mut rule) = global - .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.underride.remove(&rule); - rule.0.actions = body.actions.clone(); - global.underride.insert(rule); + if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.underride.replace(rule); } } RuleKind::Sender => { - if let Some(mut rule) = global - .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.sender.remove(&rule); - rule.0.actions = body.actions.clone(); - global.sender.insert(rule); + if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.sender.replace(rule); } } RuleKind::Room => { - if let Some(mut rule) = global - .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.room.remove(&rule); - rule.0.actions = 
body.actions.clone(); - global.room.insert(rule); + if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.room.replace(rule); } } RuleKind::Content => { - if let Some(mut rule) = global - .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { - global.content.remove(&rule); - rule.0.actions = body.actions.clone(); - global.content.insert(rule); + if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() { + rule.actions = body.actions.clone(); + global.content.replace(rule); } } RuleKind::_Custom(_) => {} @@ -449,28 +362,28 @@ pub async fn get_pushrule_enabled_route( RuleKind::Override => global .override_ .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::Underride => global .underride .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::Sender => global .sender .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::Room => global .room .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::Content => global .content .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .map_or(false, |rule| rule.0.enabled), + .find(|rule| rule.rule_id == body.rule_id) + .map_or(false, |rule| rule.enabled), RuleKind::_Custom(_) => false, }; @@ -508,62 +421,37 @@ pub async fn set_pushrule_enabled_route( let global = &mut event.content.global; match body.kind { RuleKind::Override => { - if let Some(mut rule) = global - .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { global.override_.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.override_.insert(rule); } } RuleKind::Underride => { - if let Some(mut rule) = global - .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.underride.get(body.rule_id.as_str()).cloned() { global.underride.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.underride.insert(rule); } } RuleKind::Sender => { - if let Some(mut rule) = global - .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.sender.get(body.rule_id.as_str()).cloned() { global.sender.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.sender.insert(rule); } } RuleKind::Room => { - if let Some(mut rule) = global - .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.room.get(body.rule_id.as_str()).cloned() { global.room.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.room.insert(rule); } } RuleKind::Content => { - if let Some(mut rule) = global - .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(mut rule) = global.content.get(body.rule_id.as_str()).cloned() { 
global.content.remove(&rule); - rule.0.enabled = body.enabled; + rule.enabled = body.enabled; global.content.insert(rule); } } @@ -612,52 +500,27 @@ pub async fn delete_pushrule_route( let global = &mut event.content.global; match body.kind { RuleKind::Override => { - if let Some(rule) = global - .override_ - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() { global.override_.remove(&rule); } } RuleKind::Underride => { - if let Some(rule) = global - .underride - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.underride.get(body.rule_id.as_str()).cloned() { global.underride.remove(&rule); } } RuleKind::Sender => { - if let Some(rule) = global - .sender - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.sender.get(body.rule_id.as_str()).cloned() { global.sender.remove(&rule); } } RuleKind::Room => { - if let Some(rule) = global - .room - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.room.get(body.rule_id.as_str()).cloned() { global.room.remove(&rule); } } RuleKind::Content => { - if let Some(rule) = global - .content - .iter() - .find(|rule| rule.0.rule_id == body.rule_id) - .cloned() - { + if let Some(rule) = global.content.get(body.rule_id.as_str()).cloned() { global.content.remove(&rule); } } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 54c5fa5..88cce03 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -3,10 +3,7 @@ use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::state::{ - get_state_events, get_state_events_for_empty_key, get_state_events_for_key, - send_state_event_for_empty_key, send_state_event_for_key, - }, + r0::state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ room::history_visibility::{HistoryVisibility, HistoryVisibilityEventContent}, @@ -25,8 +22,8 @@ use rocket::{get, put}; #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( db: State<'_, Database>, - body: Ruma>, -) -> ConduitResult { + body: Ruma>, +) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let content = serde_json::from_str::( @@ -49,7 +46,7 @@ pub async fn send_state_event_for_key_route( db.flush().await?; - Ok(send_state_event_for_key::Response { event_id }.into()) + Ok(send_state_event::Response { event_id }.into()) } #[cfg_attr( @@ -59,8 +56,8 @@ pub async fn send_state_event_for_key_route( #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( db: State<'_, Database>, - body: Ruma>, -) -> ConduitResult { + body: Ruma>, +) -> ConduitResult { // This just calls send_state_event_for_key_route let Ruma { body, @@ -81,7 +78,7 @@ pub async fn send_state_event_for_empty_key_route( &db, sender_user .as_ref() - .expect("no user for send state empty key rout"), + .expect("no user for send state empty key route"), &body.content, json, &body.room_id, @@ -91,7 +88,7 @@ pub async fn send_state_event_for_empty_key_route( db.flush().await?; - Ok(send_state_event_for_empty_key::Response { event_id }.into()) + Ok(send_state_event::Response { event_id }.into()) } #[cfg_attr( @@ -199,8 +196,8 @@ pub async fn get_state_events_for_key_route( #[tracing::instrument(skip(db, body))] pub async fn 
get_state_events_for_empty_key_route( db: State<'_, Database>, - body: Ruma>, -) -> ConduitResult { + body: Ruma>, +) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -236,7 +233,7 @@ pub async fn get_state_events_for_empty_key_route( "State event not found.", ))?; - Ok(get_state_events_for_empty_key::Response { + Ok(get_state_events_for_key::Response { content: serde_json::value::to_raw_value(&event.content) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 8e9b24e..6a88d5e 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -9,15 +9,8 @@ use ruma::{ }, OutgoingRequest, }, - events::{ - room::{ - member::{MemberEventContent, MembershipState}, - message::{MessageEventContent, MessageType, TextMessageEventContent}, - power_levels::PowerLevelsEventContent, - }, - EventType, - }, - push::{Action, PushCondition, PushFormat, Ruleset, Tweak}, + events::{room::power_levels::PowerLevelsEventContent, EventType}, + push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, uint, UInt, UserId, }; use sled::IVec; @@ -181,276 +174,56 @@ pub async fn send_push_notice( pdu: &PduEvent, db: &Database, ) -> Result<()> { - if let Some(msgtype) = pdu.content.get("msgtype").and_then(|b| b.as_str()) { - if msgtype == "m.notice" { - return Ok(()); + let power_levels: PowerLevelsEventContent = db + .rooms + .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_value(ev.content) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + let ctx = PushConditionRoomCtx { + room_id: pdu.room_id.clone(), + member_count: (db.rooms.room_members(&pdu.room_id).count() as u32).into(), + user_display_name: user.localpart().into(), // TODO: Use actual display name + users_power_levels: power_levels.users, + default_power_level: power_levels.users_default, + notification_power_levels: power_levels.notifications, + }; + + let mut notify = None; + let mut tweaks = Vec::new(); + + for action in ruleset.get_actions(&pdu.to_sync_state_event(), &ctx) { + let n = match action { + Action::DontNotify => false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => true, + Action::SetTweak(tweak) => { + tweaks.push(tweak.clone()); + continue; + } + }; + + if notify.is_some() { + return Err(Error::bad_database( + r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, + )); } + + notify = Some(n); } - for rule in ruleset.into_iter() { - // TODO: can actions contain contradictory Actions - if rule - .actions - .iter() - .any(|act| matches!(act, ruma::push::Action::DontNotify)) - || !rule.enabled - { - continue; - } + let notify = notify.ok_or_else(|| { + Error::bad_database( + r#"Malformed pushrule contains none of these actions: ["dont_notify", "notify", "coalesce"]"#, + ) + })?; - match rule.rule_id.as_str() { - ".m.rule.master" => {} - ".m.rule.suppress_notices" => { - if pdu.kind == EventType::RoomMessage - && pdu - .content - .get("msgtype") - .map_or(false, |ty| ty == "m.notice") - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.invite_for_me" => { - if let 
EventType::RoomMember = &pdu.kind { - if pdu.state_key.as_deref() == Some(user.as_str()) - && serde_json::from_value::(pdu.content.clone()) - .map_err(|_| Error::bad_database("PDU contained bad message content"))? - .membership - == MembershipState::Invite - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - ".m.rule.member_event" => { - if let EventType::RoomMember = &pdu.kind { - // TODO use this? - let _member = serde_json::from_value::(pdu.content.clone()) - .map_err(|_| Error::bad_database("PDU contained bad message content"))?; - if let Some(conditions) = rule.conditions { - if conditions.iter().any(|cond| match cond { - PushCondition::EventMatch { key, pattern } => { - let mut json = - serde_json::to_value(pdu).expect("PDU is valid JSON"); - for key in key.split('.') { - json = json[key].clone(); - } - // TODO: this is baddddd - json.to_string().contains(pattern) - } - _ => false, - }) { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - } - ".m.rule.contains_display_name" => { - if let EventType::RoomMessage = &pdu.kind { - let msg_content = - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { - Error::bad_database("PDU contained bad message content") - })?; - if let MessageType::Text(TextMessageEventContent { body, .. }) = - &msg_content.msgtype - { - if body.contains(user.localpart()) { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - } - ".m.rule.tombstone" => { - if pdu.kind == EventType::RoomTombstone && pdu.state_key.as_deref() == Some("") { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.roomnotif" => { - if let EventType::RoomMessage = &pdu.kind { - let msg_content = - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { - Error::bad_database("PDU contained bad message content") - })?; - if let MessageType::Text(TextMessageEventContent { body, .. }) = - &msg_content.msgtype - { - let power_level_cmp = |pl: PowerLevelsEventContent| { - &pl.notifications.room - <= pl.users.get(&pdu.sender).unwrap_or(&ruma::int!(0)) - }; - let deserialize = |pl: PduEvent| { - serde_json::from_value::(pl.content).ok() - }; - if body.contains("@room") - && db - .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(deserialize) - .flatten() - .map_or(false, power_level_cmp) - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - } - ".m.rule.contains_user_name" => { - if let EventType::RoomMessage = &pdu.kind { - let msg_content = - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { - Error::bad_database("PDU contained bad message content") - })?; - if let MessageType::Text(TextMessageEventContent { body, .. 
}) = - &msg_content.msgtype - { - if body.contains(user.localpart()) { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - } - } - ".m.rule.call" => { - if pdu.kind == EventType::CallInvite { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.encrypted_room_one_to_one" => { - if db.rooms.room_members(&pdu.room_id).count() == 2 - && pdu.kind == EventType::RoomEncrypted - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.room_one_to_one" => { - if db.rooms.room_members(&pdu.room_id).count() == 2 - && pdu.kind == EventType::RoomMessage - { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.message" => { - if pdu.kind == EventType::RoomMessage { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - ".m.rule.encrypted" => { - if pdu.kind == EventType::RoomEncrypted { - let tweaks = rule - .actions - .iter() - .filter_map(|a| match a { - Action::SetTweak(tweak) => Some(tweak.clone()), - _ => None, - }) - .collect::>(); - send_notice(unread, pusher, tweaks, pdu, db).await?; - break; - } - } - _ => {} - } + if notify { + send_notice(unread, pusher, tweaks, pdu, db).await?; } Ok(()) diff --git a/src/database/sending.rs b/src/database/sending.rs index 82d2cdd..b0f9c4d 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -16,7 +16,7 @@ use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, events::{push_rules, EventType}, - uint, ServerName, UInt, UserId, + push, uint, ServerName, UInt, UserId, }; use sled::IVec; use tokio::{select, sync::Semaphore}; @@ -428,7 +428,7 @@ impl Sending { .get::(None, &userid, EventType::PushRules) .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? .map(|ev| ev.content.global) - .unwrap_or_else(|| crate::push_rules::default_pushrules(&userid)); + .unwrap_or_else(|| push::Ruleset::server_default(&userid)); let unread: UInt = if let Some(last_read) = db .rooms diff --git a/src/database/users.rs b/src/database/users.rs index e5bc16e..ddbfd38 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -9,6 +9,7 @@ use ruma::{ }, encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, + identifiers::MxcUri, serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; @@ -150,21 +151,22 @@ impl Users { } /// Get a the avatar_url of a user. - pub fn avatar_url(&self, user_id: &UserId) -> Result> { + pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.to_string())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Avatar URL in db is invalid.") - })?)) + .map(|bytes| { + let s = utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; + MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) }) + .transpose() } /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl - .insert(user_id.to_string(), &*avatar_url)?; + .insert(user_id.to_string(), avatar_url.to_string().as_str())?; } else { self.userid_avatarurl.remove(user_id.to_string())?; } diff --git a/src/lib.rs b/src/lib.rs index aed129f..0af46e0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,7 +3,6 @@ pub mod client_server; mod database; mod error; mod pdu; -mod push_rules; mod ruma_wrapper; pub mod server_server; mod utils; diff --git a/src/main.rs b/src/main.rs index 696ce5c..f523abb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,7 +7,6 @@ pub mod server_server; mod database; mod error; mod pdu; -mod push_rules; mod ruma_wrapper; mod utils; diff --git a/src/push_rules.rs b/src/push_rules.rs deleted file mode 100644 index 76a1a61..0000000 --- a/src/push_rules.rs +++ /dev/null @@ -1,256 +0,0 @@ -use ruma::{ - push::{ - Action, ConditionalPushRule, ConditionalPushRuleInit, ContentPushRule, OverridePushRule, - PatternedPushRule, PatternedPushRuleInit, PushCondition, RoomMemberCountIs, Ruleset, Tweak, - UnderridePushRule, - }, - UserId, -}; - -pub fn default_pushrules(user_id: &UserId) -> Ruleset { - let mut rules = Ruleset::default(); - - rules.add(ContentPushRule(contains_user_name_rule(&user_id))); - - for rule in vec![ - master_rule(), - suppress_notices_rule(), - invite_for_me_rule(), - member_event_rule(), - contains_display_name_rule(), - tombstone_rule(), - roomnotif_rule(), - ] { - rules.add(OverridePushRule(rule)); - } - - for rule in vec![ - call_rule(), - encrypted_room_one_to_one_rule(), - room_one_to_one_rule(), - message_rule(), - encrypted_rule(), - ] { - rules.add(UnderridePushRule(rule)); - } - - rules -} - -pub fn master_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::DontNotify], - default: true, - enabled: false, - rule_id: ".m.rule.master".to_owned(), - conditions: vec![], - } - .into() -} - -pub fn suppress_notices_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::DontNotify], - default: true, - enabled: true, - rule_id: ".m.rule.suppress_notices".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "content.msgtype".to_owned(), - pattern: "m.notice".to_owned(), - }], - } - .into() -} - -pub fn invite_for_me_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.invite_for_me".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "content.membership".to_owned(), - pattern: "m.invite".to_owned(), - }], - } - .into() -} - -pub fn member_event_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::DontNotify], - default: true, - enabled: true, - rule_id: ".m.rule.member_event".to_owned(), - 
conditions: vec![PushCondition::EventMatch { - key: "content.membership".to_owned(), - pattern: "type".to_owned(), - }], - } - .into() -} - -pub fn contains_display_name_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(true)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.contains_display_name".to_owned(), - conditions: vec![PushCondition::ContainsDisplayName], - } - .into() -} - -pub fn tombstone_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(true))], - default: true, - enabled: true, - rule_id: ".m.rule.tombstone".to_owned(), - conditions: vec![ - PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.tombstone".to_owned(), - }, - PushCondition::EventMatch { - key: "state_key".to_owned(), - pattern: "".to_owned(), - }, - ], - } - .into() -} - -pub fn roomnotif_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(true))], - default: true, - enabled: true, - rule_id: ".m.rule.roomnotif".to_owned(), - conditions: vec![ - PushCondition::EventMatch { - key: "content.body".to_owned(), - pattern: "@room".to_owned(), - }, - PushCondition::SenderNotificationPermission { - key: "room".to_owned(), - }, - ], - } - .into() -} - -pub fn contains_user_name_rule(user_id: &UserId) -> PatternedPushRule { - PatternedPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(true)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.contains_user_name".to_owned(), - pattern: user_id.localpart().to_owned(), - } - .into() -} - -pub fn call_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("ring".to_owned())), - Action::SetTweak(Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.call".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.call.invite".to_owned(), - }], - } - .into() -} - -pub fn encrypted_room_one_to_one_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.encrypted_room_one_to_one".to_owned(), - conditions: vec![ - PushCondition::RoomMemberCount { - is: RoomMemberCountIs::from(2_u32.into()..), - }, - PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.encrypted".to_owned(), - }, - ], - } - .into() -} - -pub fn room_one_to_one_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![ - Action::Notify, - Action::SetTweak(Tweak::Sound("default".to_owned())), - Action::SetTweak(Tweak::Highlight(false)), - ], - default: true, - enabled: true, - rule_id: ".m.rule.room_one_to_one".to_owned(), - conditions: vec![ - PushCondition::RoomMemberCount { - is: RoomMemberCountIs::from(2_u32.into()..), - }, - PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }, - ], - } - .into() -} - -pub fn message_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(false))], - default: true, - enabled: true, - 
rule_id: ".m.rule.message".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.message".to_owned(), - }], - } - .into() -} - -pub fn encrypted_rule() -> ConditionalPushRule { - ConditionalPushRuleInit { - actions: vec![Action::Notify, Action::SetTweak(Tweak::Highlight(false))], - default: true, - enabled: true, - rule_id: ".m.rule.encrypted".to_owned(), - conditions: vec![PushCondition::EventMatch { - key: "type".to_owned(), - pattern: "m.room.encrypted".to_owned(), - }], - } - .into() -} diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 9787e2d..8e1d34f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,5 @@ use crate::Error; use ruma::{ - api::OutgoingRequest, identifiers::{DeviceId, UserId}, Outgoing, }; @@ -28,7 +27,7 @@ use { /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { +pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, @@ -37,7 +36,7 @@ pub struct Ruma { } #[cfg(feature = "conduit_bin")] -impl<'a, T: Outgoing + OutgoingRequest> FromTransformedData<'a> for Ruma +impl<'a, T: Outgoing> FromTransformedData<'a> for Ruma where T::Incoming: IncomingRequest, { @@ -56,6 +55,8 @@ where request: &'a Request<'_>, outcome: Transformed<'a, Self>, ) -> FromDataFuture<'a, Self, Self::Error> { + let metadata = T::Incoming::METADATA; + Box::pin(async move { let data = rocket::try_outcome!(outcome.owned()); let db = request @@ -80,7 +81,7 @@ where .and_then(|as_token| as_token.as_str()) .map_or(false, |as_token| token.as_deref() == Some(as_token)) }) { - match T::METADATA.authentication { + match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { let user_id = request.get_query_value::("user_id").map_or_else( || { @@ -112,7 +113,7 @@ where AuthScheme::None => (None, None, true), } } else { - match T::METADATA.authentication { + match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { if let Some(token) = token { match db.users.find_from_token(&token).unwrap() { @@ -166,7 +167,7 @@ where } } -impl Deref for Ruma { +impl Deref for Ruma { type Target = T::Incoming; fn deref(&self) -> &Self::Target { From dbe8c2ce19ad5c7e8493b717841f0a2e558be27b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 5 Apr 2021 21:44:21 +0200 Subject: [PATCH 085/103] Remove lots of redundant string allocations --- src/database.rs | 4 +-- src/database/account_data.rs | 10 +++--- src/database/key_backups.rs | 30 ++++++++--------- src/database/rooms.rs | 16 +++++----- src/database/rooms/edus.rs | 62 +++++++++++++++++------------------- src/database/uiaa.rs | 4 +-- src/database/users.rs | 56 +++++++++++++++----------------- 7 files changed, 87 insertions(+), 95 deletions(-) diff --git a/src/database.rs b/src/database.rs index d8734b5..bacf3b9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -217,7 +217,7 @@ impl Database { } pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { - let userid_bytes = user_id.to_string().as_bytes().to_vec(); + let userid_bytes = user_id.as_bytes().to_vec(); let mut userid_prefix = userid_bytes.clone(); userid_prefix.push(0xff); @@ -241,7 +241,7 @@ impl Database { // Events for rooms we are in for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - let roomid_bytes = room_id.to_string().as_bytes().to_vec(); + let roomid_bytes = room_id.as_bytes().to_vec(); let mut roomid_prefix = 
roomid_bytes.clone(); roomid_prefix.push(0xff); diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 38e6c32..f3832ea 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -30,7 +30,7 @@ impl AccountData { .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.extend_from_slice(&user_id.as_bytes()); prefix.push(0xff); // Remove old entry @@ -42,7 +42,7 @@ impl AccountData { let mut key = prefix; key.extend_from_slice(&globals.next_count()?.to_be_bytes()); key.push(0xff); - key.extend_from_slice(event_type.to_string().as_bytes()); + key.extend_from_slice(event_type.as_ref().as_bytes()); let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling if json.get("type").is_none() || json.get("content").is_none() { @@ -89,7 +89,7 @@ impl AccountData { .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.extend_from_slice(&user_id.as_bytes()); prefix.push(0xff); // Skip the data that's exactly at since, because we sent that last time @@ -135,7 +135,7 @@ impl AccountData { .as_bytes() .to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&user_id.to_string().as_bytes()); + prefix.extend_from_slice(&user_id.as_bytes()); prefix.push(0xff); let kind = kind.clone(); @@ -148,7 +148,7 @@ impl AccountData { k.rsplit(|&b| b == 0xff) .next() .map(|current_event_type| { - current_event_type == kind.to_string().as_bytes() + current_event_type == kind.as_ref().as_bytes() }) .unwrap_or(false) }) diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 4c65354..0f9af2e 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -24,7 +24,7 @@ impl KeyBackups { ) -> Result { let version = globals.next_count()?.to_string(); - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); @@ -39,7 +39,7 @@ impl KeyBackups { } pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); @@ -67,7 +67,7 @@ impl KeyBackups { backup_metadata: &BackupAlgorithm, globals: &super::globals::Globals, ) -> Result { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); @@ -89,7 +89,7 @@ impl KeyBackups { } pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); self.backupid_algorithm .scan_prefix(&prefix) @@ -113,7 +113,7 @@ impl KeyBackups { } pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -132,7 +132,7 @@ impl KeyBackups { key_data: &KeyBackupData, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -147,7 +147,7 @@ impl KeyBackups { .insert(&key, &globals.next_count()?.to_be_bytes())?; key.push(0xff); - 
key.extend_from_slice(room_id.to_string().as_bytes()); + key.extend_from_slice(room_id.as_bytes()); key.push(0xff); key.extend_from_slice(session_id.as_bytes()); @@ -160,7 +160,7 @@ impl KeyBackups { } pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -168,7 +168,7 @@ impl KeyBackups { } pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); @@ -187,7 +187,7 @@ impl KeyBackups { user_id: &UserId, version: &str, ) -> Result> { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); @@ -240,7 +240,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, ) -> BTreeMap { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); @@ -278,7 +278,7 @@ impl KeyBackups { room_id: &RoomId, session_id: &str, ) -> Result> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); key.push(0xff); @@ -297,7 +297,7 @@ impl KeyBackups { } pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); key.push(0xff); @@ -320,7 +320,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, ) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); key.push(0xff); @@ -346,7 +346,7 @@ impl KeyBackups { room_id: &RoomId, session_id: &str, ) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&version.as_bytes()); key.push(0xff); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5d43626..ef1e558 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -151,7 +151,7 @@ impl Rooms { event_type: &EventType, state_key: &str, ) -> Result> { - let mut key = event_type.to_string().as_bytes().to_vec(); + let mut key = event_type.as_ref().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); @@ -416,7 +416,7 @@ impl Rooms { /// Returns the pdu's id. pub fn get_pdu_id(&self, event_id: &EventId) -> Result> { self.eventid_pduid - .get(event_id.to_string().as_bytes())? + .get(event_id.as_bytes())? 
.map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } @@ -690,7 +690,7 @@ impl Rooms { .split_terminator(|c: char| !c.is_alphanumeric()) .map(str::to_lowercase) { - let mut key = pdu.room_id.to_string().as_bytes().to_vec(); + let mut key = pdu.room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(word.as_bytes()); key.push(0xff); @@ -1264,7 +1264,7 @@ impl Rooms { room_id: &RoomId, since: u64, ) -> Result>> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); // Skip the first pdu if it's exactly at since, because we sent that last time @@ -1298,7 +1298,7 @@ impl Rooms { until: u64, ) -> impl Iterator> { // Create the first part of the full pdu id - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut current = prefix.clone(); @@ -1332,7 +1332,7 @@ impl Rooms { from: u64, ) -> impl Iterator> { // Create the first part of the full pdu id - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut current = prefix.clone(); @@ -1883,9 +1883,9 @@ impl Rooms { } pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.to_string().as_bytes().to_vec(); + let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.to_string().as_bytes()); + userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.roomuseroncejoinedids.get(userroom_id)?.is_some()) } diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 084e4a1..9e43fe1 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -34,7 +34,7 @@ impl RoomEdus { event: EduEvent, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); // Remove old entry @@ -49,7 +49,7 @@ impl RoomEdus { key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element") - == user_id.to_string().as_bytes() + == user_id.as_bytes() }) { // This is the old room_latest @@ -59,7 +59,7 @@ impl RoomEdus { let mut room_latest_id = prefix; room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); room_latest_id.push(0xff); - room_latest_id.extend_from_slice(&user_id.to_string().as_bytes()); + room_latest_id.extend_from_slice(&user_id.as_bytes()); self.readreceiptid_readreceipt.insert( room_latest_id, @@ -76,7 +76,7 @@ impl RoomEdus { room_id: &RoomId, since: u64, ) -> Result>>> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut first_possible_edu = prefix.clone(); @@ -102,9 +102,9 @@ impl RoomEdus { count: u64, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); + key.extend_from_slice(&user_id.as_bytes()); self.roomuserid_privateread .insert(&key, &count.to_be_bytes())?; @@ -118,9 +118,9 @@ impl RoomEdus { /// Returns the private read marker. 
#[tracing::instrument(skip(self))] pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); + key.extend_from_slice(&user_id.as_bytes()); self.roomuserid_privateread.get(key)?.map_or(Ok(None), |v| { Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { @@ -131,9 +131,9 @@ impl RoomEdus { /// Returns the count of the last typing update in this room. pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&user_id.to_string().as_bytes()); + key.extend_from_slice(&user_id.as_bytes()); Ok(self .roomuserid_lastprivatereadupdate @@ -155,7 +155,7 @@ impl RoomEdus { timeout: u64, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let count = globals.next_count()?.to_be_bytes(); @@ -166,10 +166,10 @@ impl RoomEdus { room_typing_id.extend_from_slice(&count); self.typingid_userid - .insert(&room_typing_id, &*user_id.to_string().as_bytes())?; + .insert(&room_typing_id, &*user_id.as_bytes())?; self.roomid_lasttypingupdate - .insert(&room_id.to_string().as_bytes(), &count)?; + .insert(&room_id.as_bytes(), &count)?; Ok(()) } @@ -181,7 +181,7 @@ impl RoomEdus { room_id: &RoomId, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let user_id = user_id.to_string(); @@ -200,10 +200,8 @@ impl RoomEdus { } if found_outdated { - self.roomid_lasttypingupdate.insert( - &room_id.to_string().as_bytes(), - &globals.next_count()?.to_be_bytes(), - )?; + self.roomid_lasttypingupdate + .insert(&room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; } Ok(()) @@ -215,7 +213,7 @@ impl RoomEdus { room_id: &RoomId, globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let current_timestamp = utils::millis_since_unix_epoch(); @@ -248,10 +246,8 @@ impl RoomEdus { } if found_outdated { - self.roomid_lasttypingupdate.insert( - &room_id.to_string().as_bytes(), - &globals.next_count()?.to_be_bytes(), - )?; + self.roomid_lasttypingupdate + .insert(&room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; } Ok(()) @@ -268,7 +264,7 @@ impl RoomEdus { Ok(self .roomid_lasttypingupdate - .get(&room_id.to_string().as_bytes())? + .get(&room_id.as_bytes())? 
.map_or(Ok::<_, Error>(None), |bytes| { Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") @@ -281,7 +277,7 @@ impl RoomEdus { &self, room_id: &RoomId, ) -> Result> { - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut user_ids = Vec::new(); @@ -322,11 +318,11 @@ impl RoomEdus { let count = globals.next_count()?.to_be_bytes(); - let mut presence_id = room_id.to_string().as_bytes().to_vec(); + let mut presence_id = room_id.as_bytes().to_vec(); presence_id.push(0xff); presence_id.extend_from_slice(&count); presence_id.push(0xff); - presence_id.extend_from_slice(&presence.sender.to_string().as_bytes()); + presence_id.extend_from_slice(&presence.sender.as_bytes()); self.presenceid_presence.insert( presence_id, @@ -334,7 +330,7 @@ impl RoomEdus { )?; self.userid_lastpresenceupdate.insert( - &user_id.to_string().as_bytes(), + &user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; @@ -345,7 +341,7 @@ impl RoomEdus { #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( - &user_id.to_string().as_bytes(), + &user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; @@ -355,7 +351,7 @@ impl RoomEdus { /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. pub fn last_presence_update(&self, user_id: &UserId) -> Result> { self.userid_lastpresenceupdate - .get(&user_id.to_string().as_bytes())? + .get(&user_id.as_bytes())? .map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") @@ -398,7 +394,7 @@ impl RoomEdus { .try_into() .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.to_string().as_bytes().to_vec(); + let mut presence_id = room_id.as_bytes().to_vec(); presence_id.push(0xff); presence_id.extend_from_slice(&count); presence_id.push(0xff); @@ -424,7 +420,7 @@ impl RoomEdus { } self.userid_lastpresenceupdate.insert( - &user_id.to_string().as_bytes(), + &user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), )?; } @@ -443,7 +439,7 @@ impl RoomEdus { ) -> Result> { self.presence_maintain(rooms, globals)?; - let mut prefix = room_id.to_string().as_bytes().to_vec(); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let mut first_possible_edu = prefix.clone(); diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 381a701..4c33b86 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -148,7 +148,7 @@ impl Uiaa { device_id: &DeviceId, uiaainfo: Option<&UiaaInfo>, ) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -170,7 +170,7 @@ impl Uiaa { device_id: &DeviceId, session: &str, ) -> Result { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); diff --git a/src/database/users.rs b/src/database/users.rs index ddbfd38..c794e52 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -185,7 +185,7 @@ impl Users { // This method 
should never be called for nonexistent users. assert!(self.exists(user_id)?); - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -208,7 +208,7 @@ impl Users { /// Removes a device from a user. pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -234,7 +234,7 @@ impl Users { /// Returns an iterator over all device ids of this user. pub fn all_device_ids(&self, user_id: &UserId) -> impl Iterator>> { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata self.userdeviceid_metadata @@ -254,7 +254,7 @@ impl Users { /// Replaces the access token of one device. pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -282,7 +282,7 @@ impl Users { one_time_key_value: &OneTimeKey, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); @@ -305,10 +305,8 @@ impl Users { .expect("OneTimeKey::to_string always works"), )?; - self.userid_lastonetimekeyupdate.insert( - &user_id.to_string().as_bytes(), - &globals.next_count()?.to_be_bytes(), - )?; + self.userid_lastonetimekeyupdate + .insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; Ok(()) } @@ -316,7 +314,7 @@ impl Users { #[tracing::instrument(skip(self))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate - .get(&user_id.to_string().as_bytes())? + .get(&user_id.as_bytes())? 
.map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") @@ -332,18 +330,16 @@ impl Users { key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, ) -> Result> { - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); prefix.push(b'"'); // Annoying quotation mark - prefix.extend_from_slice(key_algorithm.to_string().as_bytes()); + prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); prefix.push(b':'); - self.userid_lastonetimekeyupdate.insert( - &user_id.to_string().as_bytes(), - &globals.next_count()?.to_be_bytes(), - )?; + self.userid_lastonetimekeyupdate + .insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; self.onetimekeyid_onetimekeys .scan_prefix(&prefix) @@ -373,7 +369,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -409,7 +405,7 @@ impl Users { rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -434,7 +430,7 @@ impl Users { ) -> Result<()> { // TODO: Check signatures - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // Master key @@ -532,9 +528,9 @@ impl Users { rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = target_id.to_string().as_bytes().to_vec(); + let mut key = target_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(key_id.to_string().as_bytes()); + key.extend_from_slice(key_id.as_bytes()); let mut cross_signing_key = serde_json::from_slice::(&self.keyid_key.get(&key)?.ok_or( @@ -617,14 +613,14 @@ impl Users { continue; } - let mut key = room_id.to_string().as_bytes().to_vec(); + let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&count); self.keychangeid_userid.insert(key, &*user_id.to_string())?; } - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&count); self.keychangeid_userid.insert(key, &*user_id.to_string())?; @@ -637,7 +633,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); @@ -724,7 +720,7 @@ impl Users { content: serde_json::Value, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = target_user_id.to_string().as_bytes().to_vec(); + let mut key = target_user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(target_device_id.as_bytes()); key.push(0xff); @@ -751,7 +747,7 @@ impl Users { ) -> Result>> { let mut events = Vec::new(); - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); @@ -773,7 +769,7 @@ impl Users { device_id: &DeviceId, until: u64, ) -> Result<()> 
{ - let mut prefix = user_id.to_string().as_bytes().to_vec(); + let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); @@ -808,7 +804,7 @@ impl Users { device_id: &DeviceId, device: &Device, ) -> Result<()> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -831,7 +827,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); + let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -845,7 +841,7 @@ impl Users { } pub fn all_devices_metadata(&self, user_id: &UserId) -> impl Iterator> { - let mut key = user_id.to_string().as_bytes().to_vec(); + let mut key = user_id.as_bytes().to_vec(); key.push(0xff); self.userdeviceid_metadata From dc031fff95594fbf572a8a7564f761518e9889d2 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Mon, 5 Apr 2021 21:46:10 +0200 Subject: [PATCH 086/103] Remove redundant calls to .iter() and .into_iter() --- src/database/rooms.rs | 2 +- src/server_server.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ef1e558..b2043d1 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -531,7 +531,7 @@ impl Rooms { self.roomid_pduleaves.remove(key?)?; } - for event_id in event_ids.iter() { + for event_id in event_ids { let mut key = prefix.to_owned(); key.extend_from_slice(event_id.as_bytes()); self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; diff --git a/src/server_server.rs b/src/server_server.rs index bb0b9af..421b4f8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -552,7 +552,7 @@ pub async fn send_transaction_message_route<'a>( // events that it references. 
     let mut auth_cache = EventMap::new();
 
-    for pdu in body.pdus.iter() {
+    for pdu in &body.pdus {
         // We do not add the event_id field to the pdu here because of signature and hashes checks
         let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) {
             Ok(t) => t,
@@ -757,7 +757,7 @@ fn handle_incoming_pdu<'a>(
 
         // Build map of auth events
         let mut auth_events = BTreeMap::new();
-        for id in incoming_pdu.auth_events.iter() {
+        for id in &incoming_pdu.auth_events {
             let auth_event = auth_cache.get(id).ok_or_else(|| {
                 "Auth event not found, event failed recursive auth checks.".to_string()
             })?;
@@ -869,7 +869,7 @@ fn handle_incoming_pdu<'a>(
         };
 
         let mut state = BTreeMap::new();
-        for pdu in state_vec.into_iter() {
+        for pdu in state_vec {
             match state.entry((pdu.kind.clone(), pdu.state_key.clone().ok_or_else(|| "Found non-state pdu in state events.".to_owned())?)) {
                 Entry::Vacant(v) => {
                     v.insert(pdu);
@@ -1268,7 +1268,7 @@ pub(crate) async fn fetch_signing_keys(
         .await
     {
         debug!("Got signing keys: {:?}", keys);
-        for k in keys.server_keys.into_iter() {
+        for k in keys.server_keys {
             db.globals.add_signing_key(origin, &k)?;
             result.extend(
                 k.verify_keys

From f3f95a73d0d667afa213f01553e5294a1b30e502 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timo=20K=C3=B6sters?=
Date: Wed, 7 Apr 2021 15:56:57 +0200
Subject: [PATCH 087/103] improvement: /event route

---
 src/main.rs          |  1 +
 src/server_server.rs | 28 ++++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/src/main.rs b/src/main.rs
index f523abb..4ccc025 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -164,6 +164,7 @@ fn setup_rocket() -> (rocket::Rocket, Config) {
                 server_server::get_public_rooms_route,
                 server_server::get_public_rooms_filtered_route,
                 server_server::send_transaction_message_route,
+                server_server::get_event_route,
                 server_server::get_missing_events_route,
                 server_server::get_room_state_ids_route,
                 server_server::get_profile_information_route,
diff --git a/src/server_server.rs b/src/server_server.rs
index 421b4f8..84cfe61 100644
--- a/src/server_server.rs
+++ b/src/server_server.rs
@@ -1388,6 +1388,34 @@ pub(crate) fn append_incoming_pdu(
     Ok(())
 }
 
+#[cfg_attr(
+    feature = "conduit_bin",
+    get("/_matrix/federation/v1/event/<_>", data = "<body>")
+)]
+#[tracing::instrument(skip(db, body))]
+pub fn get_event_route<'a>(
+    db: State<'a, Database>,
+    body: Ruma<get_event::v1::Request<'_>>,
+) -> ConduitResult<get_event::v1::Response> {
+    if !db.globals.allow_federation() {
+        return Err(Error::bad_config("Federation is disabled."));
+    }
+
+    Ok(get_event::v1::Response {
+        origin: db.globals.server_name().to_owned(),
+        origin_server_ts: SystemTime::now(),
+        pdu: PduEvent::convert_to_outgoing_federation_event(
+            serde_json::from_value(
+                db.rooms
+                    .get_pdu_json(&body.event_id)?
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?, + ) + .map_err(|_| Error::bad_database("Invalid pdu in database."))?, + ), + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 51aa6448bcb79892b15acceba5299caa3c3f17c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 7 Apr 2021 17:58:33 +0200 Subject: [PATCH 088/103] fix: use sled main to avoid deadlock --- Cargo.lock | 123 ++++++++++++++++++++++++++++------------------------- Cargo.toml | 4 +- 2 files changed, 69 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c6c1769..2b960f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -109,6 +109,15 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +[[package]] +name = "bitmaps" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" +dependencies = [ + "typenum", +] + [[package]] name = "blake2b_simd" version = "0.5.11" @@ -258,19 +267,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam-epoch" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "lazy_static", - "memoffset", - "scopeguard", -] - [[package]] name = "crossbeam-utils" version = "0.8.3" @@ -547,15 +543,6 @@ dependencies = [ "slab", ] -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - [[package]] name = "getrandom" version = "0.1.16" @@ -739,6 +726,20 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "im" +version = "15.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "111c1983f3c5bb72732df25cddacee9b546d08325fb584b5ebd38148be7b0246" +dependencies = [ + "bitmaps", + "rand_core 0.5.1", + "rand_xoshiro", + "sized-chunks", + "typenum", + "version_check", +] + [[package]] name = "image" version = "0.23.14" @@ -806,15 +807,6 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" -[[package]] -name = "itertools" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.10.0" @@ -955,15 +947,6 @@ version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" -[[package]] -name = "memoffset" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" -dependencies = [ - "autocfg", -] - [[package]] name = "mime" version = "0.3.16" @@ -1381,7 +1364,7 @@ checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha", - "rand_core", + "rand_core 0.6.2", "rand_hc", ] @@ -1392,9 +1375,15 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.2", ] +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" + [[package]] name = "rand_core" version = "0.6.2" @@ -1410,7 +1399,16 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core", + "rand_core 0.6.2", +] + +[[package]] +name = "rand_xoshiro" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004" +dependencies = [ + "rand_core 0.5.1", ] [[package]] @@ -2046,6 +2044,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "sized-chunks" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65e65d6a9f13cd78f361ea5a2cf53a45d67cdda421ba0316b9be101560f3d207" +dependencies = [ + "bitmaps", + "typenum", +] + [[package]] name = "slab" version = "0.4.2" @@ -2055,14 +2063,11 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "sled" version = "0.34.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" +source = "git+https://github.com/spacejam/sled.git?rev=e4640e0773595229f398438886f19bca6f7326a2#e4640e0773595229f398438886f19bca6f7326a2" dependencies = [ "crc32fast", - "crossbeam-epoch", - "crossbeam-utils", "fs2", - "fxhash", + "im", "libc", "log", "parking_lot", @@ -2122,7 +2127,7 @@ name = "state-res" version = "0.1.0" source = "git+https://github.com/ruma/state-res?rev=af450d0fe2b0e1c890284d0bc3b9d6d4008ac475#af450d0fe2b0e1c890284d0bc3b9d6d4008ac475" dependencies = [ - "itertools 0.10.0", + "itertools", "log", "maplit", "ruma", @@ -2547,6 +2552,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "typenum" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" + [[package]] name = "ubyte" version = "0.10.1" @@ -2819,18 +2830,18 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zstd" -version = "0.5.4+zstd.1.4.7" +version = "0.6.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" +version = "3.0.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" dependencies = [ "libc", "zstd-sys", @@ -2838,12 +2849,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" +version = "1.4.20+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" dependencies = [ "cc", - "glob", - "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 7bd1557..caf0bdf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,9 @@ state-res = { git = "https://github.com/ruma/state-res", rev = "af450d0fe2b0e1c8 # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" # Used for storing data permanently -sled = { version = "0.34.6", default-features = false, features = ["compression"] } +#sled = { version = "0.34.6", default-features = false, features = ["compression"] } +sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } + # Used for emitting log entries log = "0.4.14" # Used for rocket<->ruma conversions From 84f4ce73e511c646a97a12126ebcc96d9b90991b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 9 Apr 2021 21:38:16 +0200 Subject: [PATCH 089/103] fix: membership deserializing --- Cargo.lock | 14 +++++------ Cargo.toml | 7 ++---- src/client_server/membership.rs | 16 ++++++++----- src/client_server/room.rs | 28 ++++++++++++---------- src/database.rs | 2 +- src/database/pusher.rs | 2 +- src/database/rooms.rs | 41 ++++++++++++++++++--------------- src/database/sending.rs | 18 +++++++++------ src/utils.rs | 9 ++++---- 9 files changed, 74 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b960f3..b9b9af7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -877,9 +877,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56d855069fafbb9b344c0f962150cd2c1187975cb1c22c1522c240d8c4986714" +checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" [[package]] name = "linked-hash-map" @@ -2125,7 +2125,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=af450d0fe2b0e1c890284d0bc3b9d6d4008ac475#af450d0fe2b0e1c890284d0bc3b9d6d4008ac475" +source = "git+https://github.com/timokoesters/state-res?rev=1ec42ea2fc0b0728bf027a5899839ad94bb3091b#1ec42ea2fc0b0728bf027a5899839ad94bb3091b" dependencies = [ "itertools", "log", @@ -2578,9 +2578,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" dependencies = [ "matches", ] @@ -2760,9 +2760,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "wildmatch" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ae7ce410f81ba679081aac1d4874f3b1c328535b630209aa5b4cdaaf895e20" +checksum = "d6c48bd20df7e4ced539c12f570f937c6b4884928a87fee70a479d72f031d4e0" [[package]] name = "winapi" diff --git a/Cargo.toml b/Cargo.toml index caf0bdf..35037ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,11 +23,8 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570 #ruma = { path = "../ruma/ruma", 
features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution -state-res = { git = "https://github.com/ruma/state-res", rev = "af450d0fe2b0e1c890284d0bc3b9d6d4008ac475", features = ["unstable-pre-spec"] } -# TODO: remove the gen-eventid feature -#state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { git = "https://github.com/ruma/state-res", rev = "1621a491a9e867a1ad4dff9f2f92b0c1e2d44aa0", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/timokoesters/state-res", rev = "1ec42ea2fc0b0728bf027a5899839ad94bb3091b", features = ["unstable-pre-spec"] } +#state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 5d630b4..3f4f23f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -604,12 +604,16 @@ async fn join_room_by_id_helper( db.rooms.update_membership( &pdu.room_id, &target_user_id, - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid member event content.", - ) + serde_json::from_value::( + pdu.content + .get("membership") + .ok_or_else(|| { + Error::BadServerResponse("Invalid member event content") + })? + .clone(), + ) + .map_err(|_| { + Error::BadServerResponse("Invalid membership state content.") })?, &pdu.sender, &db.account_data, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index f8d980b..bba7f95 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -91,10 +91,24 @@ pub async fn create_room_route( )?; // 3. Power levels + + // Figure out preset. We need it for preset specific events + let preset = body + .preset + .clone() + .unwrap_or_else(|| match &body.visibility { + room::Visibility::Private => create_room::RoomPreset::PrivateChat, + room::Visibility::Public => create_room::RoomPreset::PublicChat, + room::Visibility::_Custom(_) => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom + }); + let mut users = BTreeMap::new(); users.insert(sender_user.clone(), 100.into()); - for invite_ in &body.invite { - users.insert(invite_.clone(), 100.into()); + + if preset == create_room::RoomPreset::TrustedPrivateChat { + for invite_ in &body.invite { + users.insert(invite_.clone(), 100.into()); + } } let power_levels_content = if let Some(power_levels) = &body.power_level_content_override { @@ -133,16 +147,6 @@ pub async fn create_room_route( // 4. Events set by preset - // Figure out preset. 
We need it for preset specific events - let preset = body - .preset - .clone() - .unwrap_or_else(|| match &body.visibility { - room::Visibility::Private => create_room::RoomPreset::PrivateChat, - room::Visibility::Public => create_room::RoomPreset::PublicChat, - room::Visibility::_Custom(s) => create_room::RoomPreset::_Custom(s.into()), - }); - // 4.1 Join Rules db.rooms.build_and_append_pdu( PduBuilder { diff --git a/src/database.rs b/src/database.rs index bacf3b9..cb0df15 100644 --- a/src/database.rs +++ b/src/database.rs @@ -108,7 +108,7 @@ impl Database { pub async fn load_or_create(config: Config) -> Result { let db = sled::Config::default() .path(&config.database_path) - .cache_capacity(config.cache_capacity as u64) + .cache_capacity(config.cache_capacity as usize) .use_compression(true) .open()?; diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 6a88d5e..27e5926 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -196,7 +196,7 @@ pub async fn send_push_notice( let mut notify = None; let mut tweaks = Vec::new(); - for action in ruleset.get_actions(&pdu.to_sync_state_event(), &ctx) { + for action in ruleset.get_actions(&pdu.to_sync_room_event(), &ctx) { let n = match action { Action::DontNotify => false, // TODO: Implement proper support for coalesce diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b2043d1..81697e3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -465,7 +465,7 @@ impl Rooms { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { + pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) @@ -671,11 +671,21 @@ impl Rooms { self.update_membership( &pdu.room_id, &target_user_id, - serde_json::from_value::(pdu.content.clone()) - .map_err(|_| { + serde_json::from_value::( + pdu.content + .get("membership") + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content", + ) + })? 
+ .clone(), + ) + .map_err(|_| { Error::BadRequest( ErrorKind::InvalidParam, - "Invalid member event content.", + "Invalid membership state content.", ) })?, &pdu.sender, @@ -895,19 +905,14 @@ impl Rooms { .scan_prefix(&old_shortstatehash) .filter_map(|pdu| pdu.map_err(|e| error!("{}", e)).ok()) // Chop the old_shortstatehash out leaving behind the short state key - .map(|(k, v)| { - ( - k.subslice(old_shortstatehash.len(), k.len() - old_shortstatehash.len()), - v, - ) - }) - .collect::>() + .map(|(k, v)| (k[old_shortstatehash.len()..].to_vec(), v)) + .collect::, IVec>>() } else { HashMap::new() }; if let Some(state_key) = &new_pdu.state_key { - let mut new_state: HashMap = old_state; + let mut new_state: HashMap, IVec> = old_state; let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec(); new_state_key.push(0xff); @@ -935,7 +940,7 @@ impl Rooms { } }; - new_state.insert(shortstatekey.into(), shorteventid.into()); + new_state.insert(shortstatekey, shorteventid.into()); let new_state_hash = self.calculate_hash( &new_state @@ -1377,13 +1382,11 @@ impl Rooms { &self, room_id: &RoomId, user_id: &UserId, - member_content: member::MemberEventContent, + membership: member::MembershipState, sender: &UserId, account_data: &super::account_data::AccountData, globals: &super::globals::Globals, ) -> Result<()> { - let membership = member_content.membership; - let mut roomserver_id = room_id.as_bytes().to_vec(); roomserver_id.push(0xff); roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); @@ -1633,7 +1636,7 @@ impl Rooms { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result<(impl Iterator + 'a, Vec)> { + ) -> Result<(impl Iterator> + 'a, Vec)> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1661,7 +1664,7 @@ impl Rooms { .0 + 1; // +1 because the pdu id starts AFTER the separator - let pdu_id = key.subslice(pduid_index, key.len() - pduid_index); + let pdu_id = key[pduid_index..].to_vec(); Ok::<_, Error>(pdu_id) }) @@ -1700,7 +1703,7 @@ impl Rooms { .0 + 1; // +1 because the room id starts AFTER the separator - let room_id = key.subslice(roomid_index, key.len() - roomid_index); + let room_id = key[roomid_index..].to_vec(); Ok::<_, Error>(room_id) }) diff --git a/src/database/sending.rs b/src/database/sending.rs index b0f9c4d..779df06 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -47,7 +47,7 @@ impl Sending { let mut futures = FuturesUnordered::new(); // Retry requests we could not finish yet - let mut current_transactions = HashMap::>::new(); + let mut current_transactions = HashMap::>>::new(); for (key, outgoing_kind, pdu) in servercurrentpdus .iter() @@ -55,7 +55,7 @@ impl Sending { .filter_map(|(key, _)| { Self::parse_servercurrentpdus(&key) .ok() - .map(|(k, p)| (key, k, p)) + .map(|(k, p)| (key, k, p.to_vec())) }) { if pdu.is_empty() { @@ -150,7 +150,7 @@ impl Sending { .keys() .filter_map(|r| r.ok()) .map(|k| { - k.subslice(prefix.len(), k.len() - prefix.len()) + k[prefix.len()..].to_vec() }) .take(30) .collect::>(); @@ -211,7 +211,11 @@ impl Sending { }; }, Some(event) = &mut subscriber => { - if let sled::Event::Insert { key, .. 
} = event { + for (_tree, key, value_opt) in &event { + if value_opt.is_none() { + continue; + } + let servernamepduid = key.clone(); let exponential_backoff = |(tries, instant): &(u32, Instant)| { @@ -265,7 +269,7 @@ impl Sending { futures.push( Self::handle_event( outgoing_kind, - vec![pdu_id], + vec![pdu_id.to_vec()], &db, ) ); @@ -310,7 +314,7 @@ impl Sending { } #[tracing::instrument] - fn calculate_hash(keys: &[IVec]) -> Vec { + fn calculate_hash(keys: &[Vec]) -> Vec { // We only hash the pdu's event ids, not the whole pdu let bytes = keys.join(&0xff); let hash = digest::digest(&digest::SHA256, &bytes); @@ -320,7 +324,7 @@ impl Sending { #[tracing::instrument(skip(db))] async fn handle_event( kind: OutgoingKind, - pdu_ids: Vec, + pdu_ids: Vec>, db: &Database, ) -> std::result::Result { match &kind { diff --git a/src/utils.rs b/src/utils.rs index 0783567..45d9de8 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -2,7 +2,6 @@ use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; -use sled::IVec; use std::{ cmp, convert::TryInto, @@ -70,10 +69,10 @@ pub fn calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } -pub fn common_elements( - mut iterators: impl Iterator>, - check_order: impl Fn(&IVec, &IVec) -> Ordering, -) -> Option> { +pub fn common_elements<'a>( + mut iterators: impl Iterator>>, + check_order: impl Fn(&[u8], &[u8]) -> Ordering, +) -> Option>> { let first_iterator = iterators.next()?; let mut other_iterators = iterators.map(|i| i.peekable()).collect::>(); From 044e65afccb87da5012117b3192a5bd4dbaf5150 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 10:12:05 +0200 Subject: [PATCH 090/103] fix: move back to sled stable --- Cargo.lock | 123 +++++++++++++++++++--------------------- Cargo.toml | 4 +- src/database.rs | 5 +- src/database/sending.rs | 10 ++-- 4 files changed, 68 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9b9af7..cf881c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -109,15 +109,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "bitmaps" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" -dependencies = [ - "typenum", -] - [[package]] name = "blake2b_simd" version = "0.5.11" @@ -267,6 +258,19 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-epoch" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + [[package]] name = "crossbeam-utils" version = "0.8.3" @@ -543,6 +547,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "getrandom" version = "0.1.16" @@ -726,20 +739,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "im" -version = "15.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"111c1983f3c5bb72732df25cddacee9b546d08325fb584b5ebd38148be7b0246" -dependencies = [ - "bitmaps", - "rand_core 0.5.1", - "rand_xoshiro", - "sized-chunks", - "typenum", - "version_check", -] - [[package]] name = "image" version = "0.23.14" @@ -807,6 +806,15 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.10.0" @@ -947,6 +955,15 @@ version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +[[package]] +name = "memoffset" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.16" @@ -1364,7 +1381,7 @@ checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha", - "rand_core 0.6.2", + "rand_core", "rand_hc", ] @@ -1375,15 +1392,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core 0.6.2", + "rand_core", ] -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" - [[package]] name = "rand_core" version = "0.6.2" @@ -1399,16 +1410,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.6.2", -] - -[[package]] -name = "rand_xoshiro" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004" -dependencies = [ - "rand_core 0.5.1", + "rand_core", ] [[package]] @@ -2044,16 +2046,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "sized-chunks" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e65d6a9f13cd78f361ea5a2cf53a45d67cdda421ba0316b9be101560f3d207" -dependencies = [ - "bitmaps", - "typenum", -] - [[package]] name = "slab" version = "0.4.2" @@ -2063,11 +2055,14 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "sled" version = "0.34.6" -source = "git+https://github.com/spacejam/sled.git?rev=e4640e0773595229f398438886f19bca6f7326a2#e4640e0773595229f398438886f19bca6f7326a2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" dependencies = [ "crc32fast", + "crossbeam-epoch", + "crossbeam-utils", "fs2", - "im", + "fxhash", "libc", "log", "parking_lot", @@ -2127,7 +2122,7 @@ name = "state-res" version = "0.1.0" source = "git+https://github.com/timokoesters/state-res?rev=1ec42ea2fc0b0728bf027a5899839ad94bb3091b#1ec42ea2fc0b0728bf027a5899839ad94bb3091b" dependencies = [ - "itertools", + "itertools 0.10.0", "log", "maplit", "ruma", @@ -2552,12 +2547,6 @@ version = 
"0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "typenum" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" - [[package]] name = "ubyte" version = "0.10.1" @@ -2830,18 +2819,18 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zstd" -version = "0.6.1+zstd.1.4.9" +version = "0.5.4+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" +checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "3.0.1+zstd.1.4.9" +version = "2.0.6+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" +checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" dependencies = [ "libc", "zstd-sys", @@ -2849,10 +2838,12 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.20+zstd.1.4.9" +version = "1.4.18+zstd.1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" +checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" dependencies = [ "cc", + "glob", + "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 35037ee..3109dd8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,8 +29,8 @@ state-res = { git = "https://github.com/timokoesters/state-res", rev = "1ec42ea2 # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" # Used for storing data permanently -#sled = { version = "0.34.6", default-features = false, features = ["compression"] } -sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } +sled = { version = "0.34.6", default-features = false, features = ["compression"] } +#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } # Used for emitting log entries log = "0.4.14" diff --git a/src/database.rs b/src/database.rs index cb0df15..a266c21 100644 --- a/src/database.rs +++ b/src/database.rs @@ -108,7 +108,7 @@ impl Database { pub async fn load_or_create(config: Config) -> Result { let db = sled::Config::default() .path(&config.database_path) - .cache_capacity(config.cache_capacity as usize) + .cache_capacity(config.cache_capacity as u64) .use_compression(true) .open()?; @@ -301,7 +301,8 @@ impl Database { } pub async fn flush(&self) -> Result<()> { - self._db.flush_async().await?; + // noop while we don't use sled 1.0 + //self._db.flush_async().await?; Ok(()) } } diff --git a/src/database/sending.rs b/src/database/sending.rs index 779df06..d6dcead 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -211,10 +211,12 @@ impl Sending { }; }, Some(event) = &mut subscriber => { - for (_tree, key, value_opt) in &event { - if value_opt.is_none() { - continue; - } + if let sled::Event::Insert { key, .. 
} = event { + // New sled version: + //for (_tree, key, value_opt) in &event { + // if value_opt.is_none() { + // continue; + // } let servernamepduid = key.clone(); From 588de12d799090cf2c964d763f06cd8ee80ef8cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 10:12:29 +0200 Subject: [PATCH 091/103] fix: lost forward extremity --- src/server_server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server_server.rs b/src/server_server.rs index 84cfe61..4a93a3d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1010,6 +1010,7 @@ fn handle_incoming_pdu<'a>( fork_states.insert(current_state); // We also add state after incoming event to the fork states + extremities.insert(incoming_pdu.event_id.clone()); let mut state_after = state_at_incoming_event.clone(); if let Some(state_key) = &incoming_pdu.state_key { state_after.insert( From b0ea692706bed8de8bc5f3e82b9186700ba1ee0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 10:50:30 +0200 Subject: [PATCH 092/103] fix: malformed pushrule error when event does not trigger any actions --- src/database/pusher.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 27e5926..9a9452c 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -216,16 +216,11 @@ pub async fn send_push_notice( notify = Some(n); } - let notify = notify.ok_or_else(|| { - Error::bad_database( - r#"Malformed pushrule contains none of these actions: ["dont_notify", "notify", "coalesce"]"#, - ) - })?; - - if notify { + if notify == Some(true) { send_notice(unread, pusher, tweaks, pdu, db).await?; } - + // Else the event triggered no actions + Ok(()) } From 8773e5013d08a37851c4379db6bc66641602ece2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 21:01:27 +0200 Subject: [PATCH 093/103] feat: incoming invites over federation --- Cargo.lock | 36 +++--- Cargo.toml | 8 +- src/client_server/account.rs | 10 +- src/client_server/membership.rs | 3 + src/client_server/sync.rs | 45 ++----- src/database.rs | 10 +- src/database/pusher.rs | 4 +- src/database/rooms.rs | 203 +++++++++++++++++++++++--------- src/main.rs | 1 + src/server_server.rs | 133 +++++++++++++++++---- 10 files changed, 307 insertions(+), 146 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf881c2..42042b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1625,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "assign", "js_int", @@ -1645,7 +1645,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "http", "percent-encoding", @@ -1660,7 +1660,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = 
"git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1671,7 +1671,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "ruma-api", "ruma-common", @@ -1685,7 +1685,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "assign", "http", @@ -1704,7 +1704,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "indexmap", "js_int", @@ -1720,7 +1720,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "js_int", "ruma-common", @@ -1734,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1745,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "js_int", "ruma-api", @@ -1760,7 +1760,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "paste", "rand", @@ -1774,7 +1774,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "proc-macro2", "quote", @@ -1785,12 +1785,12 @@ dependencies = [ [[package]] name = 
"ruma-identifiers-validation" version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "ruma-api", "ruma-common", @@ -1803,7 +1803,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "js_int", "ruma-api", @@ -1818,7 +1818,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "form_urlencoded", "itoa", @@ -1831,7 +1831,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1842,7 +1842,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=a310ccc318a4eb51062923d570d5a86c1468e8a1#a310ccc318a4eb51062923d570d5a86c1468e8a1" +source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" dependencies = [ "base64 0.13.0", "ring", @@ -2120,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=1ec42ea2fc0b0728bf027a5899839ad94bb3091b#1ec42ea2fc0b0728bf027a5899839ad94bb3091b" +source = "git+https://github.com/timokoesters/state-res?rev=2e90b36babeb0d6b99ce8d4b513302a25dcdffc1#2e90b36babeb0d6b99ce8d4b513302a25dcdffc1" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 3109dd8..a28c08d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570d5a86c1468e8a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } -#ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", 
"federation-api", "push-gateway-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -#ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "push-gateway-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +#ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570d5a86c1468e8a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/timokoesters/ruma", rev = "b11de1e1f9d3c15267d09617131cf217f8277fa4", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "1ec42ea2fc0b0728bf027a5899839ad94bb3091b", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/timokoesters/state-res", rev = "2e90b36babeb0d6b99ce8d4b513302a25dcdffc1", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 4c5b60c..2241d45 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -617,11 +617,11 @@ pub async fn deactivate_route( } // Leave all joined rooms and reject all invitations - for room_id in db - .rooms - .rooms_joined(&sender_user) - .chain(db.rooms.rooms_invited(&sender_user)) - { + for room_id in db.rooms.rooms_joined(&sender_user).chain( + db.rooms + .rooms_invited(&sender_user) + .map(|t| t.map(|(r, _)| r)), + ) { let room_id = room_id?; let event = member::MemberEventContent { membership: member::MembershipState::Leave, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 3f4f23f..3876246 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -599,6 +599,8 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid user id in send_join response.") })?; + let invite_state = Vec::new(); // TODO add a few important events + // Update our membership info, we do this here incase a user is invited // and immediately leaves we need the DB to record the invite event for auth db.rooms.update_membership( @@ -616,6 +618,7 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid membership state content.") })?, &pdu.sender, + Some(invite_state), &db.account_data, &db.globals, )?; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index bd7046d..f1ad9a5 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -588,44 +588,23 @@ pub async fn sync_events_route( } let mut invited_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_invited(&sender_user) { - let room_id = room_id?; - let mut invited_since_last_sync = false; - for pdu in db.rooms.pdus_since(&sender_user, &room_id, since)? 
{ - let (_, pdu) = pdu?; - if pdu.kind == EventType::RoomMember && pdu.state_key == Some(sender_user.to_string()) { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))?; + for result in db.rooms.rooms_invited(&sender_user) { + let (room_id, invite_state_events) = result?; + let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; - if content.membership == MembershipState::Invite { - invited_since_last_sync = true; - break; - } - } - } - - if !invited_since_last_sync { + // Invited before last sync + if Some(since) >= invite_count { continue; } - let invited_room = sync_events::InvitedRoom { - invite_state: sync_events::InviteState { - events: db - .rooms - .room_state_full(&room_id)? - .into_iter() - .map(|(_, pdu)| pdu.to_stripped_state_event()) - .collect(), + invited_rooms.insert( + room_id.clone(), + sync_events::InvitedRoom { + invite_state: sync_events::InviteState { + events: invite_state_events, + }, }, - }; - - if !invited_room.is_empty() { - invited_rooms.insert(room_id.clone(), invited_room); - } + ); } for user_id in left_encrypted_users { diff --git a/src/database.rs b/src/database.rs index a266c21..211c3f4 100644 --- a/src/database.rs +++ b/src/database.rs @@ -161,8 +161,8 @@ impl Database { userroomid_joined: db.open_tree("userroomid_joined")?, roomuserid_joined: db.open_tree("roomuserid_joined")?, roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?, - userroomid_invited: db.open_tree("userroomid_invited")?, - roomuserid_invited: db.open_tree("roomuserid_invited")?, + userroomid_invitestate: db.open_tree("userroomid_invitestate")?, + roomuserid_invitecount: db.open_tree("roomuserid_invitecount")?, userroomid_left: db.open_tree("userroomid_left")?, statekey_shortstatekey: db.open_tree("statekey_shortstatekey")?, @@ -236,7 +236,11 @@ impl Database { ); futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push(self.rooms.userroomid_invited.watch_prefix(&userid_prefix)); + futures.push( + self.rooms + .userroomid_invitestate + .watch_prefix(&userid_prefix), + ); futures.push(self.rooms.userroomid_left.watch_prefix(&userid_prefix)); // Events for rooms we are in diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 9a9452c..f4c02d0 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -216,11 +216,11 @@ pub async fn send_push_notice( notify = Some(n); } - if notify == Some(true) { + if notify == Some(true) { send_notice(unread, pusher, tweaks, pdu, db).await?; } // Else the event triggered no actions - + Ok(()) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 81697e3..ba98790 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -11,10 +11,10 @@ use ruma::{ events::{ ignored_user_list, room::{create::CreateEventContent, member, message}, - EventType, + AnyStrippedStateEvent, EventType, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, + uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; use state_res::{Event, StateMap}; @@ -51,8 +51,8 @@ pub struct Rooms { pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, pub(super) roomuseroncejoinedids: sled::Tree, - pub(super) userroomid_invited: sled::Tree, - pub(super) roomuserid_invited: sled::Tree, + pub(super) 
userroomid_invitestate: sled::Tree, + pub(super) roomuserid_invitecount: sled::Tree, pub(super) userroomid_left: sled::Tree, /// Remember the current state hash of a room. @@ -145,12 +145,12 @@ impl Rooms { /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). #[tracing::instrument(skip(self))] - pub fn state_get( + pub fn state_get_id( &self, shortstatehash: u64, event_type: &EventType, state_key: &str, - ) -> Result> { + ) -> Result> { let mut key = event_type.as_ref().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); @@ -161,7 +161,8 @@ impl Rooms { let mut stateid = shortstatehash.to_be_bytes().to_vec(); stateid.extend_from_slice(&shortstatekey); - self.stateid_shorteventid + Ok(self + .stateid_shorteventid .get(&stateid)? .map(|bytes| self.shorteventid_eventid.get(&bytes).ok().flatten()) .flatten() @@ -178,13 +179,24 @@ impl Rooms { ) }) .map(|r| r.ok()) - .flatten() - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) + .flatten()) } else { Ok(None) } } + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] + pub fn state_get( + &self, + shortstatehash: u64, + event_type: &EventType, + state_key: &str, + ) -> Result> { + self.state_get_id(shortstatehash, event_type, state_key)? + .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) + } + /// Returns the state hash for this pdu. #[tracing::instrument(skip(self))] pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { @@ -354,6 +366,21 @@ impl Rooms { } } + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] + pub fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &EventType, + state_key: &str, + ) -> Result> { + if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + self.state_get_id(current_shortstatehash, event_type, state_key) + } else { + Ok(None) + } + } + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). #[tracing::instrument(skip(self))] pub fn room_state_get( @@ -395,7 +422,7 @@ impl Rooms { } /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map_or_else::, _, _>( @@ -666,29 +693,64 @@ impl Rooms { // if the state_key fails let target_user_id = UserId::try_from(state_key.clone()) .expect("This state_key was previously validated"); + + let membership = serde_json::from_value::( + pdu.content + .get("membership") + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content", + ) + })? + .clone(), + ) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid membership state content.", + ) + })?; + + let invite_state = match membership { + member::MembershipState::Invite => { + let mut state = Vec::new(); + // Add recommended events + if let Some(e) = + self.room_state_get(&pdu.room_id, &EventType::RoomJoinRules, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get( + &pdu.room_id, + &EventType::RoomCanonicalAlias, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&pdu.room_id, &EventType::RoomAvatar, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&pdu.room_id, &EventType::RoomName, "")? 
+ { + state.push(e.to_stripped_state_event()); + } + Some(state) + } + _ => None, + }; + // Update our membership info, we do this here incase a user is invited // and immediately leaves we need the DB to record the invite event for auth self.update_membership( &pdu.room_id, &target_user_id, - serde_json::from_value::( - pdu.content - .get("membership") - .ok_or_else(|| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid member event content", - ) - })? - .clone(), - ) - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid membership state content.", - ) - })?, + membership, &pdu.sender, + invite_state, &db.account_data, &db.globals, )?; @@ -1044,10 +1106,10 @@ impl Rooms { // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(self.get_pdu_json(event_id).ok()??.get("depth")?.as_u64()?)) + .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) .max() - .unwrap_or(0_u64) - + 1; + .unwrap_or(uint!(0)) + + uint!(1); let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { @@ -1071,9 +1133,7 @@ impl Rooms { content, state_key, prev_events, - depth: depth - .try_into() - .map_err(|_| Error::bad_database("Depth is invalid"))?, + depth, auth_events: auth_events .iter() .map(|(_, pdu)| pdu.event_id.clone()) @@ -1384,6 +1444,7 @@ impl Rooms { user_id: &UserId, membership: member::MembershipState, sender: &UserId, + invite_state: Option>>, account_data: &super::account_data::AccountData, globals: &super::globals::Globals, ) -> Result<()> { @@ -1487,8 +1548,8 @@ impl Rooms { self.roomserverids.insert(&roomserver_id, &[])?; self.userroomid_joined.insert(&userroom_id, &[])?; self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invited.remove(&userroom_id)?; - self.roomuserid_invited.remove(&roomuser_id)?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; self.userroomid_left.remove(&userroom_id)?; } member::MembershipState::Invite => { @@ -1508,8 +1569,13 @@ impl Rooms { } self.roomserverids.insert(&roomserver_id, &[])?; - self.userroomid_invited.insert(&userroom_id, &[])?; - self.roomuserid_invited.insert(&roomuser_id, &[])?; + self.userroomid_invitestate.insert( + &userroom_id, + serde_json::to_vec(&invite_state.unwrap_or_default()) + .expect("state to bytes always works"), + )?; + self.roomuserid_invitecount + .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_left.remove(&userroom_id)?; @@ -1526,8 +1592,8 @@ impl Rooms { self.userroomid_left.insert(&userroom_id, &[])?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invited.remove(&userroom_id)?; - self.roomuserid_invited.remove(&roomuser_id)?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; } _ => {} } @@ -1797,7 +1863,7 @@ impl Rooms { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - self.roomuserid_invited + self.roomuserid_invitecount .scan_prefix(prefix) .keys() .map(|key| { @@ -1816,6 +1882,22 @@ impl Rooms { }) } + /// Returns an iterator over all invited members of a room. 
+ #[tracing::instrument(skip(self))] + pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_invitecount + .get(key)? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid invitecount in db.") + })?)) + }) + } + /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { @@ -1840,27 +1922,32 @@ impl Rooms { /// Returns an iterator over all rooms a user was invited to. #[tracing::instrument(skip(self))] - pub fn rooms_invited(&self, user_id: &UserId) -> impl Iterator> { + pub fn rooms_invited( + &self, + user_id: &UserId, + ) -> impl Iterator>)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - self.userroomid_invited - .scan_prefix(prefix) - .keys() - .map(|key| { - Ok(RoomId::try_from( - utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, + self.userroomid_invitestate.scan_prefix(prefix).map(|r| { + let (key, state) = r?; + let room_id = RoomId::try_from( + utils::string_from_bytes( + &key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?) - }) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok((room_id, state)) + }) } /// Returns an iterator over all rooms a user left. 
@@ -1906,7 +1993,7 @@ impl Rooms { userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - Ok(self.userroomid_invited.get(userroom_id)?.is_some()) + Ok(self.userroomid_invitestate.get(userroom_id)?.is_some()) } pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { diff --git a/src/main.rs b/src/main.rs index 4ccc025..6fd04ce 100644 --- a/src/main.rs +++ b/src/main.rs @@ -167,6 +167,7 @@ fn setup_rocket() -> (rocket::Rocket, Config) { server_server::get_event_route, server_server::get_missing_events_route, server_server::get_room_state_ids_route, + server_server::create_invite_route, server_server::get_profile_information_route, ], ) diff --git a/src/server_server.rs b/src/server_server.rs index 4a93a3d..1fad54e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -10,20 +10,24 @@ use ruma::{ federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ - get_remote_server_keys, get_server_keys, - get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, + get_remote_server_keys, get_server_keys, get_server_version, ServerSigningKeys, + VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, + membership::create_invite, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::{room::create::CreateEventContent, EventType}, + events::{ + room::{create::CreateEventContent, member::MembershipState}, + EventType, + }, serde::{to_canonical_value, Raw}, signatures::CanonicalJsonValue, - EventId, RoomId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, EventMap, StateMap}; use std::{ @@ -332,13 +336,13 @@ pub async fn request_well_known( #[tracing::instrument(skip(db))] pub fn get_server_version_route( db: State<'_, Database>, -) -> ConduitResult { +) -> ConduitResult { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - Ok(get_server_version::Response { - server: Some(get_server_version::Server { + Ok(get_server_version::v1::Response { + server: Some(get_server_version::v1::Server { name: Some("Conduit".to_owned()), version: Some(env!("CARGO_PKG_VERSION").to_owned()), }), @@ -1406,12 +1410,9 @@ pub fn get_event_route<'a>( origin: db.globals.server_name().to_owned(), origin_server_ts: SystemTime::now(), pdu: PduEvent::convert_to_outgoing_federation_event( - serde_json::from_value( - db.rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?, - ) - .map_err(|_| Error::bad_database("Invalid pdu in database."))?, + db.rooms + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?, ), } .into()) @@ -1438,9 +1439,10 @@ pub fn get_missing_events_route<'a>( if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { if body.earliest_events.contains( &serde_json::from_value( - pdu.get("event_id") - .cloned() - .ok_or_else(|| Error::bad_database("Event in db has no event_id field."))?, + serde_json::to_value(pdu.get("event_id").cloned().ok_or_else(|| { + Error::bad_database("Event in db has no event_id field.") + })?) 
+ .expect("canonical json is valid json value"), ) .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?, ) { @@ -1449,16 +1451,14 @@ pub fn get_missing_events_route<'a>( } queued_events.extend_from_slice( &serde_json::from_value::>( - pdu.get("prev_events").cloned().ok_or_else(|| { - Error::bad_database("Invalid prev_events field of pdu in db.") - })?, + serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { + Error::bad_database("Event in db has no prev_events field.") + })?) + .expect("canonical json is valid json value"), ) .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, ); - events.push(PduEvent::convert_to_outgoing_federation_event( - serde_json::from_value(pdu) - .map_err(|_| Error::bad_database("Invalid pdu in database."))?, - )); + events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); } i += 1; } @@ -1518,6 +1518,93 @@ pub fn get_room_state_ids_route<'a>( .into()) } +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/federation/v2/invite/<_>/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub fn create_invite_route<'a>( + db: State<'a, Database>, + body: Ruma, +) -> ConduitResult { + if body.room_version < RoomVersionId::Version6 { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: body.room_version.clone(), + }, + "Server does not support this room version.", + )); + } + + let mut signed_event = utils::to_canonical_object(&body.event) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; + + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut signed_event, + &body.room_version, + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + + let sender = serde_json::from_value( + serde_json::to_value( + signed_event + .get("sender") + .ok_or_else(|| { + Error::BadRequest(ErrorKind::InvalidParam, "Event had no sender field.") + })? + .clone(), + ) + .expect("CanonicalJsonValue to serde_json::Value always works"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; + let invited_user = serde_json::from_value( + serde_json::to_value( + signed_event + .get("state_key") + .ok_or_else(|| { + Error::BadRequest(ErrorKind::InvalidParam, "Event had no state_key field.") + })? + .clone(), + ) + .expect("CanonicalJsonValue to serde_json::Value always works"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; + + let mut invite_state = body.invite_room_state.clone(); + + let mut event = serde_json::from_str::>( + &body.event.json().to_string(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + + event.insert("event_id".to_owned(), "$dummy".into()); + invite_state.push( + serde_json::from_value::(event.into()) + .map_err(|e| { + warn!("Invalid invite event: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })? 
+ .to_stripped_state_event(), + ); + + db.rooms.update_membership( + &body.room_id, + &invited_user, + MembershipState::Invite, + &sender, + Some(invite_state), + &db.account_data, + &db.globals, + )?; + + Ok(create_invite::v2::Response { + event: PduEvent::convert_to_outgoing_federation_event(signed_event), + } + .into()) +} + #[cfg_attr( feature = "conduit_bin", get("/_matrix/federation/v1/query/profile", data = "") From a8231eef25ca427ecec61cfe1e0c9c2412b26c83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 21:29:37 +0200 Subject: [PATCH 094/103] fix: alias parsing --- src/database/rooms.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ba98790..09944cf 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1668,8 +1668,10 @@ impl Rooms { .scan_prefix(prefix) .values() .map(|bytes| { - Ok(serde_json::from_slice(&bytes?) - .map_err(|_| Error::bad_database("Alias in aliasid_alias is invalid."))?) + Ok(utils::string_from_bytes(&bytes?) + .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? + .try_into() + .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))?) }) } From a961732f5f52c7bbba11335f84b3f0ceae9e9404 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 11 Apr 2021 21:35:17 +0200 Subject: [PATCH 095/103] fix: overflow --- src/database/rooms/edus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 9e43fe1..c48f4c2 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -382,7 +382,7 @@ impl RoomEdus { .ok()?, )) }) - .take_while(|(_, timestamp)| current_timestamp - timestamp > 5 * 60_000) + .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) // 5 Minutes { // Send new presence events to set the user offline From 1dc85895a7a243b46ae8c60615c6b268f4c8f9df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 12 Apr 2021 10:12:52 +0200 Subject: [PATCH 096/103] fix: show warning for invalid user ids --- src/client_server/membership.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 3876246..149df93 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -595,7 +595,8 @@ async fn join_room_by_id_helper( db.rooms.add_pdu_outlier(&pdu)?; if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { - let target_user_id = UserId::try_from(state_key.clone()).map_err(|_| { + let target_user_id = UserId::try_from(state_key.clone()).map_err(|e| { + warn!("Invalid user id in send_join response: {}: {}", state_key, e); Error::BadServerResponse("Invalid user id in send_join response.") })?; From 662a0cf1df7c33ec1f3c00b6f0a722cdbc03a3d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 12 Apr 2021 12:40:16 +0200 Subject: [PATCH 097/103] improvement: better and more efficient message count calculation --- src/client_server/membership.rs | 5 +- src/client_server/read_marker.rs | 4 ++ src/client_server/sync.rs | 38 ++++++------ src/database.rs | 3 + src/database/pusher.rs | 54 +++++++++++------ src/database/rooms.rs | 101 +++++++++++++++++++++++++++++-- src/database/sending.rs | 32 +++------- 7 files changed, 169 insertions(+), 68 deletions(-) diff --git a/src/client_server/membership.rs 
b/src/client_server/membership.rs index 149df93..e816005 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -596,7 +596,10 @@ async fn join_room_by_id_helper( if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { let target_user_id = UserId::try_from(state_key.clone()).map_err(|e| { - warn!("Invalid user id in send_join response: {}: {}", state_key, e); + warn!( + "Invalid user id in send_join response: {}: {}", + state_key, e + ); Error::BadServerResponse("Invalid user id in send_join response.") })?; diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 555b7e7..166e59a 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -47,6 +47,8 @@ pub async fn set_read_marker_route( ))?, &db.globals, )?; + db.rooms + .reset_notification_counts(&sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); user_receipts.insert( @@ -103,6 +105,8 @@ pub async fn create_receipt_route( ))?, &db.globals, )?; + db.rooms + .reset_notification_counts(&sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); user_receipts.insert( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index f1ad9a5..fe14208 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -12,7 +12,7 @@ use ruma::{ use rocket::{get, tokio}; use std::{ collections::{hash_map, BTreeMap, HashMap, HashSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, time::Duration, }; @@ -370,23 +370,23 @@ pub async fn sync_events_route( ); let notification_count = if send_notification_counts { - if let Some(last_read) = db.rooms.edus.private_read_get(&room_id, &sender_user)? { - Some( - (db.rooms - .pdus_since(&sender_user, &room_id, last_read)? - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .filter(|(_, pdu)| { - matches!( - pdu.kind.clone(), - EventType::RoomMessage | EventType::RoomEncrypted - ) - }) - .count() as u32) - .into(), - ) - } else { - None - } + Some( + db.rooms + .notification_count(&sender_user, &room_id)? + .try_into() + .expect("notification count can't go that high"), + ) + } else { + None + }; + + let highlight_count = if send_notification_counts { + Some( + db.rooms + .highlight_count(&sender_user, &room_id)? 
+ .try_into() + .expect("highlight count can't go that high"), + ) } else { None }; @@ -440,7 +440,7 @@ pub async fn sync_events_route( invited_member_count: invited_member_count.map(|n| (n as u32).into()), }, unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count: None, + highlight_count, notification_count, }, timeline: sync_events::Timeline { diff --git a/src/database.rs b/src/database.rs index 211c3f4..9d629dd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -165,6 +165,9 @@ impl Database { roomuserid_invitecount: db.open_tree("roomuserid_invitecount")?, userroomid_left: db.open_tree("userroomid_left")?, + userroomid_notificationcount: db.open_tree("userroomid_notificationcount")?, + userroomid_highlightcount: db.open_tree("userroomid_highlightcount")?, + statekey_shortstatekey: db.open_tree("statekey_shortstatekey")?, stateid_shorteventid: db.open_tree("stateid_shorteventid")?, eventid_shorteventid: db.open_tree("eventid_shorteventid")?, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index f4c02d0..e2bd3f1 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -174,29 +174,10 @@ pub async fn send_push_notice( pdu: &PduEvent, db: &Database, ) -> Result<()> { - let power_levels: PowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_value(ev.content) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let ctx = PushConditionRoomCtx { - room_id: pdu.room_id.clone(), - member_count: (db.rooms.room_members(&pdu.room_id).count() as u32).into(), - user_display_name: user.localpart().into(), // TODO: Use actual display name - users_power_levels: power_levels.users, - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications, - }; - let mut notify = None; let mut tweaks = Vec::new(); - for action in ruleset.get_actions(&pdu.to_sync_room_event(), &ctx) { + for action in get_actions(user, &ruleset, pdu, db)? { let n = match action { Action::DontNotify => false, // TODO: Implement proper support for coalesce @@ -224,6 +205,39 @@ pub async fn send_push_notice( Ok(()) } +pub fn get_actions<'a>( + user: &UserId, + ruleset: &'a Ruleset, + pdu: &PduEvent, + db: &Database, +) -> Result> { + let power_levels: PowerLevelsEventContent = db + .rooms + .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_value(ev.content) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + let ctx = PushConditionRoomCtx { + room_id: pdu.room_id.clone(), + member_count: (db.rooms.room_members(&pdu.room_id).count() as u32).into(), + user_display_name: db + .users + .displayname(&user)? 
+ .unwrap_or(user.localpart().to_owned()), + users_power_levels: power_levels.users, + default_power_level: power_levels.users_default, + notification_power_levels: power_levels.notifications, + }; + + Ok(ruleset + .get_actions(&pdu.to_sync_room_event(), &ctx) + .map(Clone::clone)) +} + async fn send_notice( unread: UInt, pusher: &Pusher, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 09944cf..3f37de6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -9,10 +9,11 @@ use ring::digest; use ruma::{ api::client::error::ErrorKind, events::{ - ignored_user_list, + ignored_user_list, push_rules, room::{create::CreateEventContent, member, message}, AnyStrippedStateEvent, EventType, }, + push::{self, Action, Tweak}, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; @@ -26,7 +27,7 @@ use std::{ sync::Arc, }; -use super::admin::AdminCommand; +use super::{admin::AdminCommand, pusher}; /// The unique identifier of each state group. /// @@ -51,10 +52,13 @@ pub struct Rooms { pub(super) userroomid_joined: sled::Tree, pub(super) roomuserid_joined: sled::Tree, pub(super) roomuseroncejoinedids: sled::Tree, - pub(super) userroomid_invitestate: sled::Tree, - pub(super) roomuserid_invitecount: sled::Tree, + pub(super) userroomid_invitestate: sled::Tree, // InviteState = Vec> + pub(super) roomuserid_invitecount: sled::Tree, // InviteCount = Count pub(super) userroomid_left: sled::Tree, + pub(super) userroomid_notificationcount: sled::Tree, // NotifyCount = u64 + pub(super) userroomid_highlightcount: sled::Tree, // HightlightCount = u64 + /// Remember the current state hash of a room. pub(super) roomid_shortstatehash: sled::Tree, /// Remember the state hash at events in the past. @@ -649,6 +653,7 @@ impl Rooms { // fails self.edus .private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; + self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; self.pduid_pdu.insert( &pdu_id, @@ -673,6 +678,45 @@ impl Rooms { continue; } + let rules_for_user = db + .account_data + .get::(None, &user, EventType::PushRules)? + .map(|ev| ev.content.global) + .unwrap_or_else(|| push::Ruleset::server_default(&user)); + + let mut highlight = false; + let mut notify = false; + + for action in pusher::get_actions(&user, &rules_for_user, pdu, db)? { + match action { + Action::DontNotify => notify = false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => notify = true, + Action::SetTweak(Tweak::Highlight(true)) => { + highlight = true; + } + _ => {} + }; + } + + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(pdu.room_id.as_bytes()); + + if notify { + &self + .userroomid_notificationcount + .update_and_fetch(&userroom_id, utils::increment)? + .expect("utils::increment will always put in a value"); + } + + if highlight { + &self + .userroomid_highlightcount + .update_and_fetch(&userroom_id, utils::increment)? + .expect("utils::increment will always put in a value"); + } + for senderkey in db .pusher .get_pusher_senderkeys(&user) @@ -738,6 +782,14 @@ impl Rooms { { state.push(e.to_stripped_state_event()); } + if let Some(e) = + self.room_state_get(&pdu.room_id, &EventType::RoomMember, pdu.sender.as_str())? 
+ { + state.push(e.to_stripped_state_event()); + } + + state.push(pdu.to_stripped_state_event()); + Some(state) } _ => None, @@ -844,6 +896,47 @@ impl Rooms { Ok(()) } + pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_notificationcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; + self.userroomid_highlightcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; + + Ok(()) + } + + pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_notificationcount + .get(&userroom_id)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid notification count in db.")) + }) + .unwrap_or(Ok(0)) + } + + pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_highlightcount + .get(&userroom_id)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid highlight count in db.")) + }) + .unwrap_or(Ok(0)) + } + /// Generates a new StateHash and associates it with the incoming event. /// /// This adds all current state events (not including the incoming event) diff --git a/src/database/sending.rs b/src/database/sending.rs index d6dcead..ffd3ed6 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,6 @@ use std::{ collections::HashMap, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, sync::Arc, time::{Duration, Instant, SystemTime}, @@ -16,7 +16,7 @@ use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, events::{push_rules, EventType}, - push, uint, ServerName, UInt, UserId, + push, ServerName, UInt, UserId, }; use sled::IVec; use tokio::{select, sync::Semaphore}; @@ -432,32 +432,16 @@ impl Sending { let rules_for_user = db .account_data .get::(None, &userid, EventType::PushRules) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? + .unwrap_or_default() .map(|ev| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); - let unread: UInt = if let Some(last_read) = db + let unread: UInt = db .rooms - .edus - .private_read_get(&pdu.room_id, &userid) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? - { - (db.rooms - .pdus_since(&userid, &pdu.room_id, last_read) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .filter(|(_, pdu)| { - matches!( - pdu.kind.clone(), - EventType::RoomMessage | EventType::RoomEncrypted - ) - }) - .count() as u32) - .into() - } else { - // Just return zero unread messages - uint!(0) - }; + .notification_count(&userid, &pdu.room_id) + .map_err(|e| (kind.clone(), e))? 
+ .try_into() + .expect("notifiation count can't go that high"); let permit = db.sending.maximum_requests.acquire().await; From b4f79b77ba37c875c9c9c78cf8a03b1eeda83d64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 13 Apr 2021 15:00:45 +0200 Subject: [PATCH 098/103] feat: reject invites over federation --- Cargo.lock | 106 +++++------ Cargo.toml | 6 +- src/appservice_server.rs | 17 +- src/client_server/membership.rs | 35 +--- src/client_server/sync.rs | 95 ++-------- src/database.rs | 5 +- src/database/pusher.rs | 17 +- src/database/rooms.rs | 299 +++++++++++++++++++++++++++++--- src/ruma_wrapper.rs | 14 +- src/server_server.rs | 38 ++-- 10 files changed, 391 insertions(+), 241 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 42042b6..d3da6fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -457,9 +457,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" +checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" dependencies = [ "futures-channel", "futures-core", @@ -472,9 +472,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" +checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" dependencies = [ "futures-core", "futures-sink", @@ -482,15 +482,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" +checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" [[package]] name = "futures-executor" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" +checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" dependencies = [ "futures-core", "futures-task", @@ -499,15 +499,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" +checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" [[package]] name = "futures-macro" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" +checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -517,21 +517,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" +checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" [[package]] name = "futures-task" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" +checksum = 
"ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" [[package]] name = "futures-util" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" +checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" dependencies = [ "futures-channel", "futures-core", @@ -650,9 +650,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" dependencies = [ "bytes", "fnv", @@ -672,9 +672,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.3.5" +version = "1.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" +checksum = "bc35c995b9d93ec174cf9a27d425c7892722101e14993cd227fdb51d70cf9589" [[package]] name = "httpdate" @@ -1497,9 +1497,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" +checksum = "2296f2fac53979e8ccbc4a1136b25dcefd37be9ed7e4a1f6b05a6029c84ff124" dependencies = [ "base64 0.13.0", "bytes", @@ -1625,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "assign", "js_int", @@ -1645,8 +1645,9 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ + "bytes", "http", "percent-encoding", "ruma-api-macros", @@ -1660,7 +1661,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1671,7 +1672,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "ruma-api", "ruma-common", @@ -1685,9 +1686,10 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" 
dependencies = [ "assign", + "bytes", "http", "js_int", "maplit", @@ -1704,7 +1706,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "indexmap", "js_int", @@ -1720,7 +1722,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "js_int", "ruma-common", @@ -1734,7 +1736,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1745,7 +1747,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "js_int", "ruma-api", @@ -1760,7 +1762,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "paste", "rand", @@ -1774,7 +1776,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "proc-macro2", "quote", @@ -1785,12 +1787,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.2" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "ruma-api", "ruma-common", @@ -1803,7 +1805,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = 
"git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "js_int", "ruma-api", @@ -1818,7 +1820,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "form_urlencoded", "itoa", @@ -1831,7 +1833,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1842,7 +1844,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0" -source = "git+https://github.com/timokoesters/ruma?rev=b11de1e1f9d3c15267d09617131cf217f8277fa4#b11de1e1f9d3c15267d09617131cf217f8277fa4" +source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" dependencies = [ "base64 0.13.0", "ring", @@ -1910,9 +1912,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ "ring", "untrusted", @@ -2120,7 +2122,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=2e90b36babeb0d6b99ce8d4b513302a25dcdffc1#2e90b36babeb0d6b99ce8d4b513302a25dcdffc1" +source = "git+https://github.com/timokoesters/state-res?rev=94534b8ff3e71b544ae36206abc182321e9d41f1#94534b8ff3e71b544ae36206abc182321e9d41f1" dependencies = [ "itertools 0.10.0", "log", @@ -2182,9 +2184,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" +checksum = "48fe99c6bd8b1cc636890bcc071842de909d902c81ac7dab53ba33c421ab8ffb" dependencies = [ "proc-macro2", "quote", @@ -2330,9 +2332,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" +checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5" dependencies = [ "autocfg", "bytes", @@ -2381,9 +2383,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" +checksum = "940a12c99365c31ea8dd9ba04ec1be183ffe4920102bb7122c2f515437601e8e" dependencies = [ "bytes", "futures-core", @@ -2558,9 +2560,9 @@ dependencies 
= [ [[package]] name = "uncased" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300932469d646d39929ffe84ad5c1837beecf602519ef5695e485b472de4082b" +checksum = "5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" dependencies = [ "version_check", ] diff --git a/Cargo.toml b/Cargo.toml index a28c08d..84e40d2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -#ruma = { git = "https://github.com/ruma/ruma", rev = "a310ccc318a4eb51062923d570d5a86c1468e8a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/timokoesters/ruma", rev = "b11de1e1f9d3c15267d09617131cf217f8277fa4", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "6394609feb4af5c43b840fab85b824b13cebb156", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/timokoesters/ruma", rev = "220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "2e90b36babeb0d6b99ce8d4b513302a25dcdffc1", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/timokoesters/state-res", rev = "94534b8ff3e71b544ae36206abc182321e9d41f1", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 04f14c0..1b72c76 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,7 +1,7 @@ use crate::{utils, Error, Result}; use http::header::{HeaderValue, CONTENT_TYPE}; use log::warn; -use ruma::api::OutgoingRequest; +use ruma::api::{IncomingResponse, OutgoingRequest}; use std::{ convert::{TryFrom, TryInto}, fmt::Debug, @@ -66,15 +66,10 @@ where let status = reqwest_response.status(); - let body = reqwest_response - .bytes() - .await - .unwrap_or_else(|e| { - warn!("server error: {}", e); - Vec::new().into() - }) // TODO: handle timeout - .into_iter() - .collect::>(); + let body = reqwest_response.bytes().await.unwrap_or_else(|e| { + warn!("server error: {}", e); + Vec::new().into() + }); // TODO: handle timeout if status != 200 { warn!( @@ -86,7 +81,7 @@ where ); } - let response = T::IncomingResponse::try_from( + let response = T::IncomingResponse::try_from_http_response( http_response .body(body) .expect("reqwest body is valid http body"), diff --git a/src/client_server/membership.rs 
b/src/client_server/membership.rs index e816005..d491ca0 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -91,37 +91,7 @@ pub async fn leave_room_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = serde_json::from_value::>( - db.rooms - .room_state_get( - &body.room_id, - &EventType::RoomMember, - &sender_user.to_string(), - )? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content, - ) - .expect("from_value::> can never fail") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = member::MembershipState::Leave; - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: serde_json::to_value(event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - &sender_user, - &body.room_id, - &db, - )?; + db.rooms.leave_room(sender_user, &body.room_id, &db).await?; db.flush().await?; @@ -480,6 +450,7 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid make_join event json received from server.") })?; + // TODO: Is origin needed? join_event_stub.insert( "origin".to_owned(), to_canonical_value(db.globals.server_name()) @@ -699,5 +670,7 @@ async fn join_room_by_id_helper( )?; } + db.flush().await?; + Ok(join_room_by_id::Response::new(room_id.clone()).into()) } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index fe14208..66a1e13 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,6 +1,5 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; -use log::error; use ruma::{ api::client::r0::sync::sync_events, events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, @@ -494,83 +493,17 @@ pub async fn sync_events_route( } let mut left_rooms = BTreeMap::new(); - for room_id in db.rooms.rooms_left(&sender_user) { - let room_id = room_id?; + for result in db.rooms.rooms_left(&sender_user) { + let (room_id, left_state_events) = result?; + let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; - let since_member = if let Some(since_member) = db - .rooms - .pdus_after(sender_user, &room_id, since) - .next() - .and_then(|pdu| pdu.ok()) - .and_then(|pdu| { - db.rooms - .pdu_shortstatehash(&pdu.1.event_id) - .ok()? - .ok_or_else(|| { - error!("{:?}", pdu.1); - Error::bad_database("Pdu in db doesn't have a state hash.") - }) - .ok() - }) - .and_then(|shortstatehash| { - db.rooms - .state_get(shortstatehash, &EventType::RoomMember, sender_user.as_str()) - .ok()? - .ok_or_else(|| Error::bad_database("State hash in db doesn't have a state.")) - .ok() - }) - .and_then(|pdu| { - serde_json::from_value::>( - pdu.content.clone(), - ) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .map(|content| (pdu, content)) - .ok() - }) { - since_member - } else { - // We couldn't find the since_member event. 
This is very weird - we better abort + // Left before last sync + if Some(since) >= left_count { continue; - }; + } - let left_since_last_sync = since_member.1.membership == MembershipState::Join; - - let left_room = if left_since_last_sync { - device_list_left.extend( - db.rooms - .room_members(&room_id) - .filter_map(|user_id| Some(user_id.ok()?)) - .filter(|user_id| { - // Don't send key updates from the sender to the sender - sender_user != user_id - }) - .filter(|user_id| { - // Only send if the sender doesn't share any encrypted room with the target - // anymore - !share_encrypted_room(&db, sender_user, user_id, &room_id) - }), - ); - - let pdus = db.rooms.pdus_since(&sender_user, &room_id, since)?; - let mut room_events = pdus - .filter_map(|pdu| pdu.ok()) // Filter out buggy events - .take_while(|(_, pdu)| &since_member.0 != pdu) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect::>(); - room_events.push(since_member.0.to_sync_room_event()); - - sync_events::LeftRoom { - account_data: sync_events::AccountData { events: Vec::new() }, - timeline: sync_events::Timeline { - limited: false, - prev_batch: Some(next_batch.clone()), - events: room_events, - }, - state: sync_events::State { events: Vec::new() }, - } - } else { + left_rooms.insert( + room_id.clone(), sync_events::LeftRoom { account_data: sync_events::AccountData { events: Vec::new() }, timeline: sync_events::Timeline { @@ -578,13 +511,11 @@ pub async fn sync_events_route( prev_batch: Some(next_batch.clone()), events: Vec::new(), }, - state: sync_events::State { events: Vec::new() }, - } - }; - - if !left_room.is_empty() { - left_rooms.insert(room_id.clone(), left_room); - } + state: sync_events::State { + events: left_state_events, + }, + }, + ); } let mut invited_rooms = BTreeMap::new(); diff --git a/src/database.rs b/src/database.rs index 9d629dd..6bb1b17 100644 --- a/src/database.rs +++ b/src/database.rs @@ -163,7 +163,8 @@ impl Database { roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?, userroomid_invitestate: db.open_tree("userroomid_invitestate")?, roomuserid_invitecount: db.open_tree("roomuserid_invitecount")?, - userroomid_left: db.open_tree("userroomid_left")?, + userroomid_leftstate: db.open_tree("userroomid_leftstate")?, + roomuserid_leftcount: db.open_tree("roomuserid_leftcount")?, userroomid_notificationcount: db.open_tree("userroomid_notificationcount")?, userroomid_highlightcount: db.open_tree("userroomid_highlightcount")?, @@ -244,7 +245,7 @@ impl Database { .userroomid_invitestate .watch_prefix(&userid_prefix), ); - futures.push(self.rooms.userroomid_left.watch_prefix(&userid_prefix)); + futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix)); // Events for rooms we are in for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { diff --git a/src/database/pusher.rs b/src/database/pusher.rs index e2bd3f1..be30576 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -7,7 +7,7 @@ use ruma::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - OutgoingRequest, + IncomingResponse, OutgoingRequest, }, events::{room::power_levels::PowerLevelsEventContent, EventType}, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, @@ -129,15 +129,10 @@ where let status = reqwest_response.status(); - let body = reqwest_response - .bytes() - .await - .unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }) // TODO: handle timeout - .into_iter() - .collect::>(); + let body = 
reqwest_response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout if status != 200 { info!( @@ -149,7 +144,7 @@ where ); } - let response = T::IncomingResponse::try_from( + let response = T::IncomingResponse::try_from_http_response( http_response .body(body) .expect("reqwest body is valid http body"), diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3f37de6..caf7a09 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1,17 +1,18 @@ mod edus; pub use edus::RoomEdus; +use member::MembershipState; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::{debug, error, warn}; use regex::Regex; use ring::digest; use ruma::{ - api::client::error::ErrorKind, + api::{client::error::ErrorKind, federation}, events::{ ignored_user_list, push_rules, room::{create::CreateEventContent, member, message}, - AnyStrippedStateEvent, EventType, + AnyStrippedStateEvent, AnySyncStateEvent, EventType, }, push::{self, Action, Tweak}, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, @@ -54,7 +55,8 @@ pub struct Rooms { pub(super) roomuseroncejoinedids: sled::Tree, pub(super) userroomid_invitestate: sled::Tree, // InviteState = Vec> pub(super) roomuserid_invitecount: sled::Tree, // InviteCount = Count - pub(super) userroomid_left: sled::Tree, + pub(super) userroomid_leftstate: sled::Tree, + pub(super) roomuserid_leftcount: sled::Tree, pub(super) userroomid_notificationcount: sled::Tree, // NotifyCount = u64 pub(super) userroomid_highlightcount: sled::Tree, // HightlightCount = u64 @@ -671,7 +673,7 @@ impl Rooms { .users .iter() .filter_map(|r| r.ok()) - .filter(|user_id| db.rooms.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) + .filter(|user_id| self.is_joined(&user_id, &pdu.room_id).unwrap_or(false)) { // Don't notify the user of their own events if user == pdu.sender { @@ -782,9 +784,11 @@ impl Rooms { { state.push(e.to_stripped_state_event()); } - if let Some(e) = - self.room_state_get(&pdu.room_id, &EventType::RoomMember, pdu.sender.as_str())? - { + if let Some(e) = self.room_state_get( + &pdu.room_id, + &EventType::RoomMember, + pdu.sender.as_str(), + )? 
{ state.push(e.to_stripped_state_event()); } @@ -1380,7 +1384,7 @@ impl Rooms { .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - || db.rooms.room_members(&room_id).any(|userid| { + || self.room_members(&room_id).any(|userid| { userid.map_or(false, |userid| users.is_match(userid.as_str())) }) }; @@ -1537,7 +1541,7 @@ impl Rooms { user_id: &UserId, membership: member::MembershipState, sender: &UserId, - invite_state: Option>>, + last_state: Option>>, account_data: &super::account_data::AccountData, globals: &super::globals::Globals, ) -> Result<()> { @@ -1643,7 +1647,8 @@ impl Rooms { self.roomuserid_joined.insert(&roomuser_id, &[])?; self.userroomid_invitestate.remove(&userroom_id)?; self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_left.remove(&userroom_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; } member::MembershipState::Invite => { // We want to know if the sender is ignored by the receiver @@ -1664,14 +1669,15 @@ impl Rooms { self.roomserverids.insert(&roomserver_id, &[])?; self.userroomid_invitestate.insert( &userroom_id, - serde_json::to_vec(&invite_state.unwrap_or_default()) + serde_json::to_vec(&last_state.unwrap_or_default()) .expect("state to bytes always works"), )?; self.roomuserid_invitecount .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_left.remove(&userroom_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; } member::MembershipState::Leave | member::MembershipState::Ban => { if self @@ -1682,7 +1688,12 @@ impl Rooms { { self.roomserverids.remove(&roomserver_id)?; } - self.userroomid_left.insert(&userroom_id, &[])?; + self.userroomid_leftstate.insert( + &userroom_id, + serde_json::to_vec(&Vec::>::new()).unwrap(), + )?; // TODO + self.roomuserid_leftcount + .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -1694,13 +1705,191 @@ impl Rooms { Ok(()) } + pub async fn leave_room( + &self, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + ) -> Result<()> { + // Ask a remote server if we don't have this room + if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { + if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { + warn!("Failed to leave room {} remotely: {}", user_id, e); + // Don't tell the client about this error + } + + let last_state = self + .invite_state(user_id, room_id)? + .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; + + // We always drop the invite, we can't rely on other servers + self.update_membership( + room_id, + user_id, + MembershipState::Leave, + user_id, + last_state, + &db.account_data, + &db.globals, + )?; + } else { + let mut event = serde_json::from_value::>( + self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? 
+ .content, + ) + .expect("from_value::> can never fail") + .deserialize() + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = member::MembershipState::Leave; + + self.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: serde_json::to_value(event) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + db, + )?; + } + + Ok(()) + } + + async fn remote_leave_room( + &self, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + ) -> Result<()> { + let mut make_leave_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in leaving.", + )); + + let invite_state = db + .rooms + .invite_state(user_id, room_id)? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "User is not invited.", + ))?; + + let servers = invite_state + .iter() + .filter_map(|event| { + serde_json::from_str::(&event.json().to_string()).ok() + }) + .filter_map(|event| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::try_from(sender).ok()) + .map(|user| user.server_name().to_owned()); + + for remote_server in servers { + let make_leave_response = db + .sending + .send_federation_request( + &db.globals, + &remote_server, + federation::membership::get_leave_event::v1::Request { room_id, user_id }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let room_version = match make_leave_response.room_version { + Some(room_version) if room_version == RoomVersionId::Version6 => room_version, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + + let mut leave_event_stub = + serde_json::from_str::(make_leave_response.event.json().get()) + .map_err(|_| { + Error::BadServerResponse("Invalid make_leave event json received from server.") + })?; + + // TODO: Is origin needed? 
+ leave_event_stub.insert( + "origin".to_owned(), + to_canonical_value(db.globals.server_name()) + .map_err(|_| Error::bad_database("Invalid server name found"))?, + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + to_canonical_value(utils::millis_since_unix_epoch()) + .expect("Timestamp is valid js_int value"), + ); + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + leave_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut leave_event_stub, + &room_version, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&leave_event_stub, &room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + leave_event_stub.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + db.sending + .send_federation_request( + &db.globals, + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id, + event_id: &event_id, + pdu: PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + }, + ) + .await?; + + Ok(()) + } + /// Makes a user forget a room. pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - self.userroomid_left.remove(userroom_id)?; + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + self.userroomid_leftstate.remove(userroom_id)?; + self.roomuserid_leftcount.remove(roomuser_id)?; Ok(()) } @@ -1977,7 +2166,6 @@ impl Rooms { }) } - /// Returns an iterator over all invited members of a room. #[tracing::instrument(skip(self))] pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { let mut key = room_id.as_bytes().to_vec(); @@ -1993,6 +2181,21 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] + pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_leftcount + .get(key)? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid leftcount in db.") + })?)) + }) + } + /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { @@ -2045,25 +2248,75 @@ impl Rooms { }) } + #[tracing::instrument(skip(self))] + pub fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&room_id.as_bytes()); + + self.userroomid_invitestate + .get(key)? 
+ .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok(state) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + pub fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&room_id.as_bytes()); + + self.userroomid_leftstate + .get(key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok(state) + }) + .transpose() + } + /// Returns an iterator over all rooms a user left. #[tracing::instrument(skip(self))] - pub fn rooms_left(&self, user_id: &UserId) -> impl Iterator> { + pub fn rooms_left( + &self, + user_id: &UserId, + ) -> impl Iterator>)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - self.userroomid_left.scan_prefix(prefix).keys().map(|key| { - Ok(RoomId::try_from( + self.userroomid_leftstate.scan_prefix(prefix).map(|r| { + let (key, state) = r?; + let room_id = RoomId::try_from( utils::string_from_bytes( - &key? - .rsplit(|&b| b == 0xff) + &key.rsplit(|&b| b == 0xff) .next() .expect("rsplit always returns an element"), ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_left is invalid unicode.") + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") })?, ) - .map_err(|_| Error::bad_database("Room ID in userroomid_left is invalid."))?) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok((room_id, state)) }) } @@ -2096,6 +2349,6 @@ impl Rooms { userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - Ok(self.userroomid_left.get(userroom_id)?.is_some()) + Ok(self.userroomid_leftstate.get(userroom_id)?.is_some()) } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 8e1d34f..c60c04e 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,9 +1,10 @@ use crate::Error; use ruma::{ + api::OutgoingResponse, identifiers::{DeviceId, UserId}, Outgoing, }; -use std::{convert::TryInto, ops::Deref}; +use std::ops::Deref; #[cfg(feature = "conduit_bin")] use { @@ -145,7 +146,7 @@ where let mut body = Vec::new(); handle.read_to_end(&mut body).await.unwrap(); - let http_request = http_request.body(body.clone()).unwrap(); + let http_request = http_request.body(&*body).unwrap(); debug!("{:?}", http_request); match ::try_from_http_request(http_request) { Ok(t) => Success(Ruma { @@ -178,9 +179,9 @@ impl Deref for Ruma { /// This struct converts ruma responses into rocket http responses. 
pub type ConduitResult = std::result::Result, Error>; -pub struct RumaResponse>>>(pub T); +pub struct RumaResponse(pub T); -impl>>> From for RumaResponse { +impl From for RumaResponse { fn from(t: T) -> Self { Self(t) } @@ -189,12 +190,11 @@ impl>>> From for RumaResponse { #[cfg(feature = "conduit_bin")] impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse where - T: Send + TryInto>>, - T::Error: Send, + T: Send + OutgoingResponse, 'o: 'r, { fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { - let http_response: Result, _> = self.0.try_into(); + let http_response: Result, _> = self.0.try_into_http_response(); match http_response { Ok(http_response) => { let mut response = rocket::response::Response::build(); diff --git a/src/server_server.rs b/src/server_server.rs index 1fad54e..304bc19 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,7 +18,7 @@ use ruma::{ query::get_profile_information, transactions::send_transaction_message, }, - OutgoingRequest, + IncomingResponse, OutgoingRequest, OutgoingResponse, }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ @@ -173,15 +173,10 @@ where let status = reqwest_response.status(); - let body = reqwest_response - .bytes() - .await - .unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }) // TODO: handle timeout - .into_iter() - .collect::>(); + let body = reqwest_response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout if status != 200 { info!( @@ -195,7 +190,7 @@ where ); } - let response = T::IncomingResponse::try_from( + let response = T::IncomingResponse::try_from_http_response( http_response .body(body) .expect("reqwest body is valid http body"), @@ -350,6 +345,7 @@ pub fn get_server_version_route( .into()) } +// Response type for this endpoint is Json because we need to calculate a signature for the response #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] #[tracing::instrument(skip(db))] pub fn get_server_keys_route(db: State<'_, Database>) -> Json { @@ -369,7 +365,7 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { }, ); let mut response = serde_json::from_slice( - http::Response::try_from(get_server_keys::v2::Response { + get_server_keys::v2::Response { server_key: ServerSigningKeys { server_name: db.globals.server_name().to_owned(), verify_keys, @@ -377,7 +373,8 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { signatures: BTreeMap::new(), valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2), }, - }) + } + .try_into_http_response() .unwrap() .body(), ) @@ -745,7 +742,7 @@ fn handle_incoming_pdu<'a>( // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - debug!("Fetching auth events."); + debug!("Fetching auth events for {}", incoming_pdu.event_id); fetch_and_handle_events( db, origin, @@ -757,7 +754,10 @@ fn handle_incoming_pdu<'a>( .map_err(|e| e.to_string())?; // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events - debug!("Checking auth."); + debug!( + "Auth check for {} based on auth events", + incoming_pdu.event_id + ); // Build map of auth events let mut auth_events = BTreeMap::new(); @@ -1151,7 +1151,7 @@ pub(crate) async fn fetch_and_handle_events( // a. 
Look at auth cache let pdu = match auth_cache.get(id) { Some(pdu) => { - debug!("Event found in cache"); + debug!("Found {} in cache", id); pdu.clone() } // b. Look in the main timeline (pduid_pdu tree) @@ -1159,12 +1159,12 @@ pub(crate) async fn fetch_and_handle_events( // (get_pdu checks both) None => match db.rooms.get_pdu(&id)? { Some(pdu) => { - debug!("Event found in outliers"); + debug!("Found {} in outliers", id); Arc::new(pdu) } None => { // d. Ask origin server over federation - debug!("Fetching event over federation: {:?}", id); + debug!("Fetching {} over federation.", id); match db .sending .send_federation_request( @@ -1175,7 +1175,7 @@ pub(crate) async fn fetch_and_handle_events( .await { Ok(res) => { - debug!("Got event over federation: {:?}", res); + debug!("Got {} over federation: {:?}", id, res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; let pdu = handle_incoming_pdu( From 5049d0e01b173b7fe1f6cdd6b22fa22a4e223d29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 13 Apr 2021 18:17:51 +0200 Subject: [PATCH 099/103] improvement: check signatures on join --- src/client_server/membership.rs | 100 +++++++++++++-------------- src/server_server.rs | 115 ++++++++++++++------------------ 2 files changed, 94 insertions(+), 121 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index d491ca0..f648978 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -2,7 +2,7 @@ use super::State; use crate::{ client_server, pdu::{PduBuilder, PduEvent}, - utils, ConduitResult, Database, Error, Result, Ruma, + server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; use log::{error, warn}; use ruma::{ @@ -21,7 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; +use std::{collections::BTreeMap, convert::TryFrom}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -515,27 +515,6 @@ async fn join_room_by_id_helper( ) .await?; - let add_event_id = |pdu: &Raw| -> Result<(EventId, CanonicalJsonObject)> { - let mut value = serde_json::from_str(pdu.json().get()).map_err(|e| { - error!("{:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&value, &room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - value.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("a valid EventId can be converted to CanonicalJsonValue"), - ); - - Ok((event_id, value)) - }; - let count = db.globals.next_count()?; let mut pdu_id = room_id.as_bytes().to_vec(); @@ -546,23 +525,15 @@ async fn join_room_by_id_helper( .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; let mut state = BTreeMap::new(); + let mut pub_key_map = BTreeMap::new(); + + for pdu in send_join_response.room_state.state.iter() { + let (event_id, value) = validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { + warn!("{:?}: {}", value, e); + Error::BadServerResponse("Invalid PDU in send_join response.") + })?; - for pdu in send_join_response - .room_state - .state - .iter() - .map(add_event_id) - .map(|r| { - let (event_id, value) 
= r?; - PduEvent::from_id_val(&event_id, value.clone()) - .map(|ev| (event_id, Arc::new(ev))) - .map_err(|e| { - warn!("{:?}: {}", value, e); - Error::BadServerResponse("Invalid PDU in send_join response.") - }) - }) - { - let (_id, pdu) = pdu?; db.rooms.add_pdu_outlier(&pdu)?; if let Some(state_key) = &pdu.state_key { if pdu.kind == EventType::RoomMember { @@ -612,22 +583,12 @@ async fn join_room_by_id_helper( db.rooms.force_state(room_id, state, &db.globals)?; - for pdu in send_join_response - .room_state - .auth_chain - .iter() - .map(add_event_id) - .map(|r| { - let (event_id, value) = r?; - PduEvent::from_id_val(&event_id, value.clone()) - .map(|ev| (event_id, Arc::new(ev))) - .map_err(|e| { - warn!("{:?}: {}", value, e); - Error::BadServerResponse("Invalid PDU in send_join response.") - }) - }) - { - let (_id, pdu) = pdu?; + for pdu in send_join_response.room_state.auth_chain.iter() { + let (event_id, value) = validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { + warn!("{:?}: {}", value, e); + Error::BadServerResponse("Invalid PDU in send_join response.") + })?; db.rooms.add_pdu_outlier(&pdu)?; } @@ -674,3 +635,32 @@ async fn join_room_by_id_helper( Ok(join_room_by_id::Response::new(room_id.clone()).into()) } + +async fn validate_and_add_event_id( + pdu: &Raw, + room_version: &RoomVersionId, + pub_key_map: &mut BTreeMap>, + db: &Database, +) -> Result<(EventId, CanonicalJsonObject)> { + let mut value = serde_json::from_str::(pdu.json().get()).map_err(|e| { + error!("{:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + server_server::fetch_required_signing_keys(&value, pub_key_map, db).await?; + + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&value, &room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + value.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("a valid EventId can be converted to CanonicalJsonValue"), + ); + + Ok((event_id, value)) +} diff --git a/src/server_server.rs b/src/server_server.rs index 304bc19..39b626f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -658,44 +658,7 @@ fn handle_incoming_pdu<'a>( // We go through all the signatures we see on the value and fetch the corresponding signing // keys - for (signature_server, signature) in match value - .get("signatures") - .ok_or_else(|| "No signatures in server response pdu.".to_string())? 
- { - CanonicalJsonValue::Object(map) => map, - _ => return Err("Invalid signatures object in server response pdu.".to_string()), - } { - let signature_object = match signature { - CanonicalJsonValue::Object(map) => map, - _ => { - return Err( - "Invalid signatures content object in server response pdu.".to_string() - ) - } - }; - - let signature_ids = signature_object.keys().collect::>(); - - debug!("Fetching signing keys for {}", signature_server); - let keys = match fetch_signing_keys( - &db, - &Box::::try_from(&**signature_server).map_err(|_| { - "Invalid servername in signatures of server response pdu.".to_string() - })?, - signature_ids, - ) - .await - { - Ok(keys) => keys, - Err(_) => { - return Err( - "Signature verification failed: Could not fetch signing key.".to_string(), - ); - } - }; - - pub_key_map.insert(signature_server.clone(), keys); - } + fetch_required_signing_keys(&value, pub_key_map, db).await.map_err(|e| e.to_string())?; // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match @@ -1639,38 +1602,58 @@ pub fn get_profile_information_route<'a>( .into()) } -/* -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v2/invite/<_>/<_>", data = "") -)] -pub fn get_user_devices_route<'a>( - db: State<'a, Database>, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut displayname = None; - let mut avatar_url = None; - - match body.field { - Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, - Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?, - None => { - displayname = db.users.displayname(&body.user_id)?; - avatar_url = db.users.avatar_url(&body.user_id)?; +pub async fn fetch_required_signing_keys( + event: &BTreeMap, + pub_key_map: &mut BTreeMap>, + db: &Database, +) -> Result<()> { + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + for (signature_server, signature) in match event + .get("signatures") + .ok_or_else(|| Error::BadServerResponse("No signatures in server response pdu."))? 
+ { + CanonicalJsonValue::Object(map) => map, + _ => { + return Err(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + )) } + } { + let signature_object = match signature { + CanonicalJsonValue::Object(map) => map, + _ => { + return Err(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + )) + } + }; + + let signature_ids = signature_object.keys().collect::>(); + + debug!("Fetching signing keys for {}", signature_server); + let keys = match fetch_signing_keys( + db, + &Box::::try_from(&**signature_server).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?, + signature_ids, + ) + .await + { + Ok(keys) => keys, + Err(_) => { + return Err(Error::BadServerResponse( + "Signature verification failed: Could not fetch signing key.", + )); + } + }; + + pub_key_map.insert(signature_server.clone(), keys); } - Ok(get_profile_information::v1::Response { - displayname, - avatar_url, - } - .into()) + Ok(()) } -*/ #[cfg(test)] mod tests { From 8b40e0a85ffec3ad9a712fd5175944158ac46f5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 13 Apr 2021 21:34:31 +0200 Subject: [PATCH 100/103] improvement: fetch signing keys in parallel when joining a room --- Cargo.lock | 48 +++++++++---------- Cargo.toml | 4 +- src/client_server/membership.rs | 31 +++++++++++-- src/server_server.rs | 82 ++++++++++++++++++++++----------- 4 files changed, 105 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3da6fb..d153c28 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1625,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "assign", "js_int", @@ -1645,9 +1645,8 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ - "bytes", "http", "percent-encoding", "ruma-api-macros", @@ -1661,7 +1660,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1672,7 +1671,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "ruma-api", "ruma-common", @@ -1686,10 +1685,9 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = 
"git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "assign", - "bytes", "http", "js_int", "maplit", @@ -1705,8 +1703,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "indexmap", "js_int", @@ -1722,7 +1720,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "js_int", "ruma-common", @@ -1736,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1747,7 +1745,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "js_int", "ruma-api", @@ -1761,8 +1759,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.18.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.19.0" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "paste", "rand", @@ -1775,8 +1773,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.18.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.19.0" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "proc-macro2", "quote", @@ -1786,13 +1784,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.2.2" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.2.3" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "ruma-api", "ruma-common", @@ -1805,7 +1803,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = 
"git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "js_int", "ruma-api", @@ -1820,7 +1818,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "form_urlencoded", "itoa", @@ -1833,7 +1831,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1843,8 +1841,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=6394609feb4af5c43b840fab85b824b13cebb156#6394609feb4af5c43b840fab85b824b13cebb156" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=c1693569f15920e408aa6a26b7f3cc7fc6693a63#c1693569f15920e408aa6a26b7f3cc7fc6693a63" dependencies = [ "base64 0.13.0", "ring", @@ -2122,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=94534b8ff3e71b544ae36206abc182321e9d41f1#94534b8ff3e71b544ae36206abc182321e9d41f1" +source = "git+https://github.com/timokoesters/state-res?rev=84e70c062708213d01281438598e16f13dffeda4#84e70c062708213d01281438598e16f13dffeda4" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 84e40d2..9aa9cee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", rev = "6394609feb4af5c43b840fab85b824b13cebb156", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "c1693569f15920e408aa6a26b7f3cc7fc6693a63", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "220d5b4a76b3b781f7f8297fbe6b14473b04214b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "94534b8ff3e71b544ae36206abc182321e9d41f1", features = ["unstable-pre-spec"] } +state-res = { git = 
"https://github.com/timokoesters/state-res", rev = "84e70c062708213d01281438598e16f13dffeda4", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index f648978..c348409 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -5,6 +5,7 @@ use crate::{ server_server, utils, ConduitResult, Database, Error, Result, Ruma, }; use log::{error, warn}; +use rocket::futures; use ruma::{ api::{ client::{ @@ -21,6 +22,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; +use std::sync::RwLock; use std::{collections::BTreeMap, convert::TryFrom}; #[cfg(feature = "conduit_bin")] @@ -525,10 +527,18 @@ async fn join_room_by_id_helper( .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; let mut state = BTreeMap::new(); - let mut pub_key_map = BTreeMap::new(); + let mut pub_key_map = RwLock::new(BTreeMap::new()); - for pdu in send_join_response.room_state.state.iter() { - let (event_id, value) = validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + for result in futures::future::join_all( + send_join_response + .room_state + .state + .iter() + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)), + ) + .await + { + let (event_id, value) = result?; let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") @@ -584,7 +594,8 @@ async fn join_room_by_id_helper( db.rooms.force_state(room_id, state, &db.globals)?; for pdu in send_join_response.room_state.auth_chain.iter() { - let (event_id, value) = validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + let (event_id, value) = + validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") @@ -639,7 +650,7 @@ async fn join_room_by_id_helper( async fn validate_and_add_event_id( pdu: &Raw, room_version: &RoomVersionId, - pub_key_map: &mut BTreeMap>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<(EventId, CanonicalJsonObject)> { let mut value = serde_json::from_str::(pdu.json().get()).map_err(|e| { @@ -648,6 +659,16 @@ async fn validate_and_add_event_id( })?; server_server::fetch_required_signing_keys(&value, pub_key_map, db).await?; + if let Err(e) = ruma::signatures::verify_event( + &*pub_key_map + .read() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?, + &value, + room_version, + ) { + warn!("Event failed verification: {}", e); + return Err(Error::BadServerResponse("Event failed verification.")); + } let event_id = EventId::try_from(&*format!( "${}", diff --git a/src/server_server.rs b/src/server_server.rs index 39b626f..791ec1c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -38,7 +38,7 @@ use std::{ net::{IpAddr, SocketAddr}, pin::Pin, result::Result as StdResult, - sync::Arc, + sync::{Arc, RwLock}, time::{Duration, SystemTime}, }; @@ -543,7 +543,7 @@ pub async fn send_transaction_message_route<'a>( let mut resolved_map = BTreeMap::new(); - let mut pub_key_map = BTreeMap::new(); + let pub_key_map = RwLock::new(BTreeMap::new()); // 
This is all the auth_events that have been recursively fetched so they don't have to be // deserialized over and over again. @@ -569,7 +569,7 @@ pub async fn send_transaction_message_route<'a>( value, true, &db, - &mut pub_key_map, + &pub_key_map, &mut auth_cache, ) .await @@ -622,7 +622,7 @@ fn handle_incoming_pdu<'a>( value: BTreeMap, is_timeline_event: bool, db: &'a Database, - pub_key_map: &'a mut BTreeMap>, + pub_key_map: &'a RwLock>>, auth_cache: &'a mut EventMap>, ) -> AsyncRecursiveResult<'a, Arc> { Box::pin(async move { @@ -658,7 +658,9 @@ fn handle_incoming_pdu<'a>( // We go through all the signatures we see on the value and fetch the corresponding signing // keys - fetch_required_signing_keys(&value, pub_key_map, db).await.map_err(|e| e.to_string())?; + fetch_required_signing_keys(&value, &pub_key_map, db) + .await + .map_err(|e| e.to_string())?; // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match @@ -676,7 +678,11 @@ fn handle_incoming_pdu<'a>( let room_version = create_event_content.room_version; - let mut val = match ruma::signatures::verify_event(&pub_key_map, &value, &room_version) { + let mut val = match ruma::signatures::verify_event( + &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, + &value, + &room_version, + ) { Err(e) => { // Drop error!("{:?}: {}", value, e); @@ -1106,7 +1112,7 @@ pub(crate) async fn fetch_and_handle_events( db: &Database, origin: &ServerName, events: &[EventId], - pub_key_map: &mut BTreeMap>, + pub_key_map: &RwLock>>, auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; @@ -1256,6 +1262,7 @@ pub(crate) async fn fetch_signing_keys( } } + warn!("Failed to find public key for server: {}", origin); Err(Error::BadServerResponse( "Failed to find public key for server", )) @@ -1486,7 +1493,7 @@ pub fn get_room_state_ids_route<'a>( put("/_matrix/federation/v2/invite/<_>/<_>", data = "") )] #[tracing::instrument(skip(db, body))] -pub fn create_invite_route<'a>( +pub async fn create_invite_route<'a>( db: State<'a, Database>, body: Ruma, ) -> ConduitResult { @@ -1510,6 +1517,20 @@ pub fn create_invite_route<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + // Generate event id + let event_id = EventId::try_from(&*format!( + "${}", + ruma::signatures::reference_hash(&signed_event, &body.room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + signed_event.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let sender = serde_json::from_value( serde_json::to_value( signed_event @@ -1543,24 +1564,26 @@ pub fn create_invite_route<'a>( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; event.insert("event_id".to_owned(), "$dummy".into()); - invite_state.push( - serde_json::from_value::(event.into()) - .map_err(|e| { - warn!("Invalid invite event: {}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") - })? 
- .to_stripped_state_event(), - ); - db.rooms.update_membership( - &body.room_id, - &invited_user, - MembershipState::Invite, - &sender, - Some(invite_state), - &db.account_data, - &db.globals, - )?; + let pdu = serde_json::from_value::(event.into()).map_err(|e| { + warn!("Invalid invite event: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })?; + + invite_state.push(pdu.to_stripped_state_event()); + + // If the room already exists, the remote server will notify us about the join via /send + if !db.rooms.exists(&pdu.room_id)? { + db.rooms.update_membership( + &body.room_id, + &invited_user, + MembershipState::Invite, + &sender, + Some(invite_state), + &db.account_data, + &db.globals, + )?; + } Ok(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), @@ -1604,7 +1627,7 @@ pub fn get_profile_information_route<'a>( pub async fn fetch_required_signing_keys( event: &BTreeMap, - pub_key_map: &mut BTreeMap>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { // We go through all the signatures we see on the value and fetch the corresponding signing @@ -1642,14 +1665,17 @@ pub async fn fetch_required_signing_keys( .await { Ok(keys) => keys, - Err(_) => { + Err(e) => { return Err(Error::BadServerResponse( "Signature verification failed: Could not fetch signing key.", )); } }; - pub_key_map.insert(signature_server.clone(), keys); + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(signature_server.clone(), keys); } Ok(()) From 595129463856cc4b03cccebba678ced4af3865c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Apr 2021 09:39:06 +0200 Subject: [PATCH 101/103] feat: join cursed rooms this removes several restrictions and tries to continue verifying a pdu event if some auth events fail (it drops/ignores bad pdus) --- Cargo.lock | 2 +- Cargo.toml | 2 +- src/client_server/membership.rs | 25 ++++++++++++++++++++----- src/pdu.rs | 25 ++++++++++--------------- src/server_server.rs | 27 ++++++++++++++++----------- 5 files changed, 48 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d153c28..f6af8b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2120,7 +2120,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/timokoesters/state-res?rev=84e70c062708213d01281438598e16f13dffeda4#84e70c062708213d01281438598e16f13dffeda4" +source = "git+https://github.com/timokoesters/state-res?rev=9bb46ae681bfc361cff740e78dc42bb711db9779#9bb46ae681bfc361cff740e78dc42bb711db9779" dependencies = [ "itertools 0.10.0", "log", diff --git a/Cargo.toml b/Cargo.toml index 9aa9cee..fba7e5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "c1693569f15920e408aa6a26b7 #ruma = { path = "../ruma/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "unstable-pre-spec", "unstable-exhaustive-types"] } # Used when doing state resolution -state-res = { git = "https://github.com/timokoesters/state-res", rev = "84e70c062708213d01281438598e16f13dffeda4", features = ["unstable-pre-spec"] } +state-res = { git = "https://github.com/timokoesters/state-res", rev = "9bb46ae681bfc361cff740e78dc42bb711db9779", features = ["unstable-pre-spec"] } #state-res = { path = "../state-res", features = ["unstable-pre-spec"] } # Used for long polling and 
federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index c348409..4be0d5f 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -527,7 +527,7 @@ async fn join_room_by_id_helper( .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; let mut state = BTreeMap::new(); - let mut pub_key_map = RwLock::new(BTreeMap::new()); + let pub_key_map = RwLock::new(BTreeMap::new()); for result in futures::future::join_all( send_join_response @@ -538,7 +538,11 @@ async fn join_room_by_id_helper( ) .await { - let (event_id, value) = result?; + let (event_id, value) = match result { + Ok(t) => t, + Err(_) => continue, + }; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") @@ -593,9 +597,20 @@ async fn join_room_by_id_helper( db.rooms.force_state(room_id, state, &db.globals)?; - for pdu in send_join_response.room_state.auth_chain.iter() { - let (event_id, value) = - validate_and_add_event_id(pdu, &room_version, &mut pub_key_map, &db).await?; + for result in futures::future::join_all( + send_join_response + .room_state + .auth_chain + .iter() + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, &db)), + ) + .await + { + let (event_id, value) = match result { + Ok(t) => t, + Err(_) => continue, + }; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") diff --git a/src/pdu.rs b/src/pdu.rs index 009fde6..a7d9432 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -166,22 +166,17 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_sync_state_event(&self) -> Raw { - let json = format!( - r#"{{"content":{},"type":"{}","event_id":"{}","sender":"{}","origin_server_ts":{},"unsigned":{},"state_key":"{}"}}"#, - self.content, - self.kind, - self.event_id, - self.sender, - self.origin_server_ts, - serde_json::to_string(&self.unsigned).expect("Map::to_string always works"), - self.state_key - .as_ref() - .expect("state events have state keys") - ); + let json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "unsigned": self.unsigned, + "state_key": self.state_key, + }); - Raw::from_json( - serde_json::value::RawValue::from_string(json).expect("our string is valid json"), - ) + serde_json::from_value(json).expect("Raw::from_value always works") } #[tracing::instrument(skip(self))] diff --git a/src/server_server.rs b/src/server_server.rs index 791ec1c..538540a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -685,7 +685,7 @@ fn handle_incoming_pdu<'a>( ) { Err(e) => { // Drop - error!("{:?}: {}", value, e); + warn!("{:?}: {}", value, e); return Err("Signature verification failed".to_string()); } Ok(ruma::signatures::Verified::Signatures) => { @@ -1147,7 +1147,7 @@ pub(crate) async fn fetch_and_handle_events( debug!("Got {} over federation: {:?}", id, res); let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu)?; - let pdu = handle_incoming_pdu( + let pdu = match handle_incoming_pdu( origin, &event_id, value, @@ -1157,14 +1157,20 @@ pub(crate) async fn fetch_and_handle_events( auth_cache, ) .await - .map_err(|e| { - error!("Error: {:?}", e); - Error::Conflict("Authentication of event failed") - })?; + 
{ + Ok(pdu) => pdu, + Err(e) => { + warn!("Authentication of event {} failed: {:?}", id, e); + continue; + } + }; pdu } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + Err(_) => { + warn!("Failed to fetch event: {}", id); + continue; + } } } }, @@ -1665,10 +1671,9 @@ pub async fn fetch_required_signing_keys( .await { Ok(keys) => keys, - Err(e) => { - return Err(Error::BadServerResponse( - "Signature verification failed: Could not fetch signing key.", - )); + Err(_) => { + warn!("Signature verification failed: Could not fetch signing key.",); + continue; } }; From c1953efa6bd3b8e354bc0bcff372072694f5b043 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Apr 2021 10:43:31 +0200 Subject: [PATCH 102/103] chore: code cleanup --- src/client_server/membership.rs | 7 +--- src/database/pusher.rs | 2 +- src/database/rooms.rs | 71 ++++++++++++++++++--------------- src/server_server.rs | 33 +++++++-------- src/utils.rs | 2 +- 5 files changed, 59 insertions(+), 56 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index dad4328..0da0747 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -568,9 +568,7 @@ async fn join_room_by_id_helper( serde_json::from_value::( pdu.content .get("membership") - .ok_or_else(|| { - Error::BadServerResponse("Invalid member event content") - })? + .ok_or(Error::BadServerResponse("Invalid member event content"))? .clone(), ) .map_err(|_| { @@ -578,8 +576,7 @@ async fn join_room_by_id_helper( })?, &pdu.sender, Some(invite_state), - &db.account_data, - &db.globals, + db, )?; } state.insert((pdu.kind.clone(), state_key.clone()), pdu.event_id.clone()); diff --git a/src/database/pusher.rs b/src/database/pusher.rs index be30576..c204386 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -222,7 +222,7 @@ pub fn get_actions<'a>( user_display_name: db .users .displayname(&user)? - .unwrap_or(user.localpart().to_owned()), + .unwrap_or_else(|| user.localpart().to_owned()), users_power_levels: power_levels.users, default_power_level: power_levels.users_default, notification_power_levels: power_levels.notifications, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index caf7a09..5053360 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -706,14 +706,14 @@ impl Rooms { userroom_id.extend_from_slice(pdu.room_id.as_bytes()); if notify { - &self + self .userroomid_notificationcount .update_and_fetch(&userroom_id, utils::increment)? .expect("utils::increment will always put in a value"); } if highlight { - &self + self .userroomid_highlightcount .update_and_fetch(&userroom_id, utils::increment)? .expect("utils::increment will always put in a value"); @@ -743,12 +743,10 @@ impl Rooms { let membership = serde_json::from_value::( pdu.content .get("membership") - .ok_or_else(|| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid member event content", - ) - })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content", + ))? 
.clone(), ) .map_err(|_| { @@ -807,8 +805,7 @@ impl Rooms { membership, &pdu.sender, invite_state, - &db.account_data, - &db.globals, + db, )?; } } @@ -1205,7 +1202,7 @@ impl Rooms { .iter() .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) .max() - .unwrap_or(uint!(0)) + .unwrap_or_else(|| uint!(0)) + uint!(1); let mut unsigned = unsigned.unwrap_or_default(); @@ -1542,8 +1539,7 @@ impl Rooms { membership: member::MembershipState, sender: &UserId, last_state: Option>>, - account_data: &super::account_data::AccountData, - globals: &super::globals::Globals, + db: &Database, ) -> Result<()> { let mut roomserver_id = room_id.as_bytes().to_vec(); roomserver_id.push(0xff); @@ -1603,23 +1599,32 @@ impl Rooms { // .ok(); // Copy old tags to new room - if let Some(tag_event) = account_data.get::( - Some(&predecessor.room_id), - user_id, - EventType::Tag, - )? { - account_data - .update(Some(room_id), user_id, EventType::Tag, &tag_event, globals) + if let Some(tag_event) = + db.account_data.get::( + Some(&predecessor.room_id), + user_id, + EventType::Tag, + )? + { + db.account_data + .update( + Some(room_id), + user_id, + EventType::Tag, + &tag_event, + &db.globals, + ) .ok(); }; // Copy direct chat flag - if let Some(mut direct_event) = account_data - .get::( - None, - user_id, - EventType::Direct, - )? { + if let Some(mut direct_event) = + db.account_data.get::( + None, + user_id, + EventType::Direct, + )? + { let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { @@ -1630,12 +1635,12 @@ impl Rooms { } if room_ids_updated { - account_data.update( + db.account_data.update( None, user_id, EventType::Direct, &direct_event, - globals, + &db.globals, )?; } }; @@ -1652,7 +1657,8 @@ impl Rooms { } member::MembershipState::Invite => { // We want to know if the sender is ignored by the receiver - let is_ignored = account_data + let is_ignored = db + .account_data .get::( None, // Ignored users are in global account data &user_id, // Receiver @@ -1673,7 +1679,7 @@ impl Rooms { .expect("state to bytes always works"), )?; self.roomuserid_invitecount - .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; + .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_leftstate.remove(&userroom_id)?; @@ -1693,7 +1699,7 @@ impl Rooms { serde_json::to_vec(&Vec::>::new()).unwrap(), )?; // TODO self.roomuserid_leftcount - .insert(&roomuser_id, &globals.next_count()?.to_be_bytes())?; + .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -1729,8 +1735,7 @@ impl Rooms { MembershipState::Leave, user_id, last_state, - &db.account_data, - &db.globals, + db, )?; } else { let mut event = serde_json::from_value::>( diff --git a/src/server_server.rs b/src/server_server.rs index 538540a..5b49472 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1413,15 +1413,16 @@ pub fn get_missing_events_route<'a>( let mut i = 0; while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { - if body.earliest_events.contains( - &serde_json::from_value( + let event_id = + serde_json::from_value( serde_json::to_value(pdu.get("event_id").cloned().ok_or_else(|| { Error::bad_database("Event in db has no event_id field.") })?) 
.expect("canonical json is valid json value"), ) - .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?, - ) { + .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?; + + if body.earliest_events.contains(&event_id) { i += 1; continue; } @@ -1541,9 +1542,10 @@ pub async fn create_invite_route<'a>( serde_json::to_value( signed_event .get("sender") - .ok_or_else(|| { - Error::BadRequest(ErrorKind::InvalidParam, "Event had no sender field.") - })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no sender field.", + ))? .clone(), ) .expect("CanonicalJsonValue to serde_json::Value always works"), @@ -1553,9 +1555,10 @@ pub async fn create_invite_route<'a>( serde_json::to_value( signed_event .get("state_key") - .ok_or_else(|| { - Error::BadRequest(ErrorKind::InvalidParam, "Event had no state_key field.") - })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no state_key field.", + ))? .clone(), ) .expect("CanonicalJsonValue to serde_json::Value always works"), @@ -1586,8 +1589,7 @@ pub async fn create_invite_route<'a>( MembershipState::Invite, &sender, Some(invite_state), - &db.account_data, - &db.globals, + &db, )?; } @@ -1638,10 +1640,9 @@ pub async fn fetch_required_signing_keys( ) -> Result<()> { // We go through all the signatures we see on the value and fetch the corresponding signing // keys - for (signature_server, signature) in match event - .get("signatures") - .ok_or_else(|| Error::BadServerResponse("No signatures in server response pdu."))? - { + for (signature_server, signature) in match event.get("signatures").ok_or( + Error::BadServerResponse("No signatures in server response pdu."), + )? { CanonicalJsonValue::Object(map) => map, _ => { return Err(Error::BadServerResponse( diff --git a/src/utils.rs b/src/utils.rs index 45d9de8..106baff 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -69,7 +69,7 @@ pub fn calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } -pub fn common_elements<'a>( +pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, ) -> Option>> { From 001d8dc2573988b13da5e481a422fa8a1df109c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 14 Apr 2021 12:55:14 +0200 Subject: [PATCH 103/103] fix: don't do expensive operation on local /send --- src/database/pusher.rs | 2 +- src/database/rooms.rs | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index c204386..40b829f 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -218,7 +218,7 @@ pub fn get_actions<'a>( let ctx = PushConditionRoomCtx { room_id: pdu.room_id.clone(), - member_count: (db.rooms.room_members(&pdu.room_id).count() as u32).into(), + member_count: 10_u32.into(), // TODO: get member count efficiently user_display_name: db .users .displayname(&user)? 
diff --git a/src/database/rooms.rs b/src/database/rooms.rs
index 5053360..955ad83 100644
--- a/src/database/rooms.rs
+++ b/src/database/rooms.rs
@@ -674,6 +674,7 @@ impl Rooms {
             .iter()
             .filter_map(|r| r.ok())
             .filter(|user_id| self.is_joined(&user_id, &pdu.room_id).unwrap_or(false))
+            .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(false))
         {
             // Don't notify the user of their own events
             if user == pdu.sender {
@@ -706,15 +707,13 @@ impl Rooms {
             userroom_id.extend_from_slice(pdu.room_id.as_bytes());
 
             if notify {
-                self
-                    .userroomid_notificationcount
+                self.userroomid_notificationcount
                     .update_and_fetch(&userroom_id, utils::increment)?
                     .expect("utils::increment will always put in a value");
             }
 
             if highlight {
-                self
-                    .userroomid_highlightcount
+                self.userroomid_highlightcount
                     .update_and_fetch(&userroom_id, utils::increment)?
                     .expect("utils::increment will always put in a value");
             }
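For the rooms.rs hunks just above, this is a minimal sketch of the selection logic around unread counters, with Users and the counts map as hypothetical stand-ins for the sled trees (userroomid_notificationcount and friends); the real code also evaluates push rules and only increments when they resolve to notify, and it checks the sender inside the loop rather than in the filter chain.

    use std::collections::{BTreeMap, BTreeSet};

    // Hypothetical stand-in for db.users.
    struct Users {
        deactivated: BTreeSet<String>,
    }

    impl Users {
        fn is_deactivated(&self, user_id: &str) -> bool {
            self.deactivated.contains(user_id)
        }
    }

    fn bump_notification_counts(
        joined_members: &[String],
        sender: &str,
        users: &Users,
        counts: &mut BTreeMap<String, u64>,
    ) {
        for user in joined_members
            .iter()
            // Don't notify the user of their own events.
            .filter(|user_id| user_id.as_str() != sender)
            // New in this patch: deactivated accounts are skipped entirely.
            .filter(|user_id| !users.is_deactivated(user_id.as_str()))
        {
            // Equivalent of userroomid_notificationcount.update_and_fetch(.., utils::increment).
            *counts.entry(user.clone()).or_insert(0) += 1;
        }
    }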
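The "join cursed rooms" patch earlier in this series replaces ?-propagation with match ... Err(_) => continue in several places, so a single unverifiable auth or state event no longer aborts the whole join. A small sketch of that shape under join_all, with validate as a hypothetical stand-in for validate_and_add_event_id / handle_incoming_pdu and toy integer events; it needs the futures crate, which the project already uses.

    async fn validate(raw: u32) -> Result<u32, String> {
        if raw % 2 == 0 {
            Ok(raw)
        } else {
            Err("bad pdu".to_owned())
        }
    }

    async fn keep_good_events(raws: Vec<u32>) -> Vec<u32> {
        let mut good = Vec::new();
        for result in futures::future::join_all(raws.into_iter().map(validate)).await {
            let value = match result {
                Ok(t) => t,
                // Before this change a single bad event failed the whole room join.
                Err(e) => {
                    eprintln!("dropping event: {}", e);
                    continue;
                }
            };
            good.push(value);
        }
        good
    }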
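Threaded through the earlier hunks in this series is the change of pub_key_map from an exclusive &mut BTreeMap to a shared &RwLock over that map, which is what lets the join_all-driven fetches and handle_incoming_pdu share one signing-key cache concurrently. The exact generic parameters are elided in the diff text, so the map shape below is an assumption (server name to key id to base64 key); the lock handling mirrors the read()/write() calls with the "RwLock is poisoned." error shown above.

    use std::{collections::BTreeMap, sync::RwLock};

    // Assumed shape of pub_key_map; the real value types are not visible in the diff.
    type PubKeyMap = RwLock<BTreeMap<String, BTreeMap<String, String>>>;

    // As in fetch_required_signing_keys: insert keys for one server under the write lock.
    fn cache_server_keys(
        map: &PubKeyMap,
        server: &str,
        keys: BTreeMap<String, String>,
    ) -> Result<(), String> {
        map.write()
            .map_err(|_| "RwLock is poisoned.".to_owned())?
            .insert(server.to_owned(), keys);
        Ok(())
    }

    // As in handle_incoming_pdu: verification only needs the read lock, so concurrent
    // PDU handlers can share the map by reference instead of requiring &mut access.
    fn verify_with_keys(map: &PubKeyMap) -> Result<(), String> {
        let keys = map.read().map_err(|_| "RwLock is poisoned.".to_owned())?;
        // e.g. ruma::signatures::verify_event(&*keys, &value, &room_version)
        let _ = &*keys;
        Ok(())
    }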