From 79692db45dfe2fec6d0fec280bfba2be740d58a5 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Sun, 31 May 2020 22:49:07 +0200 Subject: [PATCH 01/62] First version of cargo-deb packaging setup --- Cargo.toml | 22 ++++++++++++++++ debian/config | 23 +++++++++++++++++ debian/env | 48 +++++++++++++++++++++++++++++++++++ debian/matrix-conduit.service | 21 +++++++++++++++ debian/postinst | 26 +++++++++++++++++++ debian/postrm | 22 ++++++++++++++++ debian/templates | 14 ++++++++++ 7 files changed, 176 insertions(+) create mode 100644 debian/config create mode 100644 debian/env create mode 100644 debian/matrix-conduit.service create mode 100644 debian/postinst create mode 100644 debian/postrm create mode 100644 debian/templates diff --git a/Cargo.toml b/Cargo.toml index 8b29be8..1feb7ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,3 +72,25 @@ required-features = ["conduit_bin"] [lib] name = "conduit" path = "src/lib.rs" + +[package.metadata.deb] +name = "matrix-conduit" +maintainer = "Paul van Tilburg " +copyright = "2020, Timo Kösters " +license-file = ["LICENSE", "3"] +depends = "$auto, ca-certificates" +extended-description = """\ +A fast Matrix homeserver that is optimized for smaller, personal servers, \ +instead of a server that has high scalability.""" +section = "net" +priority = "optional" +assets = [ + ["debian/env", "etc/matrix-conduit/env", "644"], + ["README.md", "usr/share/doc/matrix-conduit/", "644"], + ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], +] +conf-files = [ + "/etc/matrix-conduit/env" +] +maintainer-scripts = "debian/" +systemd-units = { unit-name = "matrix-conduit" } diff --git a/debian/config b/debian/config new file mode 100644 index 0000000..a9ad498 --- /dev/null +++ b/debian/config @@ -0,0 +1,23 @@ +#!/bin/sh +set -e + +# Source debconf library. +. /usr/share/debconf/confmodule + +CONDUIT_CONFIG_PATH=/etc/matrix-conduit +CONDUIT_CONFIG_FILE="$CONDUIT_CONFIG_PATH/env" + +# Ask for the Matrix homeserver name and port. +db_input high matrix-conduit/hostname || true +db_go + +db_input medium matrix-conduit/port || true +db_go + +# Update the values in the config. +db_get matrix-conduit/hostname +sed -i -e "s/^ROCKET_SERVER_NAME=.*/ROCKET_SERVER_NAME=\"$RET\"/" $CONDUIT_CONFIG_FILE +db_get matrix-conduit/port +sed -i -e "s/^ROCKET_PORT=.*/ROCKET_PORT=\"$RET\"/" $CONDUIT_CONFIG_FILE + +exit 0 diff --git a/debian/env b/debian/env new file mode 100644 index 0000000..3f72c5b --- /dev/null +++ b/debian/env @@ -0,0 +1,48 @@ +# Conduit homeserver configuration +# +# Conduit is an application based on the Rocket web framework. +# Configuration of Conduit can happen either via a `Rocket.toml` file that +# is placed in /var/lib/matrix-conduit or via setting the environment +# variables below. + +# The server (host)name of the Matrix homeserver. +# +# This is the hostname the homeserver will be reachable at via a client. +ROCKET_SERVER_NAME="YOURSERVERNAME.HERE" + +# The address the Matrix homeserver listens on. +# +# By default the server listens on 0.0.0.0. Change this for example to +# 127.0.0.1 to only listen on the localhost when using a reverse proxy. +#ROCKET_ADDRESS="0.0.0.0" + +# The port of the Matrix homeserver. +# +# This port is often accessed by a reverse proxy. +ROCKET_PORT="14004" + +# The maximum size of a Matrix HTTP requests in bytes. +# +# This mostly affects the size of files that can be downloaded/uploaded. +ROCKET_MAX_REQUEST_SIZE=20000000 + +# Whether user registration is allowed. +# +# User registration is allowed by default. 
+#ROCKET_REGISTRATION_DISABLED=true + +# Whether encryption is enabled. +# +# (End-to-end) encryption is enabled by default. +#ROCKET_ENCRYPTION_DISABLED=true + +# Whether federation with other Matrix servers is enabled. +# +# Federation is disabled by default; it is still experimental. +#ROCKET_FEDERATION_ENABLED=true + +# The log level of the homeserver. +# +# The log level is "critical" by default. +# Allowed values are: "off", "normal", "debug", "critical" +#ROCKET_LOG="normal" diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service new file mode 100644 index 0000000..96c4856 --- /dev/null +++ b/debian/matrix-conduit.service @@ -0,0 +1,21 @@ +[Unit] +Description=Conduit Matrix homeserver +After=network.target + +[Service] +User=_matrix-conduit +Group=_matrix-conduit +Type=simple + +Environment="ROCKET_ENV=production" +Environment="ROCKET_DATABASE_PATH=/var/lib/matrix-conduit" +EnvironmentFile=/etc/matrix-conduit/env + +ExecStart=/usr/sbin/matrix-conduit +Restart=on-failure +RestartSec=10 +StartLimitInterval=1m +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/debian/postinst b/debian/postinst new file mode 100644 index 0000000..ee684da --- /dev/null +++ b/debian/postinst @@ -0,0 +1,26 @@ +#!/bin/sh +set -e + +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit + +case "$1" in + configure) + # Create the `_matrix-conduit` user if it does not exist yet. + if ! getent passwd _matrix-conduit > /dev/null ; then + echo 'Adding system user for the Conduit Matrix homeserver' 1>&2 + adduser --system --group --quiet \ + --home $CONDUIT_DATABASE_PATH \ + --disabled-login \ + --force-badname \ + _matrix-conduit + fi + + # Create the database path if it does not exist yet. + if [ ! -d "$CONDUIT_DATABASE_PATH" ]; then + mkdir -p "$CONDUIT_DATABASE_PATH" + chown _matrix-conduit "$CONDUIT_DATABASE_PATH" + fi + ;; +esac + +#DEBHELPER# diff --git a/debian/postrm b/debian/postrm new file mode 100644 index 0000000..04ca325 --- /dev/null +++ b/debian/postrm @@ -0,0 +1,22 @@ +#!/bin/sh +set -e + +CONDUIT_CONFIG_PATH=/etc/matrix-conduit +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit + +case $1 in + purge) + # Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior + # "configuration files must be preserved when the package is removed, and + # only deleted when the package is purged." + if [ -d "$CONDUIT_CONFIG_PATH" ]; then + rm -r "$CONDUIT_CONFIG_PATH" + fi + + if [ -d "$CONDUIT_DATABASE_PATH" ]; then + rm -r "$CONDUIT_DATABASE_PATH" + fi + ;; +esac + +#DEBHELPER# diff --git a/debian/templates b/debian/templates new file mode 100644 index 0000000..66bf55c --- /dev/null +++ b/debian/templates @@ -0,0 +1,14 @@ +Template: matrix-conduit/hostname +Type: string +Default: localhost +Description: The server (host)name of the Matrix homeserver. + This is the hostname the homeserver will be reachable at via a client. + . + If set to "localhost", you can connect with a client locally and clients + from other hosts and also other servers will not be able to reach you! + +Template: matrix-conduit/port +Type: string +Default: 14004 +Description: The port of the Matrix homeserver + This port is often accessed by a reverse proxy. 
From f72554de1014eea6b5c224548e9d0aeb15553cf7 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 13 Nov 2020 20:35:22 +0100 Subject: [PATCH 02/62] Split config into a Debian and local part * The Debian part will be generated and managed by Debconf and configure homeserver name, address and port * The local part will just be a config file that shows the other configuration options Added the address configuration and moved the config generation from the config to the postinst script. --- Cargo.toml | 4 +-- debian/config | 14 +++------- debian/env | 48 ----------------------------------- debian/env.local | 33 ++++++++++++++++++++++++ debian/matrix-conduit.service | 3 ++- debian/postinst | 47 ++++++++++++++++++++++++++++++++++ debian/templates | 13 +++++++--- 7 files changed, 98 insertions(+), 64 deletions(-) delete mode 100644 debian/env create mode 100644 debian/env.local diff --git a/Cargo.toml b/Cargo.toml index 1feb7ca..d0dfcf4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,12 +85,12 @@ instead of a server that has high scalability.""" section = "net" priority = "optional" assets = [ - ["debian/env", "etc/matrix-conduit/env", "644"], + ["debian/env.local", "etc/matrix-conduit/local", "644"], ["README.md", "usr/share/doc/matrix-conduit/", "644"], ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], ] conf-files = [ - "/etc/matrix-conduit/env" + "/etc/matrix-conduit/local" ] maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } diff --git a/debian/config b/debian/config index a9ad498..8710ef9 100644 --- a/debian/config +++ b/debian/config @@ -4,20 +4,14 @@ set -e # Source debconf library. . /usr/share/debconf/confmodule -CONDUIT_CONFIG_PATH=/etc/matrix-conduit -CONDUIT_CONFIG_FILE="$CONDUIT_CONFIG_PATH/env" - -# Ask for the Matrix homeserver name and port. +# Ask for the Matrix homeserver name, address and port. db_input high matrix-conduit/hostname || true db_go +db_input low matrix-conduit/address || true +db_go + db_input medium matrix-conduit/port || true db_go -# Update the values in the config. -db_get matrix-conduit/hostname -sed -i -e "s/^ROCKET_SERVER_NAME=.*/ROCKET_SERVER_NAME=\"$RET\"/" $CONDUIT_CONFIG_FILE -db_get matrix-conduit/port -sed -i -e "s/^ROCKET_PORT=.*/ROCKET_PORT=\"$RET\"/" $CONDUIT_CONFIG_FILE - exit 0 diff --git a/debian/env b/debian/env deleted file mode 100644 index 3f72c5b..0000000 --- a/debian/env +++ /dev/null @@ -1,48 +0,0 @@ -# Conduit homeserver configuration -# -# Conduit is an application based on the Rocket web framework. -# Configuration of Conduit can happen either via a `Rocket.toml` file that -# is placed in /var/lib/matrix-conduit or via setting the environment -# variables below. - -# The server (host)name of the Matrix homeserver. -# -# This is the hostname the homeserver will be reachable at via a client. -ROCKET_SERVER_NAME="YOURSERVERNAME.HERE" - -# The address the Matrix homeserver listens on. -# -# By default the server listens on 0.0.0.0. Change this for example to -# 127.0.0.1 to only listen on the localhost when using a reverse proxy. -#ROCKET_ADDRESS="0.0.0.0" - -# The port of the Matrix homeserver. -# -# This port is often accessed by a reverse proxy. -ROCKET_PORT="14004" - -# The maximum size of a Matrix HTTP requests in bytes. -# -# This mostly affects the size of files that can be downloaded/uploaded. -ROCKET_MAX_REQUEST_SIZE=20000000 - -# Whether user registration is allowed. -# -# User registration is allowed by default. 
-#ROCKET_REGISTRATION_DISABLED=true - -# Whether encryption is enabled. -# -# (End-to-end) encryption is enabled by default. -#ROCKET_ENCRYPTION_DISABLED=true - -# Whether federation with other Matrix servers is enabled. -# -# Federation is disabled by default; it is still experimental. -#ROCKET_FEDERATION_ENABLED=true - -# The log level of the homeserver. -# -# The log level is "critical" by default. -# Allowed values are: "off", "normal", "debug", "critical" -#ROCKET_LOG="normal" diff --git a/debian/env.local b/debian/env.local new file mode 100644 index 0000000..cd552de --- /dev/null +++ b/debian/env.local @@ -0,0 +1,33 @@ +# Conduit homeserver local configuration +# +# Conduit is an application based on the Rocket web framework. +# Configuration of Conduit happens via Debconf (see the resulting config in +# `/etc/matrix-conduit/debian`) and optionally by uncommenting and tweaking the +# variables in this file below. + +# The maximum size of a Matrix HTTP requests in bytes. +# +# This mostly affects the size of files that can be downloaded/uploaded. +# It defaults to 20971520 (20MB). +#ROCKET_MAX_REQUEST_SIZE=20971520 + +# Whether user registration is allowed. +# +# User registration is not disabled by default. +#ROCKET_REGISTRATION_DISABLED=false + +# Whether encryption is enabled. +# +# (End-to-end) encryption is not disabled by default. +#ROCKET_ENCRYPTION_DISABLED=false + +# Whether federation with other Matrix servers is enabled. +# +# Federation is not enabled by default; it is still experimental. +#ROCKET_FEDERATION_ENABLED=false + +# The log level of the homeserver. +# +# The log level is "critical" by default. +# Allowed values are: "off", "normal", "debug", "critical" +#ROCKET_LOG="critical" diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service index 96c4856..42969c1 100644 --- a/debian/matrix-conduit.service +++ b/debian/matrix-conduit.service @@ -9,7 +9,8 @@ Type=simple Environment="ROCKET_ENV=production" Environment="ROCKET_DATABASE_PATH=/var/lib/matrix-conduit" -EnvironmentFile=/etc/matrix-conduit/env +EnvironmentFile=/etc/matrix-conduit/debian +EnvironmentFile=/etc/matrix-conduit/local ExecStart=/usr/sbin/matrix-conduit Restart=on-failure diff --git a/debian/postinst b/debian/postinst index ee684da..bd7fb85 100644 --- a/debian/postinst +++ b/debian/postinst @@ -1,6 +1,10 @@ #!/bin/sh set -e +. /usr/share/debconf/confmodule + +CONDUIT_CONFIG_PATH=/etc/matrix-conduit +CONDUIT_CONFIG_FILE="$CONDUIT_CONFIG_PATH/debian" CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit case "$1" in @@ -20,6 +24,49 @@ case "$1" in mkdir -p "$CONDUIT_DATABASE_PATH" chown _matrix-conduit "$CONDUIT_DATABASE_PATH" fi + + # Write the debconf values in the config. + db_get matrix-conduit/hostname + ROCKET_SERVER_NAME="$RET" + db_get matrix-conduit/address + ROCKET_ADDRESS="$RET" + db_get matrix-conduit/port + ROCKET_PORT="$RET" + cat >"$CONDUIT_CONFIG_FILE" << EOF +# Conduit homeserver Debian configuration +# +# Conduit is an application based on the Rocket web framework. +# Configuration of Conduit happens via Debconf (of which the resulting config +# is in this file) and optionally by uncommenting and tweaking the variables in +# /etc/matrix-conduit/local. + +# THIS FILE IS GENERATED BY DEBCONF AND WILL BE OVERRIDDEN! +# +# Please make changes by running: +# +# \$ dpkg-reconfigure matrix-conduit +# +# or by providing overriding changes in /etc/matrix-conduit/local. + +# The server (host)name of the Matrix homeserver. 
+# +# This is the hostname the homeserver will be reachable at via a client. +ROCKET_SERVER_NAME="$ROCKET_SERVER_NAME" + +# The address the Matrix homeserver listens on. +# +# By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to +# only listen on the localhost when using a reverse proxy. +ROCKET_ADDRESS="$ROCKET_ADDRESS" + +# The port of the Matrix homeserver. +# +# This port is could be any available port if accessed by a reverse proxy. +# By default the server listens on port 8000. +ROCKET_PORT="$ROCKET_PORT" + +# THIS FILE IS GENERATED BY DEBCONF AND WILL BE OVERRIDDEN! +EOF ;; esac diff --git a/debian/templates b/debian/templates index 66bf55c..a408f84 100644 --- a/debian/templates +++ b/debian/templates @@ -1,14 +1,21 @@ Template: matrix-conduit/hostname Type: string Default: localhost -Description: The server (host)name of the Matrix homeserver. +Description: The server (host)name of the Matrix homeserver This is the hostname the homeserver will be reachable at via a client. . If set to "localhost", you can connect with a client locally and clients - from other hosts and also other servers will not be able to reach you! + from other hosts and also other homeservers will not be able to reach you! + +Template: matrix-conduit/address +Type: string +Default: 127.0.0.1 +Description: The listen address of the Matrix homeserver + This is the address the homeserver will listen on. Leave it set to 127.0.0.1 + when using a reverse proxy. Template: matrix-conduit/port Type: string Default: 14004 Description: The port of the Matrix homeserver - This port is often accessed by a reverse proxy. + This port is most often just accessed by a reverse proxy. From 1b4a79d47c7f91b6d3562520637d28f6f00ec6c9 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 13 Nov 2020 20:50:58 +0100 Subject: [PATCH 03/62] Add and install README.Debian This file documents how the packaging is organized and how to configure and use it. It also details what the default deployment is like. --- Cargo.toml | 1 + debian/README.Debian | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 debian/README.Debian diff --git a/Cargo.toml b/Cargo.toml index d0dfcf4..76c52e5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,6 +86,7 @@ section = "net" priority = "optional" assets = [ ["debian/env.local", "etc/matrix-conduit/local", "644"], + ["debian/README.Debian", "usr/share/doc/matrix-conduit/", "644"], ["README.md", "usr/share/doc/matrix-conduit/", "644"], ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], ] diff --git a/debian/README.Debian b/debian/README.Debian new file mode 100644 index 0000000..69fb975 --- /dev/null +++ b/debian/README.Debian @@ -0,0 +1,29 @@ +Conduit for Debian +================== + +Configuration +------------- + +When installed, Debconf handles the configuration of the homeserver (host)name, +the address and port it listens on. These configuration variables end up in +/etc/matrix-conduit/debian. + +You can tweak more detailed settings by uncommenting and setting the variables +in /etc/matrix-conduit/local. This involves settings such as the maximum file +size for download/upload, enabling federation, etc. + +Running +------- + +The package uses the matrix-conduit.service systemd unit file to start and +stop Conduit. It loads the configuration files mentioned above to set up the +environment before running the server. + +This package assumes by default that Conduit is placed behind a reverse proxy +such as Apache or nginx. 
This default deployment entails just listening on +127.0.0.1 and the free port 14004 and is reachable via a client using the URL +http://localhost:14004. + +At a later stage this packaging may support also setting up TLS and running +stand-alone. In this case, however, you need to set up some certificates and +renewal, for it to work properly. From 1a341543ba155e3b4416368344cbb705690aceca Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Fri, 13 Nov 2020 21:37:22 +0100 Subject: [PATCH 04/62] Lock down the Conduit process in the systemd unit This will secure the service more and allow only what is necessary. --- debian/matrix-conduit.service | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service index 42969c1..5ab7917 100644 --- a/debian/matrix-conduit.service +++ b/debian/matrix-conduit.service @@ -7,6 +7,33 @@ User=_matrix-conduit Group=_matrix-conduit Type=simple +AmbientCapabilities= +CapabilityBoundingSet= +LockPersonality=yes +MemoryDenyWriteExecute=yes +NoNewPrivileges=yes +ProtectClock=yes +ProtectControlGroups=yes +ProtectHome=yes +ProtectHostname=yes +ProtectKernelLogs=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +ProtectSystem=strict +PrivateDevices=yes +PrivateMounts=yes +PrivateTmp=yes +PrivateUsers=yes +RemoveIPC=yes +RestrictAddressFamilies=AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +SystemCallArchitectures=native +SystemCallFilter=@system-service +SystemCallErrorNumber=EPERM +StateDirectory=matrix-conduit + Environment="ROCKET_ENV=production" Environment="ROCKET_DATABASE_PATH=/var/lib/matrix-conduit" EnvironmentFile=/etc/matrix-conduit/debian From ebb38cd33043004bf3a7d4c453e5e904406d8994 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sat, 16 Jan 2021 14:48:24 -0700 Subject: [PATCH 05/62] improvement: respect logout_devices param on password change Move logout devices comment next to relevant loop remove unnecessary log --- src/client_server/account.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index f48543e..3d6498f 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -572,16 +572,16 @@ pub async fn change_password_route( db.users.set_password(&sender_user, &body.new_password)?; - // TODO: Read logout_devices field when it's available and respect that, currently not supported in Ruma - // See: https://github.com/ruma/ruma/issues/107 - // Logout all devices except the current one - for id in db - .users - .all_device_ids(&sender_user) - .filter_map(|id| id.ok()) - .filter(|id| id != sender_device) - { - db.users.remove_device(&sender_user, &id)?; + if body.logout_devices { + // Logout all devices except the current one + for id in db + .users + .all_device_ids(&sender_user) + .filter_map(|id| id.ok()) + .filter(|id| id != sender_device) + { + db.users.remove_device(&sender_user, &id)?; + } } db.flush().await?; From 890187e00419602a85c85f18b87867d96ec1972c Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sat, 16 Jan 2021 22:15:45 -0700 Subject: [PATCH 06/62] improvement: Handle optional device_id field during login remove debug logging --- src/client_server/session.rs | 35 ++++++++++++++++++++++++++--------- src/database/users.rs | 2 +- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index da3d8d8..173e823 100644 --- 
a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -77,7 +77,6 @@ pub async fn login_route( // Generate new device id if the user didn't specify one let device_id = body - .body .device_id .clone() .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); @@ -85,14 +84,32 @@ pub async fn login_route( // Generate a new token for the device let token = utils::random_string(TOKEN_LENGTH); - // TODO: Don't always create a new device - // Add device - db.users.create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - )?; + let mut create_new_device = true; + + // Only search db for existing device if one was provided in the request + match &body.device_id { + Some(_) => { + // Look to see if provided device_id already exists + if let Some(_) = db.users.all_device_ids(&user_id).find(|x| match x { + Ok(x) if **x == *device_id => true, + _ => false, + }) { + // Replace token for existing device + db.users.set_token(&user_id, &device_id, &token)?; + create_new_device = false; + } + } + _ => (), + }; + + if create_new_device { + db.users.create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + )?; + } info!("{} logged in", user_id); diff --git a/src/database/users.rs b/src/database/users.rs index 2a03960..d6a4ecf 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -251,7 +251,7 @@ impl Users { } /// Replaces the access token of one device. - fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.to_string().as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); From 762255fa8d0ec7797ead205ac454479ff53fd860 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Sun, 17 Jan 2021 08:39:47 -0700 Subject: [PATCH 07/62] Simplify device creation logic during login --- src/client_server/session.rs | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 173e823..48fbea2 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -84,25 +84,17 @@ pub async fn login_route( // Generate a new token for the device let token = utils::random_string(TOKEN_LENGTH); - let mut create_new_device = true; + // Determine if device_id was provided and exists in the db for this user + let device_exists = body.device_id.as_ref().map_or(false, |device_id| { + db.users + .all_device_ids(&user_id) + .find(|x| x.as_ref().map_or(false, |v| v == device_id)) + .is_some() + }); - // Only search db for existing device if one was provided in the request - match &body.device_id { - Some(_) => { - // Look to see if provided device_id already exists - if let Some(_) = db.users.all_device_ids(&user_id).find(|x| match x { - Ok(x) if **x == *device_id => true, - _ => false, - }) { - // Replace token for existing device - db.users.set_token(&user_id, &device_id, &token)?; - create_new_device = false; - } - } - _ => (), - }; - - if create_new_device { + if device_exists { + db.users.set_token(&user_id, &device_id, &token)?; + } else { db.users.create_device( &user_id, &device_id, From a1e296374f2e42e4b22789cc1736f73bf5e85a1f Mon Sep 17 00:00:00 2001 From: Valkum Date: Fri, 22 Jan 2021 20:11:19 +0100 Subject: [PATCH 08/62] Allow the complement test image to use build artifacts --- 
tests/Complement.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 306105a..0ef8f90 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -9,7 +9,7 @@ ARG SCCACHE_ENDPOINT ARG SCCACHE_S3_USE_SSL COPY . . -RUN cargo build +RUN test -e target/release/cond_test || cargo build --release --offline FROM valkum/docker-rust-ci:latest WORKDIR /workdir From 265fab843a42d6eaef7a777104a72d101a2e91f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 24 Jan 2021 16:05:52 +0100 Subject: [PATCH 09/62] feature: push rule settings --- src/client_server/push.rs | 624 +++++++++++++++++++++++++++++++++++++- src/main.rs | 5 + 2 files changed, 616 insertions(+), 13 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 05ba8d0..667d667 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -1,16 +1,22 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; -use log::warn; use ruma::{ api::client::{ error::ErrorKind, - r0::push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled}, + r0::push::{ + delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, + get_pushrules_all, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, + }, }, events::EventType, + push::{ + ConditionalPushRuleInit, ContentPushRule, OverridePushRule, PatternedPushRuleInit, + RoomPushRule, SenderPushRule, SimplePushRuleInit, UnderridePushRule, + }, }; #[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; +use rocket::{delete, get, post, put}; #[cfg_attr( feature = "conduit_bin", @@ -36,16 +42,201 @@ pub async fn get_pushrules_all_route( .into()) } -#[cfg_attr(feature = "conduit_bin", put( - "/_matrix/client/r0/pushrules/<_>/<_>/<_>", - //data = "" -))] +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] +pub async fn get_pushrule_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = event.content.global; + let rule = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.clone().into()), + RuleKind::_Custom(_) => None, + }; + + if let Some(rule) = rule { + Ok(get_pushrule::Response { rule }.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + } +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] pub async fn set_pushrule_route( db: State<'_, Database>, - //body: Ruma, + body: Ruma>, ) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_route"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + } + + global.override_.insert(OverridePushRule( + ConditionalPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + conditions: body.conditions.clone(), + } + .into(), + )); + } + RuleKind::Underride => { + if let Some(rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + } + + global.underride.insert(UnderridePushRule( + ConditionalPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + conditions: body.conditions.clone(), + } + .into(), + )); + } + RuleKind::Sender => { + if let Some(rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + } + + global.sender.insert(SenderPushRule( + SimplePushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + } + .into(), + )); + } + RuleKind::Room => { + if let Some(rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + } + + global.room.insert(RoomPushRule( + SimplePushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + } + .into(), + )); + } + RuleKind::Content => { + if let Some(rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + } + + global.content.insert(ContentPushRule( 
+ PatternedPushRuleInit { + actions: body.actions.clone(), + default: false, + enabled: true, + rule_id: body.rule_id.clone(), + pattern: body.pattern.clone().unwrap_or_default(), + } + .into(), + )); + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; db.flush().await?; @@ -54,19 +245,426 @@ pub async fn set_pushrule_route( #[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled") + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") +)] +pub async fn get_pushrule_actions_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + let actions = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map(|rule| rule.0.actions.clone()), + RuleKind::_Custom(_) => None, + }; + + db.flush().await?; + + Ok(get_pushrule_actions::Response { + actions: actions.unwrap_or_default(), + } + .into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") +)] +pub async fn set_pushrule_actions_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + rule.0.actions = body.actions.clone(); + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + rule.0.actions = body.actions.clone(); + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + rule.0.actions = body.actions.clone(); + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + rule.0.actions = body.actions.clone(); + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + rule.0.actions = body.actions.clone(); + global.content.insert(rule); + } + } + RuleKind::_Custom(_) => {} + }; + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; + + db.flush().await?; + + Ok(set_pushrule_actions::Response.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") +)] +pub async fn get_pushrule_enabled_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + let enabled = match body.kind { + RuleKind::Override => global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Underride => global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Sender => global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Room => global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::Content => global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .map_or(false, |rule| rule.0.enabled), + RuleKind::_Custom(_) => false, + }; + + db.flush().await?; + + Ok(get_pushrule_enabled::Response { enabled }.into()) +} + +#[cfg_attr( + feature = "conduit_bin", + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") )] pub async fn set_pushrule_enabled_route( db: State<'_, Database>, + body: Ruma>, ) -> ConduitResult { - // TODO - warn!("TODO: set_pushrule_enabled_route"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(mut rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + rule.0.enabled = body.enabled; + global.override_.insert(rule); + } + } + RuleKind::Underride => { + if let Some(mut rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + rule.0.enabled = body.enabled; + global.underride.insert(rule); + } + } + RuleKind::Sender => { + if let Some(mut rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + rule.0.enabled = body.enabled; + global.sender.insert(rule); + } + } + RuleKind::Room => { + if let Some(mut rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + rule.0.enabled = body.enabled; + global.room.insert(rule); + } + } + RuleKind::Content => { + if let Some(mut rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + rule.0.enabled = body.enabled; + global.content.insert(rule); + } + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; db.flush().await?; Ok(set_pushrule_enabled::Response.into()) } +#[cfg_attr( + feature = "conduit_bin", + delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") +)] +pub async fn delete_pushrule_route( + db: State<'_, Database>, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != "global" { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes 
other than 'global' are not supported.", + )); + } + + let mut event = db + .account_data + .get::(None, &sender_user, EventType::PushRules)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "PushRules event not found.", + ))?; + + let global = &mut event.content.global; + match body.kind { + RuleKind::Override => { + if let Some(rule) = global + .override_ + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.override_.remove(&rule); + } + } + RuleKind::Underride => { + if let Some(rule) = global + .underride + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.underride.remove(&rule); + } + } + RuleKind::Sender => { + if let Some(rule) = global + .sender + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.sender.remove(&rule); + } + } + RuleKind::Room => { + if let Some(rule) = global + .room + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.room.remove(&rule); + } + } + RuleKind::Content => { + if let Some(rule) = global + .content + .iter() + .find(|rule| rule.0.rule_id == body.rule_id) + .cloned() + { + global.content.remove(&rule); + } + } + RuleKind::_Custom(_) => {} + } + + db.account_data.update( + None, + &sender_user, + EventType::PushRules, + &event, + &db.globals, + )?; + + db.flush().await?; + + Ok(delete_pushrule::Response.into()) +} + #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] pub async fn get_pushers_route() -> ConduitResult { Ok(get_pushers::Response { diff --git a/src/main.rs b/src/main.rs index 9c0eab6..93ab560 100644 --- a/src/main.rs +++ b/src/main.rs @@ -55,7 +55,12 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_capabilities_route, client_server::get_pushrules_all_route, client_server::set_pushrule_route, + client_server::get_pushrule_route, client_server::set_pushrule_enabled_route, + client_server::get_pushrule_enabled_route, + client_server::get_pushrule_actions_route, + client_server::set_pushrule_actions_route, + client_server::delete_pushrule_route, client_server::get_room_event_route, client_server::get_filter_route, client_server::create_filter_route, From 6de5b3c2a06545556fac530905edd976f6c01d84 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 13 Dec 2020 13:41:00 +0100 Subject: [PATCH 10/62] Update repository link in crate metadata --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 76c52e5..4bf9247 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ description = "A Matrix homeserver written in Rust" license = "Apache-2.0" authors = ["timokoesters "] homepage = "https://conduit.rs" -repository = "https://git.koesters.xyz/timo/conduit" +repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.1.0" edition = "2018" From a61b1cef5bfa311484d84f4359262b46dd5a0a3c Mon Sep 17 00:00:00 2001 From: Valkum Date: Thu, 4 Feb 2021 23:51:20 +0100 Subject: [PATCH 11/62] Fix binary name typo --- tests/Complement.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 0ef8f90..24ee9ea 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -9,7 +9,7 @@ ARG SCCACHE_ENDPOINT ARG SCCACHE_S3_USE_SSL COPY . . 
-RUN test -e target/release/cond_test || cargo build --release --offline +RUN test -e target/release/conduit || cargo build --release --offline FROM valkum/docker-rust-ci:latest WORKDIR /workdir From 1d7207b39e15b249ce5b704c04d44cb442228168 Mon Sep 17 00:00:00 2001 From: Valkum Date: Fri, 5 Feb 2021 02:06:14 +0100 Subject: [PATCH 12/62] Sync are-we-synapse with dendrite --- tests/sytest/are-we-synapse-yet.list | 1258 +++++++++++++------------- tests/sytest/are-we-synapse-yet.py | 38 +- 2 files changed, 666 insertions(+), 630 deletions(-) diff --git a/tests/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list index cdc280a..9909198 100644 --- a/tests/sytest/are-we-synapse-yet.list +++ b/tests/sytest/are-we-synapse-yet.list @@ -17,17 +17,17 @@ reg POST /register rejects registration of usernames with '£' reg POST /register rejects registration of usernames with 'é' reg POST /register rejects registration of usernames with '\n' reg POST /register rejects registration of usernames with ''' -reg POST /r0/admin/register with shared secret -reg POST /r0/admin/register admin with shared secret -reg POST /r0/admin/register with shared secret downcases capitals -reg POST /r0/admin/register with shared secret disallows symbols -reg POST rejects invalid utf-8 in JSON +reg POST /r0/admin/register with shared secret +reg POST /r0/admin/register admin with shared secret +reg POST /r0/admin/register with shared secret downcases capitals +reg POST /r0/admin/register with shared secret disallows symbols +reg POST rejects invalid utf-8 in JSON log GET /login yields a set of flows -log POST /login can log in as a user -log POST /login returns the same device_id as that in the request -log POST /login can log in as a user with just the local part of the id -log POST /login as non-existing user is rejected -log POST /login wrong password is rejected +log POST /login can log in as a user +log POST /login returns the same device_id as that in the request +log POST /login can log in as a user with just the local part of the id +log POST /login as non-existing user is rejected +log POST /login wrong password is rejected log Interactive authentication types include SSO log Can perform interactive authentication with SSO log The user must be consistent through an interactive authentication session with SSO @@ -39,18 +39,18 @@ pro PUT /profile/:user_id/displayname sets my name pro GET /profile/:user_id/displayname publicly accessible pro PUT /profile/:user_id/avatar_url sets my avatar pro GET /profile/:user_id/avatar_url publicly accessible -dev GET /device/{deviceId} +dev GET /device/{deviceId} dev GET /device/{deviceId} gives a 404 for unknown devices -dev GET /devices -dev PUT /device/{deviceId} updates device fields +dev GET /devices +dev PUT /device/{deviceId} updates device fields dev PUT /device/{deviceId} gives a 404 for unknown devices -dev DELETE /device/{deviceId} -dev DELETE /device/{deviceId} requires UI auth user to match device owner -dev DELETE /device/{deviceId} with no body gives a 401 -dev The deleted device must be consistent through an interactive auth session +dev DELETE /device/{deviceId} +dev DELETE /device/{deviceId} requires UI auth user to match device owner +dev DELETE /device/{deviceId} with no body gives a 401 +dev The deleted device must be consistent through an interactive auth session dev Users receive device_list updates for their own devices -pre GET /presence/:user_id/status fetches initial status -pre PUT /presence/:user_id/status updates my presence +pre GET 
/presence/:user_id/status fetches initial status +pre PUT /presence/:user_id/status updates my presence crm POST /createRoom makes a public room crm POST /createRoom makes a private room crm POST /createRoom makes a private room with invites @@ -62,21 +62,21 @@ crm POST /createRoom rejects attempts to create rooms with numeric versions crm POST /createRoom rejects attempts to create rooms with unknown versions crm POST /createRoom ignores attempts to set the room version via creation_content mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event +mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -mem GET /rooms/:room_id/joined_members fetches my membership -v1s GET /rooms/:room_id/initialSync fetches initial sync state -pub GET /publicRooms lists newly-created room +mem GET /rooms/:room_id/joined_members fetches my membership +v1s GET /rooms/:room_id/initialSync fetches initial sync state +pub GET /publicRooms lists newly-created room ali GET /directory/room/:room_alias yields room ID mem GET /joined_rooms lists newly-created room rst POST /rooms/:room_id/state/m.room.name sets name rst GET /rooms/:room_id/state/m.room.name gets name rst POST /rooms/:room_id/state/m.room.topic sets topic rst GET /rooms/:room_id/state/m.room.topic gets topic -rst GET /rooms/:room_id/state fetches entire room state +rst GET /rooms/:room_id/state fetches entire room state crm POST /createRoom with creation content ali PUT /directory/room/:room_alias creates alias -nsp GET /rooms/:room_id/aliases lists aliases +nsp GET /rooms/:room_id/aliases lists aliases jon POST /rooms/:room_id/join can join a room jon POST /join/:room_alias can join a room jon POST /join/:room_id can join a room @@ -89,748 +89,778 @@ snd POST /rooms/:room_id/send/:event_type sends a message snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id get GET /rooms/:room_id/messages returns a message -get GET /rooms/:room_id/messages lazy loads members correctly -typ PUT /rooms/:room_id/typing/:user_id sets typing notification +get GET /rooms/:room_id/messages lazy loads members correctly +typ PUT /rooms/:room_id/typing/:user_id sets typing notification +typ Typing notifications don't leak (3 subtests) rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels rst PUT /rooms/:room_id/state/m.room.power_levels can set levels rst PUT power_levels should not explode if the old power levels were empty rst Both GET and PUT work -rct POST /rooms/:room_id/receipt can create receipts +rct POST /rooms/:room_id/receipt can create receipts red POST /rooms/:room_id/read_markers can create read marker -med POST /media/v1/upload can create an upload -med GET /media/v1/download can fetch the value again -cap GET /capabilities is present and well formed for registered user +med POST /media/r0/upload can create an upload +med GET /media/r0/download can fetch the value again +cap GET /capabilities is present and well formed for registered user cap GET /r0/capabilities is not public -reg Register with a recaptcha -reg registration is idempotent, without username specified -reg registration is idempotent, with username specified -reg registration remembers parameters -reg registration accepts non-ascii passwords -reg registration with 
inhibit_login inhibits login +reg Register with a recaptcha +reg registration is idempotent, without username specified +reg registration is idempotent, with username specified +reg registration remembers parameters +reg registration accepts non-ascii passwords +reg registration with inhibit_login inhibits login reg User signups are forbidden from starting with '_' -reg Can register using an email address -log Can login with 3pid and password using m.login.password -log login types include SSO -log /login/cas/redirect redirects if the old m.login.cas login type is listed -log Can login with new user via CAS -lox Can logout current device -lox Can logout all devices +reg Can register using an email address +log Can login with 3pid and password using m.login.password +log login types include SSO +log /login/cas/redirect redirects if the old m.login.cas login type is listed +log Can login with new user via CAS +lox Can logout current device +lox Can logout all devices lox Request to logout with invalid an access token is rejected lox Request to logout without an access token is rejected -log After changing password, can't log in with old password -log After changing password, can log in with new password -log After changing password, existing session still works -log After changing password, a different session no longer works by default -log After changing password, different sessions can optionally be kept -psh Pushers created with a different access token are deleted on password change -psh Pushers created with a the same access token are not deleted on password change -acc Can deactivate account -acc Can't deactivate account with wrong password -acc After deactivating account, can't log in with password +log After changing password, can't log in with old password +log After changing password, can log in with new password +log After changing password, existing session still works +log After changing password, a different session no longer works by default +log After changing password, different sessions can optionally be kept +psh Pushers created with a different access token are deleted on password change +psh Pushers created with a the same access token are not deleted on password change +acc Can deactivate account +acc Can't deactivate account with wrong password +acc After deactivating account, can't log in with password acc After deactivating account, can't log in with an email -v1s initialSync sees my presence status -pre Presence change reports an event to myself -pre Friends presence changes reports events +v1s initialSync sees my presence status +pre Presence change reports an event to myself +pre Friends presence changes reports events crm Room creation reports m.room.create to myself crm Room creation reports m.room.member to myself -rst Setting room topic reports m.room.topic to myself -v1s Global initialSync -v1s Global initialSync with limit=0 gives no messages -v1s Room initialSync -v1s Room initialSync with limit=0 gives no messages -rst Setting state twice is idempotent -jon Joining room twice is idempotent +rst Setting room topic reports m.room.topic to myself +v1s Global initialSync +v1s Global initialSync with limit=0 gives no messages +v1s Room initialSync +v1s Room initialSync with limit=0 gives no messages +rst Setting state twice is idempotent +jon Joining room twice is idempotent syn New room members see their own join event -v1s New room members see existing users' presence in room initialSync +v1s New room members see existing users' presence in room initialSync 
syn Existing members see new members' join events -syn Existing members see new members' presence -v1s All room members see all room members' presence in global initialSync -f,jon Remote users can join room by alias -syn New room members see their own join event -v1s New room members see existing members' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new member's presence -v1s New room members see first user's profile information in global initialSync -v1s New room members see first user's profile information in per-room initialSync -f,jon Remote users may not join unfederated rooms +syn Existing members see new members' presence +v1s All room members see all room members' presence in global initialSync +f,jon Remote users can join room by alias +syn New room members see their own join event +v1s New room members see existing members' presence in room initialSync +syn Existing members see new members' join events +syn Existing members see new member's presence +v1s New room members see first user's profile information in global initialSync +v1s New room members see first user's profile information in per-room initialSync +f,jon Remote users may not join unfederated rooms syn Local room members see posted message events v1s Fetching eventstream a second time doesn't yield the message again syn Local non-members don't see posted message events -get Local room members can get room messages +get Local room members can get room messages f,syn Remote room members also see posted message events -f,get Remote room members can get room messages +f,get Remote room members can get room messages get Message history can be paginated f,get Message history can be paginated over federation -eph Ephemeral messages received from clients are correctly expired +eph Ephemeral messages received from clients are correctly expired ali Room aliases can contain Unicode f,ali Remote room alias queries can handle Unicode -ali Canonical alias can be set -ali Canonical alias can include alt_aliases +ali Canonical alias can be set +ali Canonical alias can include alt_aliases ali Regular users can add and delete aliases in the default room configuration ali Regular users can add and delete aliases when m.room.aliases is restricted ali Deleting a non-existent alias should return a 404 ali Users can't delete other's aliases -ali Users with sufficient power-level can delete other's aliases -ali Can delete canonical alias -ali Alias creators can delete alias with no ops -ali Alias creators can delete canonical alias with no ops -ali Only room members can list aliases of a room -inv Can invite users to invite-only rooms -inv Uninvited users cannot join the room -inv Invited user can reject invite -f,inv Invited user can reject invite over federation -f,inv Invited user can reject invite over federation several times -inv Invited user can reject invite for empty room -f,inv Invited user can reject invite over federation for empty room -inv Invited user can reject local invite after originator leaves -inv Invited user can see room metadata -f,inv Remote invited user can see room metadata -inv Users cannot invite themselves to a room -inv Users cannot invite a user that is already in the room -ban Banned user is kicked and may not rejoin until unbanned -f,ban Remote banned user is kicked and may not rejoin until unbanned -ban 'ban' event respects room powerlevel -plv setting 'm.room.name' respects room powerlevel +ali Users with sufficient power-level can delete other's 
aliases +ali Can delete canonical alias +ali Alias creators can delete alias with no ops +ali Alias creators can delete canonical alias with no ops +ali Only room members can list aliases of a room +inv Can invite users to invite-only rooms +inv Uninvited users cannot join the room +inv Invited user can reject invite +f,inv Invited user can reject invite over federation +f,inv Invited user can reject invite over federation several times +inv Invited user can reject invite for empty room +f,inv Invited user can reject invite over federation for empty room +inv Invited user can reject local invite after originator leaves +inv Invited user can see room metadata +f,inv Remote invited user can see room metadata +inv Users cannot invite themselves to a room +inv Users cannot invite a user that is already in the room +ban Banned user is kicked and may not rejoin until unbanned +f,ban Remote banned user is kicked and may not rejoin until unbanned +ban 'ban' event respects room powerlevel +plv setting 'm.room.name' respects room powerlevel plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) plv Unprivileged users can set m.room.topic if it only needs level 0 plv Users cannot set ban powerlevel higher than their own (2 subtests) plv Users cannot set kick powerlevel higher than their own (2 subtests) plv Users cannot set redact powerlevel higher than their own (2 subtests) -v1s Check that event streams started after a client joined a room work (SYT-1) -v1s Event stream catches up fully after many messages -xxx POST /rooms/:room_id/redact/:event_id as power user redacts message -xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message -xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message -xxx POST /redact disallows redaction of event in different room -xxx Redaction of a redaction redacts the redaction reason -v1s A departed room is still included in /initialSync (SPEC-216) -v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) -rst Can get rooms/{roomId}/state for a departed room (SPEC-216) +v1s Check that event streams started after a client joined a room work (SYT-1) +v1s Event stream catches up fully after many messages +xxx POST /rooms/:room_id/redact/:event_id as power user redacts message +xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message +xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message +xxx POST /redact disallows redaction of event in different room +xxx Redaction of a redaction redacts the redaction reason +v1s A departed room is still included in /initialSync (SPEC-216) +v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) +rst Can get rooms/{roomId}/state for a departed room (SPEC-216) mem Can get rooms/{roomId}/members for a departed room (SPEC-216) -get Can get rooms/{roomId}/messages for a departed room (SPEC-216) -rst Can get 'm.room.name' state for a departed room (SPEC-216) +get Can get rooms/{roomId}/messages for a departed room (SPEC-216) +rst Can get 'm.room.name' state for a departed room (SPEC-216) syn Getting messages going forward is limited for a departed room (SPEC-216) -3pd Can invite existing 3pid -3pd Can invite existing 3pid with no ops into a private room -3pd Can invite existing 3pid in createRoom -3pd Can invite unbound 3pid -f,3pd Can invite unbound 3pid over federation -3pd Can invite unbound 3pid with no ops into a private room -f,3pd Can invite unbound 3pid over federation with no ops into a 
private room -f,3pd Can invite unbound 3pid over federation with users from both servers -3pd Can accept unbound 3pid invite after inviter leaves -3pd Can accept third party invite with /join +3pd Can invite existing 3pid +3pd Can invite existing 3pid with no ops into a private room +3pd Can invite existing 3pid in createRoom +3pd Can invite unbound 3pid +f,3pd Can invite unbound 3pid over federation +3pd Can invite unbound 3pid with no ops into a private room +f,3pd Can invite unbound 3pid over federation with no ops into a private room +f,3pd Can invite unbound 3pid over federation with users from both servers +3pd Can accept unbound 3pid invite after inviter leaves +3pd Can accept third party invite with /join 3pd 3pid invite join with wrong but valid signature are rejected 3pd 3pid invite join valid signature but revoked keys are rejected 3pd 3pid invite join valid signature but unreachable ID server are rejected gst Guest user cannot call /events globally gst Guest users can join guest_access rooms -gst Guest users can send messages to guest_access rooms if joined -gst Guest user calling /events doesn't tightloop -gst Guest users are kicked from guest_access rooms on revocation of guest_access +gst Guest users can send messages to guest_access rooms if joined +gst Guest user calling /events doesn't tightloop +gst Guest users are kicked from guest_access rooms on revocation of guest_access gst Guest user can set display names -gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation -gst Guest user can upgrade to fully featured user +gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation +gst Guest user can upgrade to fully featured user gst Guest user cannot upgrade other users -pub GET /publicRooms lists rooms -pub GET /publicRooms includes avatar URLs -gst Guest users can accept invites to private rooms over federation -gst Guest users denied access over federation if guest access prohibited -mem Room members can override their displayname on a room-specific basis +pub GET /publicRooms lists rooms +pub GET /publicRooms includes avatar URLs +gst Guest users can accept invites to private rooms over federation +gst Guest users denied access over federation if guest access prohibited +mem Room members can override their displayname on a room-specific basis mem Room members can join a room with an overridden displayname -mem Users cannot kick users from a room they are not in -mem Users cannot kick users who have already left a room -typ Typing notification sent to local room members -f,typ Typing notifications also sent to remote room members -typ Typing can be explicitly stopped -rct Read receipts are visible to /initialSync -rct Read receipts are sent as events -rct Receipts must be m.read -pro displayname updates affect room member events -pro avatar_url updates affect room member events +mem Users cannot kick users from a room they are not in +mem Users cannot kick users who have already left a room +typ Typing notification sent to local room members +f,typ Typing notifications also sent to remote room members +typ Typing can be explicitly stopped +rct Read receipts are visible to /initialSync +rct Read receipts are sent as events +rct Receipts must be m.read +pro displayname updates affect room member events +pro avatar_url updates affect room member events gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "shared" 
allows/forbids appropriately for Guest users -gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users gst Guest non-joined user cannot call /events on shared room gst Guest non-joined user cannot call /events on invited room gst Guest non-joined user cannot call /events on joined room gst Guest non-joined user cannot call /events on default room -gst Guest non-joined user can call /events on world_readable room +gst Guest non-joined user can call /events on world_readable room gst Guest non-joined users can get state for world_readable rooms gst Guest non-joined users can get individual state for world_readable rooms gst Guest non-joined users cannot room initalSync for non-world_readable rooms -gst Guest non-joined users can room initialSync for world_readable rooms +gst Guest non-joined users can room initialSync for world_readable rooms gst Guest non-joined users can get individual state for world_readable rooms after leaving gst Guest non-joined users cannot send messages to guest_access rooms if not joined gst Guest users can sync from world_readable guest_access rooms if joined -gst Guest users can sync from shared guest_access rooms if joined -gst Guest users can sync from invited guest_access rooms if joined -gst Guest users can sync from joined guest_access rooms if joined +gst Guest users can sync from shared guest_access rooms if joined +gst Guest users can sync from invited guest_access rooms if joined +gst Guest users can sync from joined guest_access rooms if joined gst Guest users can sync from default guest_access rooms if joined ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users -ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users -ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users -ath m.room.history_visibility == "default" allows/forbids appropriately for Real users +ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users +ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users +ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users +ath m.room.history_visibility == "default" allows/forbids appropriately for Real users ath Real non-joined user cannot call /events on shared room ath Real non-joined user cannot call /events on invited room ath Real non-joined user cannot call /events on joined room ath Real non-joined user cannot call /events on default room -ath Real non-joined user can call /events on world_readable room +ath Real non-joined user can call /events on world_readable room ath Real non-joined users can get state for world_readable rooms ath Real non-joined users can get individual state for world_readable rooms ath Real non-joined users cannot room initalSync for non-world_readable rooms -ath Real non-joined users can 
room initialSync for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms after leaving +ath Real non-joined users can room initialSync for world_readable rooms +ath Real non-joined users can get individual state for world_readable rooms after leaving ath Real non-joined users cannot send messages to guest_access rooms if not joined ath Real users can sync from world_readable guest_access rooms if joined -ath Real users can sync from shared guest_access rooms if joined -ath Real users can sync from invited guest_access rooms if joined -ath Real users can sync from joined guest_access rooms if joined +ath Real users can sync from shared guest_access rooms if joined +ath Real users can sync from invited guest_access rooms if joined +ath Real users can sync from joined guest_access rooms if joined ath Real users can sync from default guest_access rooms if joined -ath Only see history_visibility changes on boundaries +ath Only see history_visibility changes on boundaries f,ath Backfill works correctly with history visibility set to joined -fgt Forgotten room messages cannot be paginated -fgt Forgetting room does not show up in v2 /sync -fgt Can forget room you've been kicked from +fgt Forgotten room messages cannot be paginated +fgt Forgetting room does not show up in v2 /sync +fgt Can forget room you've been kicked from fgt Can't forget room you're still in -mem Can re-join room if re-invited -ath Only original members of the room can see messages from erased users +fgt Can re-join room if re-invited +ath Only original members of the room can see messages from erased users mem /joined_rooms returns only joined rooms -mem /joined_members return joined members -ctx /context/ on joined room works -ctx /context/ on non world readable room does not work -ctx /context/ returns correct number of events -ctx /context/ with lazy_load_members filter works +mem /joined_members return joined members +ctx /context/ on joined room works +ctx /context/ on non world readable room does not work +ctx /context/ returns correct number of events +ctx /context/ with lazy_load_members filter works get /event/ on joined room works get /event/ on non world readable room does not work get /event/ does not allow access to events before the user joined mem Can get rooms/{roomId}/members -mem Can get rooms/{roomId}/members at a given point -mem Can filter rooms/{roomId}/members -upg /upgrade creates a new room -upg /upgrade should preserve room visibility for public rooms -upg /upgrade should preserve room visibility for private rooms -upg /upgrade copies >100 power levels to the new room -upg /upgrade copies the power levels to the new room -upg /upgrade preserves the power level of the upgrading user in old and new rooms -upg /upgrade copies important state to the new room -upg /upgrade copies ban events to the new room -upg local user has push rules copied to upgraded room -f,upg remote user has push rules copied to upgraded room -upg /upgrade moves aliases to the new room -upg /upgrade moves remote aliases to the new room -upg /upgrade preserves direct room state -upg /upgrade preserves room federation ability -upg /upgrade restricts power levels in the old room -upg /upgrade restricts power levels in the old room when the old PLs are unusual -upg /upgrade to an unknown version is rejected -upg /upgrade is rejected if the user can't send state events -upg /upgrade of a bogus room fails gracefully -upg Cannot send tombstone event that points to the same room -f,upg Local 
and remote users' homeservers remove a room from their public directory on upgrade -rst Name/topic keys are correct +mem Can get rooms/{roomId}/members at a given point +mem Can filter rooms/{roomId}/members +upg /upgrade creates a new room +upg /upgrade should preserve room visibility for public rooms +upg /upgrade should preserve room visibility for private rooms +upg /upgrade copies >100 power levels to the new room +upg /upgrade copies the power levels to the new room +upg /upgrade preserves the power level of the upgrading user in old and new rooms +upg /upgrade copies important state to the new room +upg /upgrade copies ban events to the new room +upg local user has push rules copied to upgraded room +f,upg remote user has push rules copied to upgraded room +upg /upgrade moves aliases to the new room +upg /upgrade moves remote aliases to the new room +upg /upgrade preserves direct room state +upg /upgrade preserves room federation ability +upg /upgrade restricts power levels in the old room +upg /upgrade restricts power levels in the old room when the old PLs are unusual +upg /upgrade to an unknown version is rejected +upg /upgrade is rejected if the user can't send state events +upg /upgrade of a bogus room fails gracefully +upg Cannot send tombstone event that points to the same room +f,upg Local and remote users' homeservers remove a room from their public directory on upgrade +rst Name/topic keys are correct f,pub Can get remote public room list pub Can paginate public room list -pub Can search public room list +pub Can search public room list syn Can create filter syn Can download filter syn Can sync syn Can sync a joined room syn Full state sync includes joined rooms syn Newly joined room is included in an incremental sync -syn Newly joined room has correct timeline in incremental sync -syn Newly joined room includes presence in incremental sync -syn Get presence for newly joined members in incremental sync -syn Can sync a room with a single message -syn Can sync a room with a message with a transaction id +syn Newly joined room has correct timeline in incremental sync +syn Newly joined room includes presence in incremental sync +syn Get presence for newly joined members in incremental sync +syn Can sync a room with a single message +syn Can sync a room with a message with a transaction id syn A message sent after an initial sync appears in the timeline of an incremental sync. -syn A filtered timeline reaches its limit -syn Syncing a new room with a large timeline limit isn't limited -syn A full_state incremental update returns only recent timeline -syn A prev_batch token can be used in the v1 messages API -syn A next_batch token can be used in the v1 messages API -syn User sees their own presence in a sync +syn A filtered timeline reaches its limit +syn Syncing a new room with a large timeline limit isn't limited +syn A full_state incremental update returns only recent timeline +syn A prev_batch token can be used in the v1 messages API +syn A next_batch token can be used in the v1 messages API +syn User sees their own presence in a sync syn User is offline if they set_presence=offline in their sync -syn User sees updates to presence from other users in the incremental sync. -syn State is included in the timeline in the initial sync -f,syn State from remote users is included in the state in the initial sync +syn User sees updates to presence from other users in the incremental sync. 
+syn State is included in the timeline in the initial sync +f,syn State from remote users is included in the state in the initial sync syn Changes to state are included in an incremental sync -syn Changes to state are included in an gapped incremental sync -f,syn State from remote users is included in the timeline in an incremental sync -syn A full_state incremental update returns all state -syn When user joins a room the state is included in the next sync -syn A change to displayname should not result in a full state sync +syn Changes to state are included in an gapped incremental sync +f,syn State from remote users is included in the timeline in an incremental sync +syn A full_state incremental update returns all state +syn When user joins a room the state is included in the next sync +syn A change to displayname should not result in a full state sync syn A change to displayname should appear in incremental /sync -syn When user joins a room the state is included in a gapped sync -syn When user joins and leaves a room in the same batch, the full state is still included in the next sync +syn When user joins a room the state is included in a gapped sync +syn When user joins and leaves a room in the same batch, the full state is still included in the next sync syn Current state appears in timeline in private history syn Current state appears in timeline in private history with many messages before -syn Current state appears in timeline in private history with many messages after +syn Current state appears in timeline in private history with many messages after syn Rooms a user is invited to appear in an initial sync syn Rooms a user is invited to appear in an incremental sync syn Newly joined room is included in an incremental sync after invite syn Sync can be polled for updates syn Sync is woken up for leaves -syn Left rooms appear in the leave section of sync +syn Left rooms appear in the leave section of sync syn Newly left rooms appear in the leave section of incremental sync syn We should see our own leave event, even if history_visibility is restricted (SYN-662) syn We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) syn Newly left rooms appear in the leave section of gapped sync syn Previously left rooms don't appear in the leave section of sync syn Left rooms appear in the leave section of full state sync -syn Archived rooms only contain history from before the user left -syn Banned rooms appear in the leave section of sync +syn Archived rooms only contain history from before the user left +syn Banned rooms appear in the leave section of sync syn Newly banned rooms appear in the leave section of incremental sync syn Newly banned rooms appear in the leave section of incremental sync syn Typing events appear in initial sync syn Typing events appear in incremental sync syn Typing events appear in gapped sync -syn Read receipts appear in initial v2 /sync -syn New read receipts appear in incremental v2 /sync -syn Can pass a JSON filter as a query parameter -syn Can request federation format via the filter -syn Read markers appear in incremental v2 /sync -syn Read markers appear in initial v2 /sync -syn Read markers can be updated +syn Read receipts appear in initial v2 /sync +syn New read receipts appear in incremental v2 /sync +syn Can pass a JSON filter as a query parameter +syn Can request federation format via the filter +syn Read markers appear in incremental v2 /sync +syn Read markers appear in initial v2 /sync +syn 
Read markers can be updated syn Lazy loading parameters in the filter are strictly boolean -syn The only membership state included in an initial sync is for all the senders in the timeline -syn The only membership state included in an incremental sync is for senders in the timeline -syn The only membership state included in a gapped incremental sync is for senders in the timeline -syn Gapped incremental syncs include all state changes -syn Old leaves are present in gapped incremental syncs -syn Leaves are present in non-gapped incremental syncs -syn Old members are included in gappy incr LL sync if they start speaking -syn Members from the gap are included in gappy incr LL sync -syn We don't send redundant membership state across incremental syncs by default -syn We do send redundant membership state across incremental syncs if asked -syn Unnamed room comes with a name summary -syn Named room comes with just joined member count summary -syn Room summary only has 5 heroes -syn Room summary counts change when membership changes -rmv User can create and send/receive messages in a room with version 1 +syn The only membership state included in an initial sync is for all the senders in the timeline +syn The only membership state included in an incremental sync is for senders in the timeline +syn The only membership state included in a gapped incremental sync is for senders in the timeline +syn Gapped incremental syncs include all state changes +syn Old leaves are present in gapped incremental syncs +syn Leaves are present in non-gapped incremental syncs +syn Old members are included in gappy incr LL sync if they start speaking +syn Members from the gap are included in gappy incr LL sync +syn We don't send redundant membership state across incremental syncs by default +syn We do send redundant membership state across incremental syncs if asked +syn Unnamed room comes with a name summary +syn Named room comes with just joined member count summary +syn Room summary only has 5 heroes +syn Room summary counts change when membership changes +rmv User can create and send/receive messages in a room with version 1 rmv User can create and send/receive messages in a room with version 1 (2 subtests) rmv local user can join room with version 1 rmv User can invite local user to room with version 1 rmv remote user can join room with version 1 -rmv User can invite remote user to room with version 1 +rmv User can invite remote user to room with version 1 rmv Remote user can backfill in a room with version 1 -rmv Can reject invites over federation for rooms with version 1 -rmv Can receive redactions from regular users over federation in room version 1 -rmv User can create and send/receive messages in a room with version 2 +rmv Can reject invites over federation for rooms with version 1 +rmv Can receive redactions from regular users over federation in room version 1 +rmv User can create and send/receive messages in a room with version 2 rmv User can create and send/receive messages in a room with version 2 (2 subtests) rmv local user can join room with version 2 rmv User can invite local user to room with version 2 rmv remote user can join room with version 2 -rmv User can invite remote user to room with version 2 +rmv User can invite remote user to room with version 2 rmv Remote user can backfill in a room with version 2 -rmv Can reject invites over federation for rooms with version 2 -rmv Can receive redactions from regular users over federation in room version 2 -rmv User can create and send/receive messages in 
a room with version 3 +rmv Can reject invites over federation for rooms with version 2 +rmv Can receive redactions from regular users over federation in room version 2 +rmv User can create and send/receive messages in a room with version 3 rmv User can create and send/receive messages in a room with version 3 (2 subtests) -rmv local user can join room with version 3 -rmv User can invite local user to room with version 3 -rmv remote user can join room with version 3 -rmv User can invite remote user to room with version 3 -rmv Remote user can backfill in a room with version 3 -rmv Can reject invites over federation for rooms with version 3 -rmv Can receive redactions from regular users over federation in room version 3 -rmv User can create and send/receive messages in a room with version 4 +rmv local user can join room with version 3 +rmv User can invite local user to room with version 3 +rmv remote user can join room with version 3 +rmv User can invite remote user to room with version 3 +rmv Remote user can backfill in a room with version 3 +rmv Can reject invites over federation for rooms with version 3 +rmv Can receive redactions from regular users over federation in room version 3 +rmv User can create and send/receive messages in a room with version 4 rmv User can create and send/receive messages in a room with version 4 (2 subtests) -rmv local user can join room with version 4 -rmv User can invite local user to room with version 4 -rmv remote user can join room with version 4 -rmv User can invite remote user to room with version 4 -rmv Remote user can backfill in a room with version 4 -rmv Can reject invites over federation for rooms with version 4 -rmv Can receive redactions from regular users over federation in room version 4 -rmv User can create and send/receive messages in a room with version 5 +rmv local user can join room with version 4 +rmv User can invite local user to room with version 4 +rmv remote user can join room with version 4 +rmv User can invite remote user to room with version 4 +rmv Remote user can backfill in a room with version 4 +rmv Can reject invites over federation for rooms with version 4 +rmv Can receive redactions from regular users over federation in room version 4 +rmv User can create and send/receive messages in a room with version 5 rmv User can create and send/receive messages in a room with version 5 (2 subtests) -rmv local user can join room with version 5 -rmv User can invite local user to room with version 5 -rmv remote user can join room with version 5 -rmv User can invite remote user to room with version 5 -rmv Remote user can backfill in a room with version 5 -rmv Can reject invites over federation for rooms with version 5 -rmv Can receive redactions from regular users over federation in room version 5 -pre Presence changes are reported to local room members -f,pre Presence changes are also reported to remote room members -pre Presence changes to UNAVAILABLE are reported to local room members -f,pre Presence changes to UNAVAILABLE are reported to remote room members -v1s Newly created users see their own presence in /initialSync (SYT-34) -dvk Can upload device keys +rmv local user can join room with version 5 +rmv User can invite local user to room with version 5 +rmv remote user can join room with version 5 +rmv User can invite remote user to room with version 5 +rmv Remote user can backfill in a room with version 5 +rmv Can reject invites over federation for rooms with version 5 +rmv Can receive redactions from regular users over federation in 
room version 5 +rmv User can create and send/receive messages in a room with version 6 +rmv User can create and send/receive messages in a room with version 6 (2 subtests) +rmv local user can join room with version 6 +rmv User can invite local user to room with version 6 +rmv remote user can join room with version 6 +rmv User can invite remote user to room with version 6 +rmv Remote user can backfill in a room with version 6 +rmv Can reject invites over federation for rooms with version 6 +rmv Can receive redactions from regular users over federation in room version 6 +rmv Inbound federation rejects invites which include invalid JSON for room version 6 +rmv Outbound federation rejects invite response which include invalid JSON for room version 6 +rmv Inbound federation rejects invite rejections which include invalid JSON for room version 6 +rmv Server rejects invalid JSON in a version 6 room +pre Presence changes are reported to local room members +f,pre Presence changes are also reported to remote room members +pre Presence changes to UNAVAILABLE are reported to local room members +f,pre Presence changes to UNAVAILABLE are reported to remote room members +v1s Newly created users see their own presence in /initialSync (SYT-34) +dvk Can upload device keys dvk Should reject keys claiming to belong to a different user -dvk Can query device keys using POST -dvk Can query specific device keys using POST -dvk query for user with no keys returns empty key dict -dvk Can claim one time key using POST -f,dvk Can query remote device keys using POST -f,dvk Can claim remote one time key using POST -dvk Local device key changes appear in v2 /sync -dvk Local new device changes appear in v2 /sync -dvk Local delete device changes appear in v2 /sync -dvk Local update device changes appear in v2 /sync -dvk Can query remote device keys using POST after notification -f,dev Device deletion propagates over federation -f,dev If remote user leaves room, changes device and rejoins we see update in sync -f,dev If remote user leaves room we no longer receive device updates -dvk Local device key changes appear in /keys/changes -dvk New users appear in /keys/changes -f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes -dvk Get left notifs in sync and /keys/changes when other user leaves -dvk Get left notifs for other users in sync and /keys/changes when user leaves -f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes -dvk Can create backup version -dvk Can update backup version -dvk Responds correctly when backup is empty -dvk Can backup keys -dvk Can update keys with better versions -dvk Will not update keys with worse versions -dvk Will not back up to an old backup version -dvk Can delete backup -dvk Deleted & recreated backups are empty -dvk Can create more than 10 backup versions -dvk Can upload self-signing keys -dvk Fails to upload self-signing keys with no auth -dvk Fails to upload self-signing key without master key -dvk Changing master key notifies local users -dvk Changing user-signing key notifies local users -f,dvk can fetch self-signing keys over federation -f,dvk uploading self-signing key notifies over federation -f,dvk uploading signed devices gets propagated over federation +dvk Can query device keys using POST +dvk Can query specific device keys using POST +dvk query for user with no keys returns empty key dict +dvk Can claim one time key using POST +f,dvk Can query remote device keys using POST +f,dvk Can 
claim remote one time key using POST +dvk Local device key changes appear in v2 /sync +dvk Local new device changes appear in v2 /sync +dvk Local delete device changes appear in v2 /sync +dvk Local update device changes appear in v2 /sync +dvk Can query remote device keys using POST after notification +f,dev Device deletion propagates over federation +f,dev If remote user leaves room, changes device and rejoins we see update in sync +f,dev If remote user leaves room we no longer receive device updates +dvk Local device key changes appear in /keys/changes +dvk New users appear in /keys/changes +f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes +dvk Get left notifs in sync and /keys/changes when other user leaves +dvk Get left notifs for other users in sync and /keys/changes when user leaves +f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes +dkb Can create backup version +dkb Can update backup version +dkb Responds correctly when backup is empty +dkb Can backup keys +dkb Can update keys with better versions +dkb Will not update keys with worse versions +dkb Will not back up to an old backup version +dkb Can delete backup +dkb Deleted & recreated backups are empty +dkb Can create more than 10 backup versions +xsk Can upload self-signing keys +xsk Fails to upload self-signing keys with no auth +xsk Fails to upload self-signing key without master key +xsk Changing master key notifies local users +xsk Changing user-signing key notifies local users +f,xsk can fetch self-signing keys over federation +f,xsk uploading self-signing key notifies over federation +f,xsk uploading signed devices gets propagated over federation tag Can add tag tag Can remove tag tag Can list tags for a room -v1s Tags appear in the v1 /events stream -v1s Tags appear in the v1 /initalSync -v1s Tags appear in the v1 room initial sync +v1s Tags appear in the v1 /events stream +v1s Tags appear in the v1 /initalSync +v1s Tags appear in the v1 room initial sync tag Tags appear in an initial v2 /sync tag Newly updated tags appear in an incremental v2 /sync tag Deleted tags appear in an incremental v2 /sync -tag local user has tags copied to the new room -f,tag remote user has tags copied to the new room -sch Can search for an event by body -sch Can get context around search results -sch Can back-paginate search results -sch Search works across an upgraded room and its predecessor -sch Search results with rank ordering do not include redacted events -sch Search results with recent ordering do not include redacted events +tag local user has tags copied to the new room +f,tag remote user has tags copied to the new room +sch Can search for an event by body +sch Can get context around search results +sch Can back-paginate search results +sch Search works across an upgraded room and its predecessor +sch Search results with rank ordering do not include redacted events +sch Search results with recent ordering do not include redacted events acc Can add account data acc Can add account data to room -acc Can get account data without syncing -acc Can get room account data without syncing -v1s Latest account data comes down in /initialSync -v1s Latest account data comes down in room initialSync -v1s Account data appears in v1 /events stream -v1s Room account data appears in v1 /events stream -acc Latest account data appears in v2 /sync +acc Can get account data without syncing +acc Can get room account data without syncing +v1s Latest account 
data comes down in /initialSync +v1s Latest account data comes down in room initialSync +v1s Account data appears in v1 /events stream +v1s Room account data appears in v1 /events stream +acc Latest account data appears in v2 /sync acc New account data appears in incremental v2 /sync -oid Can generate a openid access_token that can be exchanged for information about a user -oid Invalid openid access tokens are rejected -oid Requests to userinfo without access tokens are rejected -std Can send a message directly to a device using PUT /sendToDevice -std Can recv a device message using /sync -std Can recv device messages until they are acknowledged -std Device messages with the same txn_id are deduplicated -std Device messages wake up /sync -std Can recv device messages over federation -std Device messages over federation wake up /sync -std Can send messages with a wildcard device id -std Can send messages with a wildcard device id to two devices -std Wildcard device messages wake up /sync -std Wildcard device messages over federation wake up /sync -adm /whois -nsp /purge_history -nsp /purge_history by ts -nsp Can backfill purged history -nsp Shutdown room -ign Ignore user in existing room -ign Ignore invite in full sync -ign Ignore invite in incremental sync +oid Can generate a openid access_token that can be exchanged for information about a user +oid Invalid openid access tokens are rejected +oid Requests to userinfo without access tokens are rejected +std Can send a message directly to a device using PUT /sendToDevice +std Can recv a device message using /sync +std Can recv device messages until they are acknowledged +std Device messages with the same txn_id are deduplicated +std Device messages wake up /sync +std Can recv device messages over federation +fsd Device messages over federation wake up /sync +std Can send messages with a wildcard device id +std Can send messages with a wildcard device id to two devices +std Wildcard device messages wake up /sync +fsd Wildcard device messages over federation wake up /sync +adm /whois +nsp /purge_history +nsp /purge_history by ts +nsp Can backfill purged history +nsp Shutdown room +ign Ignore user in existing room +ign Ignore invite in full sync +ign Ignore invite in incremental sync fky Checking local federation server fky Federation key API allows unsigned requests for keys -fky Federation key API can act as a notary server via a GET request -fky Federation key API can act as a notary server via a POST request -fky Key notary server should return an expired key if it can't find any others -fky Key notary server must not overwrite a valid key with a spurious result from the origin server -fqu Non-numeric ports in server names are rejected +fky Federation key API can act as a notary server via a GET request +fky Federation key API can act as a notary server via a POST request +fky Key notary server should return an expired key if it can't find any others +fky Key notary server must not overwrite a valid key with a spurious result from the origin server +fqu Non-numeric ports in server names are rejected fqu Outbound federation can query profile data fqu Inbound federation can query profile data fqu Outbound federation can query room alias directory fqu Inbound federation can query room alias directory -fsj Outbound federation can query v1 /send_join +fsj Outbound federation can query v1 /send_join fsj Outbound federation can query v2 /send_join -fmj Outbound federation passes make_join failures through to the client -fsj Inbound federation can 
receive v1 /send_join +fmj Outbound federation passes make_join failures through to the client +fsj Inbound federation can receive v1 /send_join fsj Inbound federation can receive v2 /send_join fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms -fsj Inbound /v1/send_join rejects incorrectly-signed joins -fsj Inbound /v1/send_join rejects joins from other servers +fsj Inbound /v1/send_join rejects incorrectly-signed joins +fsj Inbound /v1/send_join rejects joins from other servers fau Inbound federation rejects remote attempts to kick local users to rooms -frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support -frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support -frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 +frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support +frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support +frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 frv Inbound federation accepts attempts to join v2 rooms from servers with support -frv Outbound federation correctly handles unsupported room versions -frv A pair of servers can establish a join in a v2 room -fsj Outbound federation rejects send_join responses with no m.room.create event -frv Outbound federation rejects m.room.create events with an unknown room version -fsj Event with an invalid signature in the send_join response should not cause room join to fail +frv Outbound federation correctly handles unsupported room versions +frv A pair of servers can establish a join in a v2 room +fsj Outbound federation rejects send_join responses with no m.room.create event +frv Outbound federation rejects m.room.create events with an unknown room version +fsj Event with an invalid signature in the send_join response should not cause room join to fail +fsj Inbound: send_join rejects invalid JSON for room version 6 fed Outbound federation can send events -fed Inbound federation can receive events -fed Inbound federation can receive redacted events -fed Ephemeral messages received from servers are correctly expired -fed Events whose auth_events are in the wrong room do not mess up the room state -fed Inbound federation can return events -fed Inbound federation redacts events from erased users -fme Outbound federation can request missing events -fme Inbound federation can return missing events for world_readable visibility -fme Inbound federation can return missing events for shared visibility -fme Inbound federation can return missing events for invite visibility -fme Inbound federation can return missing events for joined visibility -fme outliers whose auth_events are in a different room are correctly rejected -fbk Outbound federation can backfill events -fbk Inbound federation can backfill events -fbk Backfill checks the events requested belong to the room -fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -fiv Outbound federation can send invites via v1 API -fiv Outbound federation can send invites via v2 API -fiv Inbound federation can receive invites via v1 API -fiv Inbound federation can receive invites via v2 API -fiv Inbound federation can receive invite and reject when remote replies with a 403 -fiv Inbound federation can receive invite and reject when remote replies with a 500 -fiv Inbound federation can receive 
invite and reject when remote is unreachable -fiv Inbound federation rejects invites which are not signed by the sender -fiv Inbound federation can receive invite rejections -fiv Inbound federation rejects incorrectly-signed invite rejections -fsl Inbound /v1/send_leave rejects leaves from other servers -fst Inbound federation can get state for a room +fed Inbound federation can receive events +fed Inbound federation can receive redacted events +fed Ephemeral messages received from servers are correctly expired +fed Events whose auth_events are in the wrong room do not mess up the room state +fed Inbound federation can return events +fed Inbound federation redacts events from erased users +fme Outbound federation can request missing events +fme Inbound federation can return missing events for world_readable visibility +fme Inbound federation can return missing events for shared visibility +fme Inbound federation can return missing events for invite visibility +fme Inbound federation can return missing events for joined visibility +fme outliers whose auth_events are in a different room are correctly rejected +fbk Outbound federation can backfill events +fbk Inbound federation can backfill events +fbk Backfill checks the events requested belong to the room +fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination +fiv Outbound federation can send invites via v1 API +fiv Outbound federation can send invites via v2 API +fiv Inbound federation can receive invites via v1 API +fiv Inbound federation can receive invites via v2 API +fiv Inbound federation can receive invite and reject when remote replies with a 403 +fiv Inbound federation can receive invite and reject when remote replies with a 500 +fiv Inbound federation can receive invite and reject when remote is unreachable +fiv Inbound federation rejects invites which are not signed by the sender +fiv Inbound federation can receive invite rejections +fiv Inbound federation rejects incorrectly-signed invite rejections +fsl Inbound /v1/send_leave rejects leaves from other servers +fst Inbound federation can get state for a room fst Inbound federation of state requires event_id as a mandatory paramater -fst Inbound federation can get state_ids for a room +fst Inbound federation can get state_ids for a room fst Inbound federation of state_ids requires event_id as a mandatory paramater -fst Federation rejects inbound events where the prev_events cannot be found -fst Room state at a rejected message event is the same as its predecessor -fst Room state at a rejected state event is the same as its predecessor -fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state -fst Federation handles empty auth_events in state_ids sanely -fst Getting state checks the events requested belong to the room -fst Getting state IDs checks the events requested belong to the room -fst Should not be able to take over the room by pretending there is no PL event -fpb Inbound federation can get public room list -fed Outbound federation sends receipts -fed Inbound federation rejects receipts from wrong remote -fed Inbound federation ignores redactions from invalid servers room > v3 -fed An event which redacts an event in a different room should be ignored -fed An event which redacts itself should be ignored -fed A pair of events which redact each other should be ignored -fdk Local device key changes get to remote servers -fdk Server correctly handles incoming m.device_list_update 
-fdk Server correctly resyncs when client query keys and there is no remote cache -fdk Server correctly resyncs when server leaves and rejoins a room -fdk Local device key changes get to remote servers with correct prev_id -fdk Device list doesn't change if remote server is down -fdk If a device list update goes missing, the server resyncs on the next one -fst Name/topic keys are correct -fau Remote servers cannot set power levels in rooms without existing powerlevels -fau Remote servers should reject attempts by non-creators to set the power levels -fau Inbound federation rejects typing notifications from wrong remote -fed Forward extremities remain so even after the next events are populated as outliers -fau Banned servers cannot send events -fau Banned servers cannot /make_join -fau Banned servers cannot /send_join -fau Banned servers cannot /make_leave -fau Banned servers cannot /send_leave -fau Banned servers cannot /invite -fau Banned servers cannot get room state -fau Banned servers cannot get room state ids -fau Banned servers cannot backfill -fau Banned servers cannot /event_auth -fau Banned servers cannot get missing events -fau Server correctly handles transactions that break edu limits -fau Inbound federation correctly soft fails events -fau Inbound federation accepts a second soft-failed event -fau Inbound federation correctly handles soft failed events as extremities -med Can upload with Unicode file name -med Can download with Unicode file name locally -f,med Can download with Unicode file name over federation -med Alternative server names do not cause a routing loop -med Can download specifying a different Unicode file name +fst Federation rejects inbound events where the prev_events cannot be found +fst Room state at a rejected message event is the same as its predecessor +fst Room state at a rejected state event is the same as its predecessor +fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state +fst Federation handles empty auth_events in state_ids sanely +fst Getting state checks the events requested belong to the room +fst Getting state IDs checks the events requested belong to the room +fst Should not be able to take over the room by pretending there is no PL event +fpb Inbound federation can get public room list +fed Outbound federation sends receipts +fed Inbound federation rejects receipts from wrong remote +fed Inbound federation ignores redactions from invalid servers room > v3 +fed An event which redacts an event in a different room should be ignored +fed An event which redacts itself should be ignored +fed A pair of events which redact each other should be ignored +fdk Local device key changes get to remote servers +fdk Server correctly handles incoming m.device_list_update +fdk Server correctly resyncs when client query keys and there is no remote cache +fdk Server correctly resyncs when server leaves and rejoins a room +fdk Local device key changes get to remote servers with correct prev_id +fdk Device list doesn't change if remote server is down +fdk If a device list update goes missing, the server resyncs on the next one +fst Name/topic keys are correct +fau Remote servers cannot set power levels in rooms without existing powerlevels +fau Remote servers should reject attempts by non-creators to set the power levels +fau Inbound federation rejects typing notifications from wrong remote +fau Users cannot set notifications powerlevel higher than their own +fed Forward extremities remain so even after the next 
events are populated as outliers +fau Banned servers cannot send events +fau Banned servers cannot /make_join +fau Banned servers cannot /send_join +fau Banned servers cannot /make_leave +fau Banned servers cannot /send_leave +fau Banned servers cannot /invite +fau Banned servers cannot get room state +fau Banned servers cannot get room state ids +fau Banned servers cannot backfill +fau Banned servers cannot /event_auth +fau Banned servers cannot get missing events +fau Server correctly handles transactions that break edu limits +fau Inbound federation correctly soft fails events +fau Inbound federation accepts a second soft-failed event +fau Inbound federation correctly handles soft failed events as extremities +med Can upload with Unicode file name +med Can download with Unicode file name locally +f,med Can download with Unicode file name over federation +med Alternative server names do not cause a routing loop +med Can download specifying a different Unicode file name med Can upload without a file name med Can download without a file name locally -f,med Can download without a file name over federation +f,med Can download without a file name over federation med Can upload with ASCII file name -med Can download file 'ascii' -med Can download file 'name with spaces' -med Can download file 'name;with;semicolons' -med Can download specifying a different ASCII file name +med Can download file 'ascii' +med Can download file 'name with spaces' +med Can download file 'name;with;semicolons' +med Can download specifying a different ASCII file name med Can send image in room message -med Can fetch images in room -med POSTed media can be thumbnailed -f,med Remote media can be thumbnailed -med Test URL preview -med Can read configuration endpoint -nsp Can quarantine media in rooms -udr User appears in user directory -udr User in private room doesn't appear in user directory -udr User joining then leaving public room appears and dissappears from directory -udr Users appear/disappear from directory when join_rules are changed -udr Users appear/disappear from directory when history_visibility are changed -udr Users stay in directory when join_rules are changed but history_visibility is world_readable -f,udr User in remote room doesn't appear in user directory after server left room -udr User directory correctly update on display name change -udr User in shared private room does appear in user directory -udr User in shared private room does appear in user directory until leave -udr User in dir while user still shares private rooms -nsp Create group -nsp Add group rooms -nsp Remove group rooms -nsp Get local group profile -nsp Get local group users -nsp Add/remove local group rooms -nsp Get local group summary -nsp Get remote group profile -nsp Get remote group users -nsp Add/remove remote group rooms -nsp Get remote group summary -nsp Add local group users -nsp Remove self from local group -nsp Remove other from local group -nsp Add remote group users -nsp Remove self from remote group -nsp Listing invited users of a remote group when not a member returns a 403 -nsp Add group category -nsp Remove group category -nsp Get group categories -nsp Add group role -nsp Remove group role -nsp Get group roles -nsp Add room to group summary -nsp Adding room to group summary keeps room_id when fetching rooms in group -nsp Adding multiple rooms to group summary have correct order -nsp Remove room from group summary -nsp Add room to group summary with category -nsp Remove room from group summary with category -nsp Add 
user to group summary -nsp Adding multiple users to group summary have correct order -nsp Remove user from group summary -nsp Add user to group summary with role -nsp Remove user from group summary with role -nsp Local group invites come down sync -nsp Group creator sees group in sync -nsp Group creator sees group in initial sync -nsp Get/set local group publicity -nsp Bulk get group publicity -nsp Joinability comes down summary -nsp Set group joinable and join it -nsp Group is not joinable by default -nsp Group is joinable over federation -nsp Room is transitioned on local and remote groups upon room upgrade -3pd Can bind 3PID via home server -3pd Can bind and unbind 3PID via homeserver -3pd Can unbind 3PID via homeserver when bound out of band -3pd 3PIDs are unbound after account deactivation -3pd Can bind and unbind 3PID via /unbind by specifying the identity server -3pd Can bind and unbind 3PID via /unbind without specifying the identity server -app AS can create a user -app AS can create a user with an underscore -app AS can create a user with inhibit_login +med Can fetch images in room +med POSTed media can be thumbnailed +f,med Remote media can be thumbnailed +med Test URL preview +med Can read configuration endpoint +nsp Can quarantine media in rooms +udr User appears in user directory +udr User in private room doesn't appear in user directory +udr User joining then leaving public room appears and dissappears from directory +udr Users appear/disappear from directory when join_rules are changed +udr Users appear/disappear from directory when history_visibility are changed +udr Users stay in directory when join_rules are changed but history_visibility is world_readable +f,udr User in remote room doesn't appear in user directory after server left room +udr User directory correctly update on display name change +udr User in shared private room does appear in user directory +udr User in shared private room does appear in user directory until leave +udr User in dir while user still shares private rooms +nsp Create group +nsp Add group rooms +nsp Remove group rooms +nsp Get local group profile +nsp Get local group users +nsp Add/remove local group rooms +nsp Get local group summary +nsp Get remote group profile +nsp Get remote group users +nsp Add/remove remote group rooms +nsp Get remote group summary +nsp Add local group users +nsp Remove self from local group +nsp Remove other from local group +nsp Add remote group users +nsp Remove self from remote group +nsp Listing invited users of a remote group when not a member returns a 403 +nsp Add group category +nsp Remove group category +nsp Get group categories +nsp Add group role +nsp Remove group role +nsp Get group roles +nsp Add room to group summary +nsp Adding room to group summary keeps room_id when fetching rooms in group +nsp Adding multiple rooms to group summary have correct order +nsp Remove room from group summary +nsp Add room to group summary with category +nsp Remove room from group summary with category +nsp Add user to group summary +nsp Adding multiple users to group summary have correct order +nsp Remove user from group summary +nsp Add user to group summary with role +nsp Remove user from group summary with role +nsp Local group invites come down sync +nsp Group creator sees group in sync +nsp Group creator sees group in initial sync +nsp Get/set local group publicity +nsp Bulk get group publicity +nsp Joinability comes down summary +nsp Set group joinable and join it +nsp Group is not joinable by default +nsp Group is 
joinable over federation +nsp Room is transitioned on local and remote groups upon room upgrade +3pd Can bind 3PID via home server +3pd Can bind and unbind 3PID via homeserver +3pd Can unbind 3PID via homeserver when bound out of band +3pd 3PIDs are unbound after account deactivation +3pd Can bind and unbind 3PID via /unbind by specifying the identity server +3pd Can bind and unbind 3PID via /unbind without specifying the identity server +app AS can create a user +app AS can create a user with an underscore +app AS can create a user with inhibit_login app AS cannot create users outside its own namespace app Regular users cannot register within the AS namespace -app AS can make room aliases +app AS can make room aliases app Regular users cannot create room aliases within the AS namespace -app AS-ghosted users can use rooms via AS -app AS-ghosted users can use rooms themselves -app Ghost user must register before joining room -app AS can set avatar for ghosted users -app AS can set displayname for ghosted users +app AS-ghosted users can use rooms via AS +app AS-ghosted users can use rooms themselves +app Ghost user must register before joining room +app AS can set avatar for ghosted users +app AS can set displayname for ghosted users app AS can't set displayname for random users -app Inviting an AS-hosted user asks the AS server -app Accesing an AS-hosted room alias asks the AS server -app Events in rooms with AS-hosted room aliases are sent to AS server -app AS user (not ghost) can join room without registering +app Inviting an AS-hosted user asks the AS server +app Accesing an AS-hosted room alias asks the AS server +app Events in rooms with AS-hosted room aliases are sent to AS server +app AS user (not ghost) can join room without registering app AS user (not ghost) can join room without registering, with user_id query param -app HS provides query metadata -app HS can provide query metadata on a single protocol -app HS will proxy request for 3PU mapping -app HS will proxy request for 3PL mapping -app AS can publish rooms in their own list -app AS and main public room lists are separate -app AS can deactivate a user -psh Test that a message is pushed -psh Invites are pushed -psh Rooms with names are correctly named in pushed -psh Rooms with canonical alias are correctly named in pushed -psh Rooms with many users are correctly pushed -psh Don't get pushed for rooms you've muted -psh Rejected events are not pushed -psh Can add global push rule for room -psh Can add global push rule for sender -psh Can add global push rule for content -psh Can add global push rule for override -psh Can add global push rule for underride -psh Can add global push rule for content -psh New rules appear before old rules by default -psh Can add global push rule before an existing rule -psh Can add global push rule after an existing rule -psh Can delete a push rule -psh Can disable a push rule -psh Adding the same push rule twice is idempotent -psh Messages that notify from another user increment unread notification count -psh Messages that highlight from another user increment unread highlight count -psh Can change the actions of default rules +app HS provides query metadata +app HS can provide query metadata on a single protocol +app HS will proxy request for 3PU mapping +app HS will proxy request for 3PL mapping +app AS can publish rooms in their own list +app AS and main public room lists are separate +app AS can deactivate a user +psh Test that a message is pushed +psh Invites are pushed +psh Rooms with names 
are correctly named in pushed +psh Rooms with canonical alias are correctly named in pushed +psh Rooms with many users are correctly pushed +psh Don't get pushed for rooms you've muted +psh Rejected events are not pushed +psh Can add global push rule for room +psh Can add global push rule for sender +psh Can add global push rule for content +psh Can add global push rule for override +psh Can add global push rule for underride +psh Can add global push rule for content +psh New rules appear before old rules by default +psh Can add global push rule before an existing rule +psh Can add global push rule after an existing rule +psh Can delete a push rule +psh Can disable a push rule +psh Adding the same push rule twice is idempotent +psh Messages that notify from another user increment unread notification count +psh Messages that highlight from another user increment unread highlight count +psh Can change the actions of default rules psh Changing the actions of an unknown default rule fails with 404 -psh Can change the actions of a user specified rule +psh Can change the actions of a user specified rule psh Changing the actions of an unknown rule fails with 404 -psh Can fetch a user's pushers +psh Can fetch a user's pushers psh Push rules come down in an initial /sync -psh Adding a push rule wakes up an incremental /sync -psh Disabling a push rule wakes up an incremental /sync -psh Enabling a push rule wakes up an incremental /sync -psh Setting actions for a push rule wakes up an incremental /sync -psh Can enable/disable default rules +psh Adding a push rule wakes up an incremental /sync +psh Disabling a push rule wakes up an incremental /sync +psh Enabling a push rule wakes up an incremental /sync +psh Setting actions for a push rule wakes up an incremental /sync +psh Can enable/disable default rules psh Enabling an unknown default rule fails with 404 -psh Test that rejected pushers are removed. -psh Notifications can be viewed with GET /notifications -psh Trying to add push rule with no scope fails with 400 -psh Trying to add push rule with invalid scope fails with 400 -psh Trying to add push rule with missing template fails with 400 -psh Trying to add push rule with missing rule_id fails with 400 -psh Trying to add push rule with empty rule_id fails with 400 -psh Trying to add push rule with invalid template fails with 400 -psh Trying to add push rule with rule_id with slashes fails with 400 -psh Trying to add push rule with override rule without conditions fails with 400 -psh Trying to add push rule with underride rule without conditions fails with 400 -psh Trying to add push rule with condition without kind fails with 400 -psh Trying to add push rule with content rule without pattern fails with 400 -psh Trying to add push rule with no actions fails with 400 -psh Trying to add push rule with invalid action fails with 400 -psh Trying to add push rule with invalid attr fails with 400 -psh Trying to add push rule with invalid value for enabled fails with 400 -psh Trying to get push rules with no trailing slash fails with 400 -psh Trying to get push rules with scope without trailing slash fails with 400 -psh Trying to get push rules with template without tailing slash fails with 400 -psh Trying to get push rules with unknown scope fails with 400 -psh Trying to get push rules with unknown template fails with 400 -psh Trying to get push rules with unknown attribute fails with 400 +psh Test that rejected pushers are removed. 
+psh Notifications can be viewed with GET /notifications +psh Trying to add push rule with no scope fails with 400 +psh Trying to add push rule with invalid scope fails with 400 +psh Trying to add push rule with missing template fails with 400 +psh Trying to add push rule with missing rule_id fails with 400 +psh Trying to add push rule with empty rule_id fails with 400 +psh Trying to add push rule with invalid template fails with 400 +psh Trying to add push rule with rule_id with slashes fails with 400 +psh Trying to add push rule with override rule without conditions fails with 400 +psh Trying to add push rule with underride rule without conditions fails with 400 +psh Trying to add push rule with condition without kind fails with 400 +psh Trying to add push rule with content rule without pattern fails with 400 +psh Trying to add push rule with no actions fails with 400 +psh Trying to add push rule with invalid action fails with 400 +psh Trying to add push rule with invalid attr fails with 400 +psh Trying to add push rule with invalid value for enabled fails with 400 +psh Trying to get push rules with no trailing slash fails with 400 +psh Trying to get push rules with scope without trailing slash fails with 400 +psh Trying to get push rules with template without tailing slash fails with 400 +psh Trying to get push rules with unknown scope fails with 400 +psh Trying to get push rules with unknown template fails with 400 +psh Trying to get push rules with unknown attribute fails with 400 psh Trying to get push rules with unknown rule_id fails with 404 -v1s GET /initialSync with non-numeric 'limit' -v1s GET /events with non-numeric 'limit' -v1s GET /events with negative 'limit' -v1s GET /events with non-numeric 'timeout' -ath Event size limits -syn Check creating invalid filters returns 4xx -f,pre New federated private chats get full presence information (SYN-115) -pre Left room members do not cause problems for presence -crm Rooms can be created with an initial invite list (SYN-205) -typ Typing notifications don't leak -ban Non-present room members cannot ban others -psh Getting push rules doesn't corrupt the cache SYN-390 -inv Test that we can be reinvited to a room we created -syn Multiple calls to /sync should not cause 500 errors -gst Guest user can call /events on another world_readable room (SYN-606) -gst Real user can call /events on another world_readable room (SYN-606) +psh Rooms with names are correctly named in pushes +v1s GET /initialSync with non-numeric 'limit' +v1s GET /events with non-numeric 'limit' +v1s GET /events with negative 'limit' +v1s GET /events with non-numeric 'timeout' +ath Event size limits +syn Check creating invalid filters returns 4xx +f,pre New federated private chats get full presence information (SYN-115) +pre Left room members do not cause problems for presence +crm Rooms can be created with an initial invite list (SYN-205) (1 subtests) +typ Typing notifications don't leak +ban Non-present room members cannot ban others +psh Getting push rules doesn't corrupt the cache SYN-390 +inv Test that we can be reinvited to a room we created +syn Multiple calls to /sync should not cause 500 errors +gst Guest user can call /events on another world_readable room (SYN-606) +gst Real user can call /events on another world_readable room (SYN-606) gst Events come down the correct room pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list std Can send a to-device message to two users which both receive it using /sync +fme 
Outbound federation will ignore a missing event with bad JSON for room version 6 +fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6 +jso Invalid JSON integers +jso Invalid JSON floats +jso Invalid JSON special values +inv Can invite users to invite-only rooms (2 subtests) +plv setting 'm.room.name' respects room powerlevel (2 subtests) +psh Messages that notify from another user increment notification_count +psh Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count +dvk Can claim one time key using POST (2 subtests) +fdk Can query remote device keys using POST (1 subtests) +fdk Can claim remote one time key using POST (2 subtests) +fmj Inbound /make_join rejects attempts to join rooms where all users have left \ No newline at end of file diff --git a/tests/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py index 0b334ba..3d21fa4 100755 --- a/tests/sytest/are-we-synapse-yet.py +++ b/tests/sytest/are-we-synapse-yet.py @@ -11,7 +11,7 @@ import sys # The main complexity is grouping tests sensibly into features like 'Registration' # and 'Federation'. Then it just checks the ones which are passing and calculates # percentages for each group. Produces results like: -# +# # Client-Server APIs: 29% (196/666 tests) # ------------------- # Registration : 62% (20/32 tests) @@ -28,11 +28,13 @@ import sys # ✓ POST /register can create a user # ✓ POST /register downcases capitals in usernames # ... -# +# # You can also tack `-v` on to see exactly which tests each category falls under. test_mappings = { "nsp": "Non-Spec API", + "unk": "Unknown API (no group specified)", + "app": "Application Services API", "f": "Federation", # flag to mark test involves federation "federation_apis": { @@ -50,6 +52,7 @@ test_mappings = { "fpb": "Public Room API", "fdk": "Device Key APIs", "fed": "Federation API", + "fsd": "Send-to-Device APIs", }, "client_apis": { @@ -61,6 +64,8 @@ test_mappings = { "pro": "Profile", "dev": "Devices", "dvk": "Device Keys", + "dkb": "Device Key Backup", + "xsk": "Cross-signing Keys", "pre": "Presence", "crm": "Create Room", "syn": "Sync API", @@ -98,7 +103,7 @@ test_mappings = { "adm": "Server Admin API", "ign": "Ignore Users", "udr": "User Directory APIs", - "app": "Application Services API", + "jso": "Enforced canonical JSON", }, } @@ -156,20 +161,22 @@ def print_stats(header_name, gid_to_tests, gid_to_name, verbose): total_tests = 0 for gid, tests in gid_to_tests.items(): group_total = len(tests) + if group_total == 0: + continue group_passing = 0 test_names_and_marks = [] for name, passing in tests.items(): if passing: group_passing += 1 test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") - + total_tests += group_total total_passing += group_passing pct = "{0:.0f}%".format(group_passing/group_total * 100) line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) subsections.append(line) subsection_test_names[line] = test_names_and_marks - + pct = "{0:.0f}%".format(total_passing/total_tests * 100) print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) print("-" * (len(header_name)+1)) @@ -186,7 +193,6 @@ def main(results_tap_path, verbose): test_name_to_group_id = {} fed_tests = set() client_tests = set() - groupless_tests = set() with open("./are-we-synapse-yet.list", "r") as f: for line in f.readlines(): test_name = " ".join(line.split(" ")[1:]).strip() @@ -212,8 +218,12 @@ def 
main(results_tap_path, verbose): # test_name: OK # } }, + "appservice": { + "app": {}, + }, "nonspec": { - "nsp": {} + "nsp": {}, + "unk": {} }, } with open(results_tap_path, "r") as f: @@ -224,10 +234,11 @@ def main(results_tap_path, verbose): name = test_result["name"] group_id = test_name_to_group_id.get(name) if not group_id: - groupless_tests.add(name) - # raise Exception("The test '%s' doesn't have a group" % (name,)) + summary["nonspec"]["unk"][name] = test_result["ok"] if group_id == "nsp": summary["nonspec"]["nsp"][name] = test_result["ok"] + elif group_id == "app": + summary["appservice"]["app"][name] = test_result["ok"] elif group_id in test_mappings["federation_apis"]: group = summary["federation"].get(group_id, {}) group[name] = test_result["ok"] @@ -243,12 +254,7 @@ def main(results_tap_path, verbose): print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) - if verbose: - print("The following tests don't have a group:") - for name in groupless_tests: - print(" %s" % (name,)) - else: - print("%d tests don't have a group" % len(groupless_tests)) + print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose) @@ -257,4 +263,4 @@ if __name__ == '__main__': parser.add_argument("tap_file", help="path to results.tap") parser.add_argument("-v", action="store_true", help="show individual test names in output") args = parser.parse_args() - main(args.tap_file, args.v) + main(args.tap_file, args.v) \ No newline at end of file From 6924dfc8ea56d8e8347b78364480ea2fce5a5905 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 6 Feb 2021 15:27:43 +0100 Subject: [PATCH 13/62] improvement: better appservice compatibility and optimizations --- src/appservice_server.rs | 2 +- src/client_server/state.rs | 2 +- src/client_server/sync.rs | 404 ++++++++++++++++++++----------------- src/database.rs | 7 +- src/database/rooms.rs | 4 + src/database/sending.rs | 3 +- src/main.rs | 32 ++- src/ruma_wrapper.rs | 19 +- src/server_server.rs | 4 +- 9 files changed, 270 insertions(+), 207 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..ec504b5 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,6 +1,6 @@ use crate::{utils, Error, Result}; use http::header::{HeaderValue, CONTENT_TYPE}; -use log::warn; +use log::{info, warn}; use ruma::api::OutgoingRequest; use std::{ convert::{TryFrom, TryInto}, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index cecb79d..faa415d 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -234,7 +234,7 @@ pub async fn get_state_events_for_empty_key_route( .1; Ok(get_state_events_for_empty_key::Response { - content: serde_json::value::to_raw_value(&event) + content: serde_json::value::to_raw_value(&event.content) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } .into()) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 3136116..494c773 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -95,15 +95,7 @@ pub async fn sync_events_route( // Database queries: - let current_state = db.rooms.room_state_full(&room_id)?; - let current_members = current_state - .iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) - .map(|(key, value)| (&key.1, value)) // 
Only keep state key - .collect::>(); - let encrypted_room = current_state - .get(&(EventType::RoomEncryption, "".to_owned())) - .is_some(); + let current_state_hash = db.rooms.current_state_hash(&room_id)?; // These type is Option>. The outer Option is None when there is no event between // since and the current room state, meaning there should be no updates. @@ -115,69 +107,85 @@ pub async fn sync_events_route( .as_ref() .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); - let since_state = since_state_hash.as_ref().map(|state_hash| { - state_hash - .as_ref() - .and_then(|state_hash| db.rooms.state_full(&room_id, &state_hash).ok()) - }); + let ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) = if since_state_hash != None && Some(¤t_state_hash) != since_state_hash.as_ref() { + let current_state = db.rooms.room_state_full(&room_id)?; + let current_members = current_state + .iter() + .filter(|(key, _)| key.0 == EventType::RoomMember) + .map(|(key, value)| (&key.1, value)) // Only keep state key + .collect::>(); + let encrypted_room = current_state + .get(&(EventType::RoomEncryption, "".to_owned())) + .is_some(); + let since_state = since_state_hash.as_ref().map(|state_hash| { + state_hash + .as_ref() + .and_then(|state_hash| db.rooms.state_full(&room_id, &state_hash).ok()) + }); - let since_encryption = since_state.as_ref().map(|state| { - state - .as_ref() - .map(|state| state.get(&(EventType::RoomEncryption, "".to_owned()))) - }); - - // Calculations: - let new_encrypted_room = - encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); - - let send_member_count = since_state.as_ref().map_or(false, |since_state| { - since_state.as_ref().map_or(true, |since_state| { - current_members.len() - != since_state - .iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) - .count() - }) - }); - - let since_sender_member = since_state.as_ref().map(|since_state| { - since_state.as_ref().and_then(|state| { + let since_encryption = since_state.as_ref().map(|state| { state - .get(&(EventType::RoomMember, sender_user.as_str().to_owned())) - .and_then(|pdu| { - serde_json::from_value::< + .as_ref() + .map(|state| state.get(&(EventType::RoomEncryption, "".to_owned()))) + }); + + // Calculations: + let new_encrypted_room = + encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); + + let send_member_count = since_state.as_ref().map_or(false, |since_state| { + since_state.as_ref().map_or(true, |since_state| { + current_members.len() + != since_state + .iter() + .filter(|(key, _)| key.0 == EventType::RoomMember) + .count() + }) + }); + + let since_sender_member = since_state.as_ref().map(|since_state| { + since_state.as_ref().and_then(|state| { + state + .get(&(EventType::RoomMember, sender_user.as_str().to_owned())) + .and_then(|pdu| { + serde_json::from_value::< Raw, >(pdu.content.clone()) .expect("Raw::from_value always works") .deserialize() .map_err(|_| Error::bad_database("Invalid PDU in database.")) .ok() - }) - }) - }); + }) + }) + }); - if encrypted_room { - for (user_id, current_member) in current_members { - let current_membership = serde_json::from_value::< - Raw, - >(current_member.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid PDU in database."))? 
- .membership; + if encrypted_room { + for (user_id, current_member) in current_members { + let current_membership = serde_json::from_value::< + Raw, + >(current_member.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| Error::bad_database("Invalid PDU in database."))? + .membership; - let since_membership = - since_state - .as_ref() - .map_or(MembershipState::Join, |since_state| { - since_state - .as_ref() - .and_then(|since_state| { - since_state - .get(&(EventType::RoomMember, user_id.clone())) - .and_then(|since_member| { - serde_json::from_value::< + let since_membership = + since_state + .as_ref() + .map_or(MembershipState::Join, |since_state| { + since_state + .as_ref() + .and_then(|since_state| { + since_state + .get(&(EventType::RoomMember, user_id.clone())) + .and_then(|since_member| { + serde_json::from_value::< Raw, >( since_member.content.clone() @@ -188,50 +196,158 @@ pub async fn sync_events_route( Error::bad_database("Invalid PDU in database.") }) .ok() - }) - }) - .map_or(MembershipState::Leave, |member| member.membership) - }); + }) + }) + .map_or(MembershipState::Leave, |member| member.membership) + }); - let user_id = UserId::try_from(user_id.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + let user_id = UserId::try_from(user_id.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - match (since_membership, current_membership) { - (MembershipState::Leave, MembershipState::Join) => { - // A new user joined an encrypted room - if !share_encrypted_room(&db, &sender_user, &user_id, &room_id) { - device_list_updates.insert(user_id); + match (since_membership, current_membership) { + (MembershipState::Leave, MembershipState::Join) => { + // A new user joined an encrypted room + if !share_encrypted_room(&db, &sender_user, &user_id, &room_id) { + device_list_updates.insert(user_id); + } } + (MembershipState::Join, MembershipState::Leave) => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} } - (MembershipState::Join, MembershipState::Leave) => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} } } - } - let joined_since_last_sync = since_sender_member.map_or(false, |member| { - member.map_or(true, |member| member.membership != MembershipState::Join) - }); + let joined_since_last_sync = since_sender_member.map_or(false, |member| { + member.map_or(true, |member| member.membership != MembershipState::Join) + }); - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + db.rooms + .room_members(&room_id) + .filter_map(|user_id| Some(user_id.ok()?)) + .filter(|user_id| { + // Don't send key updates from the sender to the sender + sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target already + !share_encrypted_room(&db, sender_user, user_id, &room_id) + }), + ); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + let joined_member_count = db.rooms.room_members(&room_id).count(); + let invited_member_count = 
db.rooms.room_members_invited(&room_id).count(); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + for hero in db + .rooms + .all_pdus(&sender_user, &room_id)? + .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) + .map(|(_, pdu)| { + let content = serde_json::from_value::< + Raw, + >(pdu.content.clone()) + .expect("Raw::from_value always works") + .deserialize() + .map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + if let Some(state_key) = &pdu.state_key { + let user_id = + UserId::try_from(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + // The membership was and still is invite or join + if matches!( + content.membership, + MembershipState::Join | MembershipState::Invite + ) && (db.rooms.is_joined(&user_id, &room_id)? + || db.rooms.is_invited(&user_id, &room_id)?) + { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + .filter_map(|u| u.ok()) // Filter out buggy users + // Filter for possible heroes + .filter_map(|u| u) + { + if heroes.contains(&hero) || hero == sender_user.as_str() { + continue; + } + + heroes.push(hero); + } + } + + ( + Some(joined_member_count), + Some(invited_member_count), + heroes, + ) + } else { + (None, None, Vec::new()) + }; + + let state_events = if joined_since_last_sync { db.rooms - .room_members(&room_id) - .filter_map(|user_id| Some(user_id.ok()?)) - .filter(|user_id| { - // Don't send key updates from the sender to the sender - sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&db, sender_user, user_id, &room_id) - }), - ); - } + .room_state_full(&room_id)? + .into_iter() + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect() + } else { + match since_state { + None => Vec::new(), + Some(Some(since_state)) => current_state + .iter() + .filter(|(key, value)| { + since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id) + }) + .filter(|(_, value)| { + !timeline_pdus.iter().any(|(_, timeline_pdu)| { + timeline_pdu.kind == value.kind + && timeline_pdu.state_key == value.state_key + }) + }) + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect(), + Some(None) => current_state + .iter() + .map(|(_, pdu)| pdu.to_sync_state_event()) + .collect(), + } + }; + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) + } else { + (Vec::new(), None, None, false, Vec::new()) + }; // Look for device list updates in this room device_list_updates.extend( @@ -240,71 +356,6 @@ pub async fn sync_events_route( .filter_map(|r| r.ok()), ); - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - let joined_member_count = db.rooms.room_members(&room_id).count(); - let invited_member_count = db.rooms.room_members_invited(&room_id).count(); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in db - .rooms - .all_pdus(&sender_user, &room_id)? 
- .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) - .map(|(_, pdu)| { - let content = serde_json::from_value::< - Raw, - >(pdu.content.clone()) - .expect("Raw::from_value always works") - .deserialize() - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::try_from(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - // The membership was and still is invite or join - if matches!( - content.membership, - MembershipState::Join | MembershipState::Invite - ) && (db.rooms.is_joined(&user_id, &room_id)? - || db.rooms.is_invited(&user_id, &room_id)?) - { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - .filter_map(|u| u.ok()) // Filter out buggy users - // Filter for possible heroes - .filter_map(|u| u) - { - if heroes.contains(&hero) || hero == sender_user.as_str() { - continue; - } - - heroes.push(hero); - } - } - - ( - Some(joined_member_count), - Some(invited_member_count), - heroes, - ) - } else { - (None, None, Vec::new()) - }; - let notification_count = if send_notification_counts { if let Some(last_read) = db.rooms.edus.private_read_get(&room_id, &sender_user)? { Some( @@ -385,34 +436,7 @@ pub async fn sync_events_route( events: room_events, }, state: sync_events::State { - events: if joined_since_last_sync { - db.rooms - .room_state_full(&room_id)? - .into_iter() - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect() - } else { - match since_state { - None => Vec::new(), - Some(Some(since_state)) => current_state - .iter() - .filter(|(key, value)| { - since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id) - }) - .filter(|(_, value)| { - !timeline_pdus.iter().any(|(_, timeline_pdu)| { - timeline_pdu.kind == value.kind - && timeline_pdu.state_key == value.state_key - }) - }) - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect(), - Some(None) => current_state - .iter() - .map(|(_, pdu)| pdu.to_sync_state_event()) - .collect(), - } - }, + events: state_events, }, ephemeral: sync_events::Ephemeral { events: edus }, }; diff --git a/src/database.rs b/src/database.rs index 607e1be..afcd58f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -27,7 +27,7 @@ pub struct Config { server_name: Box, database_path: String, #[serde(default = "default_cache_capacity")] - cache_capacity: u64, + cache_capacity: u32, #[serde(default = "default_max_request_size")] max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] @@ -48,7 +48,7 @@ fn true_fn() -> bool { true } -fn default_cache_capacity() -> u64 { +fn default_cache_capacity() -> u32 { 1024 * 1024 * 1024 } @@ -93,8 +93,7 @@ impl Database { pub async fn load_or_create(config: Config) -> Result { let db = sled::Config::default() .path(&config.database_path) - .cache_capacity(config.cache_capacity) - .print_profile_on_drop(false) + .cache_capacity(config.cache_capacity as u64) .open()?; info!("Opened sled database at {}", config.database_path); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4081944..b35d006 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1010,6 +1010,10 @@ impl Rooms { .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || self + .room_members(&room_id) + .filter_map(|r| r.ok()) + .any(|member| 
users.iter().any(|regex| regex.is_match(member.as_str()))) { sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } diff --git a/src/database/sending.rs b/src/database/sending.rs index 74aad32..0a66f73 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -8,7 +8,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::info; +use log::{error, info}; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, @@ -131,6 +131,7 @@ impl Sending { }; prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); + last_failed_try.insert(server.clone(), match last_failed_try.get(&server) { Some(last_failed) => { (last_failed.0+1, Instant::now()) diff --git a/src/main.rs b/src/main.rs index 93ab560..65434a5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -15,6 +15,7 @@ pub use database::Database; pub use error::{ConduitLogger, Error, Result}; pub use pdu::PduEvent; pub use rocket::State; +use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; use log::LevelFilter; @@ -154,7 +155,13 @@ fn setup_rocket() -> rocket::Rocket { server_server::get_profile_information_route, ], ) - .register(catchers![not_found_catcher]) + .register(catchers![ + not_found_catcher, + forbidden_catcher, + unknown_token_catcher, + missing_token_catcher, + bad_json_catcher + ]) .attach(AdHoc::on_attach("Config", |rocket| async { let config = rocket .figment() @@ -186,3 +193,26 @@ async fn main() { fn not_found_catcher(_req: &'_ Request<'_>) -> String { "404 Not Found".to_owned() } + +#[catch(580)] +fn forbidden_catcher() -> Result<()> { + Err(Error::BadRequest(ErrorKind::Forbidden, "Forbidden.")) +} + +#[catch(581)] +fn unknown_token_catcher() -> Result<()> { + Err(Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown token.", + )) +} + +#[catch(582)] +fn missing_token_catcher() -> Result<()> { + Err(Error::BadRequest(ErrorKind::MissingToken, "Missing token.")) +} + +#[catch(583)] +fn bad_json_catcher() -> Result<()> { + Err(Error::BadRequest(ErrorKind::BadJson, "Bad json.")) +} diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 0fdca74..45fcc7f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -45,7 +45,7 @@ where http::request::Request>, >>::Error: std::fmt::Debug, { - type Error = (); // TODO: Better error handling + type Error = (); type Owned = Data; type Borrowed = Self::Owned; @@ -82,7 +82,9 @@ where registration .get("as_token") .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token.as_deref() == Some(as_token)) + .map_or(false, |as_token| { + dbg!(token.as_deref()) == dbg!(Some(as_token)) + }) }) { match T::METADATA.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { @@ -105,7 +107,8 @@ where ); if !db.users.exists(&user_id).unwrap() { - return Failure((Status::Unauthorized, ())); + // Forbidden + return Failure((Status::raw(580), ())); } // TODO: Check if appservice is allowed to be that user @@ -119,15 +122,15 @@ where AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { if let Some(token) = token { match db.users.find_from_token(&token).unwrap() { - // TODO: M_UNKNOWN_TOKEN - None => return Failure((Status::Unauthorized, ())), + // Unknown Token + None => return Failure((Status::raw(581), ())), Some((user_id, device_id)) => { (Some(user_id), Some(device_id.into()), false) } } } else { - 
// TODO: M_MISSING_TOKEN - return Failure((Status::Unauthorized, ())); + // Missing Token + return Failure((Status::raw(582), ())); } } AuthScheme::ServerSignatures => (None, None, false), @@ -163,7 +166,7 @@ where }), Err(e) => { warn!("{:?}", e); - Failure((Status::BadRequest, ())) + Failure((Status::raw(583), ())) } } }) diff --git a/src/server_server.rs b/src/server_server.rs index 7ff9e3f..3fea4da 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -557,7 +557,7 @@ pub async fn send_transaction_message_route<'a>( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let next_room_state = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( &pdu, @@ -569,6 +569,8 @@ pub async fn send_transaction_message_route<'a>( &db.admin, )?; + db.rooms.set_room_state(&room_id, &next_room_state)?; + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } From ea1e4625d1987183f0bb669cb20bbabd08c71d26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 7 Feb 2021 13:20:00 +0100 Subject: [PATCH 14/62] fix: default config options --- conduit-example.toml | 6 +++--- src/database.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 70d3ce4..b82da2c 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -23,12 +23,12 @@ port = 6167 max_request_size = 20_000_000 # in bytes # Disable registration. No new users will be able to register on this server -#registration_disabled = false +#allow_registration = true # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work -#encryption_disabled = false -#federation_disabled = false +#allow_encryption = true +#allow_federation = false #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time diff --git a/src/database.rs b/src/database.rs index afcd58f..9fce293 100644 --- a/src/database.rs +++ b/src/database.rs @@ -32,7 +32,7 @@ pub struct Config { max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] max_concurrent_requests: u16, - #[serde(default)] + #[serde(default = "true_fn")] allow_registration: bool, #[serde(default = "true_fn")] allow_encryption: bool, From d49911c5e01ca1e1a6d14533bcf6ae47a146fe49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 7 Feb 2021 17:38:45 +0100 Subject: [PATCH 15/62] Add 'm.login.token' authentication --- Cargo.lock | 86 ++++++++++++++++++++++++++++++++++-- Cargo.toml | 2 + src/appservice_server.rs | 2 +- src/client_server/session.rs | 73 +++++++++++++++++++++--------- src/database.rs | 1 + src/database/globals.rs | 13 +++++- src/database/sending.rs | 2 +- 7 files changed, 150 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5be6aa..78ff405 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -182,6 +182,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "time 0.1.44", + "winapi 
0.3.9", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -197,6 +210,7 @@ dependencies = [ "http", "image", "js_int", + "jsonwebtoken", "log", "rand", "regex", @@ -243,7 +257,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time", + "time 0.2.23", "version_check", ] @@ -578,7 +592,7 @@ checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ "cfg-if 0.1.10", "libc", - "wasi", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] @@ -850,6 +864,20 @@ dependencies = [ "serde", ] +[[package]] +name = "jsonwebtoken" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +dependencies = [ + "base64 0.12.3", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "kernel32-sys" version = "0.2.2" @@ -1044,6 +1072,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -1204,6 +1243,17 @@ dependencies = [ "syn", ] +[[package]] +name = "pem" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c220d01f863d13d96ca82359d1e81e64a7c6bf0637bcde7b2349630addf0c6" +dependencies = [ + "base64 0.13.0", + "once_cell", + "regex", +] + [[package]] name = "percent-encoding" version = "2.1.0" @@ -1538,7 +1588,7 @@ dependencies = [ "rocket_http", "serde", "state", - "time", + "time 0.2.23", "tokio", "ubyte", "version_check", @@ -1575,7 +1625,7 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time", + "time 0.2.23", "tokio", "tokio-rustls", "uncased", @@ -1969,6 +2019,17 @@ dependencies = [ "libc", ] +[[package]] +name = "simple_asn1" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +dependencies = [ + "chrono", + "num-bigint", + "num-traits", +] + [[package]] name = "slab" version = "0.4.2" @@ -2168,6 +2229,17 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "time" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi 0.3.9", +] + [[package]] name = "time" version = "0.2.23" @@ -2498,6 +2570,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" version = "0.2.69" diff --git a/Cargo.toml b/Cargo.toml index 56a04e5..f7fbdc5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,8 @@ ring = "0.16.19" trust-dns-resolver = "0.19.6" # Used to find matching events for appservices regex = "1.4.2" +# jwt jsonwebtokens +jsonwebtoken = "7.2.0" [features] default = 
["conduit_bin"] diff --git a/src/appservice_server.rs b/src/appservice_server.rs index ec504b5..986909b 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,6 +1,6 @@ use crate::{utils, Error, Result}; use http::header::{HeaderValue, CONTENT_TYPE}; -use log::{info, warn}; +use log::warn; use ruma::api::OutgoingRequest; use std::{ convert::{TryFrom, TryInto}, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 48fbea2..1b2583c 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -8,6 +8,13 @@ use ruma::{ }, UserId, }; +use serde::Deserialize; + +#[derive(Debug, Deserialize)] +struct Claims { + sub: String, + exp: usize, +} #[cfg(feature = "conduit_bin")] use rocket::{get, post}; @@ -40,40 +47,62 @@ pub async fn login_route( body: Ruma>, ) -> ConduitResult { // Validate login method - let user_id = - // TODO: Other login methods - if let (login::IncomingUserInfo::MatrixId(username), login::IncomingLoginInfo::Password { password }) = - (&body.user, &body.login_info) - { - let user_id = UserId::parse_with_server_name(username.to_string(), db.globals.server_name()) - .map_err(|_| Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid." - ))?; - let hash = db.users.password_hash(&user_id)? - .ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "Wrong username or password." - ))?; + // TODO: Other login methods + let user_id = match &body.login_info { + login::IncomingLoginInfo::Password { password } => { + let username = if let login::IncomingUserInfo::MatrixId(matrix_id) = &body.user { + matrix_id + } else { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); + }; + let user_id = + UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; + let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password.", + ))?; if hash.is_empty() { return Err(Error::BadRequest( ErrorKind::UserDeactivated, - "The user has been deactivated" + "The user has been deactivated", )); } - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); + let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); if !hash_matches { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Wrong username or password.")); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password.", + )); } user_id - } else { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); - }; + } + login::IncomingLoginInfo::Token { token } => { + if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { + let token = jsonwebtoken::decode::( + &token, + &jwt_decoding_key, + &jsonwebtoken::Validation::default(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?; + let username = token.claims.sub; + UserId::parse_with_server_name(username, db.globals.server_name()).map_err( + |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."), + )? 
+ } else { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Token login is not supported (server has no jwt decoding key).", + )); + } + } + }; // Generate new device id if the user didn't specify one let device_id = body diff --git a/src/database.rs b/src/database.rs index 9fce293..8fcffd9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -38,6 +38,7 @@ pub struct Config { allow_encryption: bool, #[serde(default = "false_fn")] allow_federation: bool, + jwt_secret: Option, } fn false_fn() -> bool { diff --git a/src/database/globals.rs b/src/database/globals.rs index 3e24d82..ccd6284 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -11,12 +11,13 @@ pub const COUNTER: &str = "c"; #[derive(Clone)] pub struct Globals { + pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host pub(super) globals: sled::Tree, config: Config, keypair: Arc, reqwest_client: reqwest::Client, - pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host dns_resolver: TokioAsyncResolver, + jwt_decoding_key: Option>, } impl Globals { @@ -62,6 +63,11 @@ impl Globals { .build() .unwrap(); + let jwt_decoding_key = config + .jwt_secret + .as_ref() + .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); + Ok(Self { globals, config, @@ -73,6 +79,7 @@ impl Globals { Error::bad_config("Failed to set up trust dns resolver with system config.") })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), + jwt_decoding_key, }) } @@ -126,4 +133,8 @@ impl Globals { pub fn dns_resolver(&self) -> &TokioAsyncResolver { &self.dns_resolver } + + pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { + self.jwt_decoding_key.as_ref() + } } diff --git a/src/database/sending.rs b/src/database/sending.rs index 0a66f73..fd32793 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -8,7 +8,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::{error, info}; +use log::info; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ api::{appservice, federation, OutgoingRequest}, From 4a92a29b566d27876ae85e5366272a695e17689b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 16/62] State resolution outline for /send --- Cargo.lock | 152 +++++++------------ Cargo.toml | 8 +- src/main.rs | 2 +- src/pdu.rs | 2 +- src/ruma_wrapper.rs | 14 +- src/server_server.rs | 349 +++++++++++++++++++++++++++++++++++++++---- 6 files changed, 384 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78ff405..9ab184c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,21 +229,11 @@ dependencies = [ "trust-dns-resolver", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if 0.1.10", - "wasm-bindgen", -] - [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" +checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" [[package]] name = "constant_time_eq" @@ -645,9 +635,9 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] 
name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] @@ -674,9 +664,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" dependencies = [ "bytes", "fnv", @@ -772,9 +762,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", "hashbrown", @@ -1063,9 +1053,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.36" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cf75f38f16cb05ea017784dc6dbfd354f76c223dba37701734c4f5a9337d02" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1148,12 +1138,12 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.30" +version = "0.10.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" dependencies = [ "bitflags", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "foreign-types", "lazy_static", "libc", @@ -1168,18 +1158,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.12.0+1.1.1h" +version = "111.13.0+1.1.1i" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61" +checksum = "045e4dc48af57aad93d665885789b43222ae26f4886494da12d1ed58d309dcb6" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" dependencies = [ "autocfg", "cc", @@ -1202,9 +1192,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ "cfg-if 1.0.0", "instant", @@ -1216,9 +1206,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" @@ -1326,9 +1316,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "png" 
-version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe7f9f1c730833200b134370e1d5098964231af8450bce9b78ee3ab5278b970" +checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" dependencies = [ "bitflags", "crc32fast", @@ -1393,9 +1383,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" dependencies = [ "proc-macro2", ] @@ -1507,9 +1497,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", "bytes", @@ -1536,7 +1526,6 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] @@ -1636,7 +1625,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "js_int", @@ -1654,7 +1643,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "http", "percent-encoding", @@ -1669,7 +1658,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1680,7 +1669,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "ruma-api", "ruma-common", @@ -1694,7 +1683,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "assign", "http", @@ -1713,7 +1702,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = 
"git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "maplit", @@ -1726,7 +1715,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-common", @@ -1740,7 +1729,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1751,7 +1740,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "js_int", "ruma-api", @@ -1766,7 +1755,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "paste", "rand", @@ -1780,7 +1769,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro2", "quote", @@ -1791,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "serde", ] @@ -1799,7 +1788,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "form_urlencoded", "itoa", @@ -1812,7 +1801,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1823,7 +1812,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = 
"git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" +source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" dependencies = [ "base64 0.12.3", "ring", @@ -1889,12 +1878,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2012,9 +1995,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -2060,13 +2043,12 @@ checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" [[package]] name = "socket2" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2094,7 +2076,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=timo-spec-comp#a1c15253f0777baad251da47c3f2c016cfed6f7e" +source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" dependencies = [ "itertools", "maplit", @@ -2177,9 +2159,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.54" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44" +checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" dependencies = [ "proc-macro2", "quote", @@ -2295,9 +2277,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.23" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" +checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ "bytes", "fnv", @@ -2365,9 +2347,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] @@ -2544,9 +2526,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" @@ -2644,30 +2626,6 @@ version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" -[[package]] -name = "wasm-bindgen-test" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" -dependencies = [ - "console_error_panic_hook", - "js-sys", - "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" -dependencies = [ - "proc-macro2", - "quote", -] - [[package]] name = "web-sys" version = "0.3.46" diff --git a/Cargo.toml b/Cargo.toml index f7fbdc5..c2db3d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "ee814aa84934530d76f5e4b275d739805b49bdef" } -# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "unstable-join" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +# ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec", "gen-eventid"] } -#state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = { version = "0.2.23" } diff --git a/src/main.rs b/src/main.rs index 65434a5..4cab764 100644 --- a/src/main.rs +++ b/src/main.rs @@ -190,7 +190,7 @@ async fn main() { } #[catch(404)] -fn not_found_catcher(_req: &'_ Request<'_>) -> String { +fn not_found_catcher(_: &Request<'_>) -> String { "404 Not Found".to_owned() } diff --git a/src/pdu.rs b/src/pdu.rs index 75ef492..f6ec415 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -17,7 +17,7 @@ use std::{ time::UNIX_EPOCH, }; -#[derive(Deserialize, Serialize, Debug)] +#[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: EventId, pub room_id: RoomId, diff --git a/src/ruma_wrapper.rs 
b/src/ruma_wrapper.rs index 45fcc7f..e2f44cd 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ use crate::Error; use ruma::{ - api::{AuthScheme, OutgoingRequest}, + api::{AuthScheme, IncomingRequest, OutgoingRequest}, identifiers::{DeviceId, UserId}, Outgoing, }; @@ -29,7 +29,7 @@ use { /// This struct converts rocket requests into ruma structs by converting them into http requests /// first. -pub struct Ruma { +pub struct Ruma { pub body: T::Incoming, pub sender_user: Option, pub sender_device: Option>, @@ -40,10 +40,7 @@ pub struct Ruma { #[cfg(feature = "conduit_bin")] impl<'a, T: Outgoing + OutgoingRequest> FromTransformedData<'a> for Ruma where - ::Incoming: TryFrom>> + std::fmt::Debug, - <::Incoming as std::convert::TryFrom< - http::request::Request>, - >>::Error: std::fmt::Debug, + T::Incoming: IncomingRequest, { type Error = (); type Owned = Data; @@ -152,8 +149,7 @@ where let http_request = http_request.body(body.clone()).unwrap(); debug!("{:?}", http_request); - - match ::Incoming::try_from(http_request) { + match ::try_from_http_request(http_request) { Ok(t) => Success(Ruma { body: t, sender_user, @@ -173,7 +169,7 @@ where } } -impl Deref for Ruma { +impl Deref for Ruma { type Target = T::Incoming; fn deref(&self) -> &Self::Target { diff --git a/src/server_server.rs b/src/server_server.rs index 3fea4da..d68e9fa 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,7 +1,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; -use log::{info, warn}; +use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -11,17 +11,18 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::get_missing_events, + event::{get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - EventId, RoomId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; +use state_res::StateMap; use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, @@ -476,6 +477,34 @@ pub async fn get_public_rooms_route( .into()) } +#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum PrevEvents { + Sequential(T), + Fork(Vec), +} + +impl IntoIterator for PrevEvents { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + match self { + Self::Sequential(item) => vec![item].into_iter(), + Self::Fork(list) => list.into_iter(), + } + } +} + +impl PrevEvents { + pub fn new(id: &[T]) -> Self { + match id { + [] => panic!("All events must have previous event"), + [single_id] => Self::Sequential(single_id.clone()), + rest => Self::Fork(rest.to_vec()), + } + } +} + #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -532,55 +561,313 @@ pub async fn send_transaction_message_route<'a>( // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); for pdu in &body.pdus { - // Ruma/PduEvent/StateEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 1. Is a valid event, otherwise it is dropped. 
+ // Ruma/PduEvent/StateEvent satisfies this - // state-res checks signatures - 2. Passes signature checks, otherwise event is dropped. - - // 3. Passes hash checks, otherwise it is redacted before being processed further. - // TODO: redact event if hashing fails let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. + let keys = db.globals.keypair(); + let mut pub_key_set = BTreeMap::new(); + pub_key_set.insert( + "ed25519:1".to_string(), + String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), + ); + let mut pub_key_map = BTreeMap::new(); + pub_key_map.insert("domain".to_string(), pub_key_set); + + let value = + match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => { + resolved_map + .insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + } + } else { + value + } + } + Err(_e) => { + resolved_map.insert(event_id, Err("Room is unknown to this server".into())); + continue; + } + }; + let pdu = serde_json::from_value::( serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), ) .expect("all ruma pdus are conduit pdus"); - let room_id = &pdu.room_id; // If we have no idea about this room skip the PDU - if !db.rooms.exists(room_id)? { + if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); + // TODO: remove the need to convert to state_res + let event = pdu.convert_for_state_res(); + let previous = pdu + .prev_events + .first() + .map(|id| { + db.rooms + .get_pdu(id) + .expect("todo") + .map(|ev| ev.convert_for_state_res()) + }) + .flatten(); - let next_room_state = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &pdu, - value, - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + // 4. + let auth_events = db.rooms.get_auth_events( + &pdu.room_id, + &pdu.kind, + &pdu.sender, + pdu.state_key.as_deref(), + pdu.content.clone(), )?; - - db.rooms.set_room_state(&room_id, &next_room_state)?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + auth_events + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with auth events".into()), + ); + continue; } - resolved_map.insert(event_id, Ok::<(), String>(())); + let mut previous_states = vec![]; + for id in &pdu.prev_events { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? 
+ .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + previous_states.push(state); + } else { + // fetch the state + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. + let state_at_event = if previous_states.is_empty() { + // State is empty + Default::default() + } else if previous_states.len() == 1 { + previous_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &previous_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous.clone(), + state_at_event + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Event failed auth with state_at + resolved_map.insert( + event.event_id(), + Err("Event has failed auth check with state at the event".into()), + ); + continue; + } + + // The event could still be soft failed + append_state_soft(&db, &pdu)?; + + // Gather the forward extremities and resolve + let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; + let mut fork_states = vec![]; + for id in &forward_extrems { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + fork_states.push(state); + } else { + // This is probably an error?? + match db + .sending + .send_federation_request( + &db.globals, + body.body.origin, + get_room_state_ids::v1::Request { + room_id: &pdu.room_id, + event_id: id, + }, + ) + .await + { + Ok(res) => todo!(), + Err(e) => panic!(e), + } + } + } + + // 6. + let state_at_forks = if fork_states.is_empty() { + // State is empty + Default::default() + } else if fork_states.len() == 1 { + fork_states[0].clone() + } else { + match state_res::StateResolution::resolve( + &pdu.room_id, + &RoomVersionId::Version6, + &fork_states + .into_iter() + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, v.event_id)) + .collect::>() + }) + .collect::>(), + None, + &db.rooms, + ) { + Ok(res) => res + .into_iter() + .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .collect(), + Err(e) => panic!("{:?}", e), + } + }; + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &event, + previous, + state_at_forks + .into_iter() + .map(|(k, v)| (k, v.convert_for_state_res())) + .collect(), + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? 
+        {
+            // Soft fail
+            resolved_map.insert(event.event_id(), Err("Event has been soft failed".into()));
+        } else {
+            append_state(&db, &pdu)?;
+            // Event has passed all auth/stateres checks
+            resolved_map.insert(event.event_id(), Ok(()));
+        }
     }
 
     Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into())
 }
 
+fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result<Vec<EventId>> {
+    todo!()
+}
+
+fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> {
+    let count = db.globals.next_count()?;
+    let mut pdu_id = pdu.room_id.as_bytes().to_vec();
+    pdu_id.push(0xff);
+    pdu_id.extend_from_slice(&count.to_be_bytes());
+
+    db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?;
+    db.rooms.append_pdu(
+        pdu,
+        &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"),
+        count,
+        pdu_id.clone().into(),
+        &db.globals,
+        &db.account_data,
+        &db.admin,
+    )?;
+
+    for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) {
+        db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?;
+    }
+
+    Ok(())
+}
+
+/// TODO: This should not write to the current room state (roomid_statehash)
+fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> {
+    let count = db.globals.next_count()?;
+    let mut pdu_id = pdu.room_id.as_bytes().to_vec();
+    pdu_id.push(0xff);
+    pdu_id.extend_from_slice(&count.to_be_bytes());
+
+    db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?;
+    db.rooms.append_pdu(
+        pdu,
+        &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"),
+        count,
+        pdu_id.clone().into(),
+        &db.globals,
+        &db.account_data,
+        &db.admin,
+    )?;
+
+    for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) {
+        db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?;
+    }
+
+    Ok(())
+}
+
 #[cfg_attr(
     feature = "conduit_bin",
     post("/_matrix/federation/v1/get_missing_events/<_>", data = "<body>")

From 4b9976aa743321a0c062fac9ffd2de737531b717 Mon Sep 17 00:00:00 2001
From: Devin Ragotzy
Date: Thu, 31 Dec 2020 08:40:49 -0500
Subject: [PATCH 17/62] Update state-res, use the new Event trait

This also bumps ruma to latest and removes js_int in favor of the ruma
re-export
---
 Cargo.lock | 140 +++++++++++++++++++-------
 Cargo.toml | 8 +-
 src/client_server/directory.rs | 2 +-
 src/client_server/media.rs | 2 +-
 src/client_server/membership.rs | 22 +++--
 src/client_server/message.rs | 7 +-
 src/database/rooms.rs | 68 ++++++++++------
 src/database/rooms/edus.rs | 3 +-
 src/database/users.rs | 3 +-
 src/pdu.rs | 116 +++++++++++++-------------
 src/server_server.rs | 64 ++++++++-------
 11 files changed, 252 insertions(+), 183 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 9ab184c..b05a3c4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -212,8 +212,8 @@ dependencies = [
  "js_int",
  "jsonwebtoken",
  "log",
- "rand",
  "regex",
+ "rand 0.7.3",
  "reqwest",
  "ring",
  "rocket",
@@ -585,6 +585,17 @@ dependencies = [
  "wasi 0.9.0+wasi-snapshot-preview1",
 ]
 
+[[package]]
+name = "getrandom"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4"
+dependencies = [
+ "cfg-if 0.1.10",
+ "libc",
+ "wasi",
+]
+
 [[package]]
 name = "gif"
 version = "0.11.1"
@@ -847,9 +858,9 @@ dependencies = [
 
 [[package]]
 name = "js_int"
-version = "0.1.9"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b96797f53235a1d6dc985f244a69de54b04c45b7e0e357a35c85a45a847d92f2"
+checksum = "fcae89e078a96b781b38f36225bb3a174b8f6e905dfec550dd16a13539c82acc"
 dependencies = [
"serde", ] @@ -1396,11 +1407,23 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.0", + "rand_hc 0.3.0", ] [[package]] @@ -1410,7 +1433,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.0", ] [[package]] @@ -1419,7 +1452,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", +] + +[[package]] +name = "rand_core" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +dependencies = [ + "getrandom 0.2.0", ] [[package]] @@ -1428,7 +1470,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.0", ] [[package]] @@ -1443,7 +1494,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] @@ -1571,7 +1622,7 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand", + "rand 0.7.3", "ref-cast", "rocket_codegen", "rocket_http", @@ -1625,7 +1676,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "js_int", @@ -1643,7 +1694,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "http", "percent-encoding", @@ -1658,7 +1709,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1669,7 +1720,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "ruma-api", "ruma-common", @@ -1683,7 +1734,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "assign", "http", @@ -1702,7 +1753,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "maplit", @@ -1715,7 +1766,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-common", @@ -1729,7 +1780,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1740,7 +1791,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "js_int", "ruma-api", @@ -1755,21 +1806,21 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand", + "rand 0.8.0", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", + "ruma-serde-macros", "serde", - "strum", ] [[package]] name = "ruma-identifiers-macros" version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro2", "quote", @@ -1780,7 +1831,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "serde", ] @@ -1788,7 +1839,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "form_urlencoded", "itoa", @@ -1800,8 +1851,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +version = "0.2.3" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1812,9 +1863,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-dev.1" -source = "git+https://github.com/ruma/ruma?rev=45d01011554f9d07739e9a5edf5498d8ac16f273#45d01011554f9d07739e9a5edf5498d8ac16f273" +source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -2076,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=conflict#e2c5bb401263e1b2fde60313acf5fc4ef072c74d" +source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" dependencies = [ "itertools", "maplit", @@ -2136,27 +2187,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" -[[package]] -name = "strum" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b89a286a7e3b5720b9a477b23253bc50debac207c8d21505f8e70b36792f11b5" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.19.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "syn" version = "1.0.55" @@ -2176,7 +2206,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2416,7 +2446,7 @@ dependencies = [ "idna", "lazy_static", "log", - "rand", + "rand 0.7.3", "smallvec", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index c2db3d9..bf74e8a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,13 +18,14 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f33 #rocket = { git = 
"https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "45d01011554f9d07739e9a5edf5498d8ac16f273" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } -state-res = { git = "https://github.com/ruma/state-res", branch = "conflict", features = ["unstable-pre-spec", "gen-eventid"] } +# TODO: remove the gen-eventid feature +state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio @@ -37,8 +38,7 @@ log = "0.4.11" http = "0.2.1" # Used to find data directory for default db path directories = "3.0.1" -# Used for number types for ruma -js_int = "0.1.9" + # Used for ruma wrapper serde_json = { version = "1.0.60", features = ["raw_value"] } # Used for appservice registration files diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index fa5db3a..2bff20c 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -124,7 +124,7 @@ pub async fn get_room_visibility_route( pub async fn get_public_rooms_filtered_helper( db: &Database, server: Option<&ServerName>, - limit: Option, + limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 156040b..f792062 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -131,7 +131,7 @@ pub async fn get_content_thumbnail_route( allow_remote: false, height: body.height, width: body.width, - method: body.method, + method: body.method.clone(), server_name: &body.server_name, media_id: &body.media_id, }, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b459d37..eb44085 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,7 +21,7 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::StateEvent; +use state_res::Event; use std::{ collections::{BTreeMap, HashMap, HashSet}, convert::TryFrom, @@ -594,19 +594,19 @@ async fn join_room_by_id_helper( .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created .map(|r| { let (event_id, value) = r?; - state_res::StateEvent::from_id_canon_obj(event_id.clone(), value.clone()) + PduEvent::from_id_val(&event_id, value.clone()) 
.map(|ev| (event_id, Arc::new(ev))) .map_err(|e| { warn!("{:?}: {}", value, e); Error::BadServerResponse("Invalid PDU in send_join response.") }) }) - .collect::>>>()?; + .collect::>>>()?; let control_events = event_map .values() - .filter(|pdu| pdu.is_power_event()) - .map(|pdu| pdu.event_id()) + .filter(|pdu| state_res::is_power_event(pdu)) + .map(|pdu| pdu.event_id.clone()) .collect::>(); // These events are not guaranteed to be sorted but they are resolved according to spec @@ -646,7 +646,8 @@ async fn join_room_by_id_helper( .cloned() .collect::>(); - let power_level = resolved_control_events.get(&(EventType::RoomPowerLevels, "".into())); + let power_level = + resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string()))); // Sort the remaining non control events let sorted_event_ids = state_res::StateResolution::mainline_sort( room_id, @@ -685,8 +686,13 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( +<<<<<<< HEAD &PduEvent::from(&**pdu), utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +======= + &pdu, + &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), +>>>>>>> 6232d1f (Update state-res, use the new Event trait) count, pdu_id.clone().into(), &db.globals, @@ -695,7 +701,9 @@ async fn join_room_by_id_helper( )?; if state_events.contains(ev_id) { - state.insert((pdu.kind(), pdu.state_key()), pdu_id); + if let Some(key) = &pdu.state_key { + state.insert((pdu.kind(), key.to_string()), pdu_id); + } } } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3640730..c56cc94 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -8,7 +8,10 @@ use ruma::{ events::EventContent, EventId, }; -use std::convert::{TryFrom, TryInto}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -46,7 +49,7 @@ pub async fn send_message_event_route( return Ok(send_message_event::Response { event_id }.into()); } - let mut unsigned = serde_json::Map::new(); + let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); let event_id = db.rooms.build_and_append_pdu( diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b35d006..f0129c6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Requester, StateEvent, StateMap, StateStore}; +use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; use std::{ collections::{BTreeMap, HashMap}, @@ -67,12 +67,8 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> state_res::Result> { +impl StateStore for Rooms { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { let pid = self .get_pdu_id(event_id) .map_err(StateError::custom)? 
@@ -91,7 +87,7 @@ impl StateStore for Rooms { .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, ) .map_err(Into::into) - .and_then(|pdu: StateEvent| { + .and_then(|pdu: PduEvent| { // conduit's PDU's always contain a room_id but some // of ruma's do not so this must be an Option if pdu.room_id() == room_id { @@ -112,7 +108,7 @@ impl Rooms { &self, room_id: &RoomId, state_hash: &StateHashId, - ) -> Result> { + ) -> Result> { self.stateid_pduid .scan_prefix(&state_hash) .values() @@ -141,7 +137,7 @@ impl Rooms { pdu, )) }) - .collect::>>() + .collect() } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -181,7 +177,7 @@ impl Rooms { ))) }) } else { - return Ok(None); + Ok(None) } } @@ -205,7 +201,7 @@ impl Rooms { content: serde_json::Value, ) -> Result> { let auth_events = state_res::auth_types_for_event( - kind.clone(), + kind, sender, state_key.map(|s| s.to_string()), content, @@ -213,7 +209,13 @@ impl Rooms { let mut events = StateMap::new(); for (event_type, state_key) in auth_events { - if let Some((_, pdu)) = self.room_state_get(room_id, &event_type, &state_key)? { + if let Some((_, pdu)) = self.room_state_get( + room_id, + &event_type, + &state_key + .as_deref() + .expect("found a non state event in auth events"), + )? { events.insert((event_type, state_key), pdu); } } @@ -290,7 +292,10 @@ impl Rooms { } /// Returns the full room state. - pub fn room_state_full(&self, room_id: &RoomId) -> Result> { + pub fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { self.state_full(&room_id, ¤t_state_hash) } else { @@ -795,23 +800,40 @@ impl Rooms { ErrorKind::Unknown, "Membership can't be the first event", ))?)? 
- .map(|pdu| pdu.convert_for_state_res()); + .map(Arc::new); event_auth::valid_membership_change( // TODO this is a bit of a hack but not sure how to have a type // declared in `state_res` crate easily convert to/from conduit::PduEvent - Requester { - prev_event_ids: prev_events.to_owned(), - room_id: &room_id, - content: &content, - state_key: Some(state_key.to_owned()), - sender: &sender, - }, + &Arc::new(PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater"), + room_id: room_id.clone(), + sender: sender.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: event_type, + content, + state_key: Some(state_key.clone()), + prev_events, + depth: (prev_events.len() as u32).into(), + auth_events: auth_events + .into_iter() + .map(|(_, pdu)| pdu.event_id) + .collect(), + redacts, + unsigned: unsigned + .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), + hashes: ruma::events::pdu::EventHash { + sha256: "aaa".to_owned(), + }, + signatures: BTreeMap::new(), + }), prev_event, None, // TODO: third party invite &auth_events .iter() .map(|((ty, key), pdu)| { - Ok(((ty.clone(), key.clone()), pdu.convert_for_state_res())) + Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) .collect::>>()?, ) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 29edc2a..2b1b03d 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, @@ -7,7 +6,7 @@ use ruma::{ }, presence::PresenceState, serde::Raw, - RoomId, UserId, + RoomId, UInt, UserId, }; use std::{ collections::HashMap, diff --git a/src/database/users.rs b/src/database/users.rs index 9da0776..153dce9 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,7 @@ use ruma::{ encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; diff --git a/src/pdu.rs b/src/pdu.rs index f6ec415..c764700 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,11 @@ use crate::Error; -use js_int::UInt; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -33,8 +32,8 @@ pub struct PduEvent { pub auth_events: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub redacts: Option, - #[serde(default, skip_serializing_if = "serde_json::Map::is_empty")] - pub unsigned: serde_json::Map, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub unsigned: BTreeMap, pub hashes: EventHash, pub signatures: BTreeMap, BTreeMap>, } @@ -227,61 +226,66 @@ impl PduEvent { ) .expect("Raw::from_value always works") } -} -impl From<&state_res::StateEvent> for PduEvent { - fn from(pdu: &state_res::StateEvent) -> Self { - Self { - event_id: pdu.event_id(), - room_id: 
pdu.room_id().clone(), - sender: pdu.sender().clone(), - origin_server_ts: (pdu - .origin_server_ts() - .duration_since(UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64) - .try_into() - .expect("time is valid"), - kind: pdu.kind(), - content: pdu.content().clone(), - state_key: Some(pdu.state_key()), - prev_events: pdu.prev_event_ids(), - depth: *pdu.depth(), - auth_events: pdu.auth_events(), - redacts: pdu.redacts().cloned(), - unsigned: pdu.unsigned().clone().into_iter().collect(), - hashes: pdu.hashes().clone(), - signatures: pdu.signatures(), - } + pub fn from_id_val( + event_id: &EventId, + json: CanonicalJsonObject, + ) -> Result { + json.insert( + "event_id".to_string(), + ruma::serde::to_canonical_value(event_id).expect("event_id is a valid Value"), + ); + + serde_json::from_value(serde_json::to_value(json).expect("valid JSON")) } } -impl PduEvent { - pub fn convert_for_state_res(&self) -> Arc { - Arc::new( - // For consistency of eventId (just in case) we use the one - // generated by conduit for everything. - state_res::StateEvent::from_id_value( - self.event_id.clone(), - json!({ - "event_id": self.event_id, - "room_id": self.room_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "type": self.kind, - "content": self.content, - "state_key": self.state_key, - "prev_events": self.prev_events, - "depth": self.depth, - "auth_events": self.auth_events, - "redacts": self.redacts, - "unsigned": self.unsigned, - "hashes": self.hashes, - "signatures": self.signatures, - }), - ) - .expect("all conduit PDUs are state events"), - ) +impl state_res::Event for PduEvent { + fn event_id(&self) -> &EventId { + &self.event_id + } + + fn room_id(&self) -> &RoomId { + &self.room_id + } + + fn sender(&self) -> &UserId { + &self.sender + } + fn kind(&self) -> EventType { + self.kind.clone() + } + + fn content(&self) -> serde_json::Value { + self.content.clone() + } + fn origin_server_ts(&self) -> std::time::SystemTime { + UNIX_EPOCH + std::time::Duration::from_millis(self.origin_server_ts.into()) + } + + fn state_key(&self) -> Option { + self.state_key.clone() + } + fn prev_events(&self) -> Vec { + self.prev_events.to_vec() + } + fn depth(&self) -> &UInt { + &self.depth + } + fn auth_events(&self) -> Vec { + self.auth_events.to_vec() + } + fn redacts(&self) -> Option<&EventId> { + self.redacts.as_ref() + } + fn hashes(&self) -> &EventHash { + &self.hashes + } + fn signatures(&self) -> BTreeMap, BTreeMap> { + self.signatures.clone() + } + fn unsigned(&self) -> &BTreeMap { + &self.unsigned } } @@ -315,7 +319,7 @@ pub struct PduBuilder { #[serde(rename = "type")] pub event_type: EventType, pub content: serde_json::Value, - pub unsigned: Option>, + pub unsigned: Option>, pub state_key: Option, pub redacts: Option, } diff --git a/src/server_server.rs b/src/server_server.rs index d68e9fa..58d85b1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,12 +20,13 @@ use ruma::{ directory::{IncomingFilter, IncomingRoomNetwork}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::StateMap; +use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, net::{IpAddr, SocketAddr}, + sync::Arc, time::{Duration, SystemTime}, }; @@ -610,17 +611,12 @@ pub async fn send_transaction_message_route<'a>( continue; } - // TODO: remove the need to convert to state_res - let event = pdu.convert_for_state_res(); + let event = Arc::new(pdu.clone()); + let previous = pdu .prev_events .first() - 
.map(|id| { - db.rooms - .get_pdu(id) - .expect("todo") - .map(|ev| ev.convert_for_state_res()) - }) + .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); // 4. @@ -637,27 +633,32 @@ pub async fn send_transaction_message_route<'a>( previous.clone(), auth_events .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) + .map(|(k, v)| (k, Arc::new(v))) .collect(), None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with auth events".into()), ); continue; } - let mut previous_states = vec![]; + let mut previous_states: Vec>> = vec![]; for id in &pdu.prev_events { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .collect(); previous_states.push(state); } else { // fetch the state @@ -693,7 +694,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -702,7 +703,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -712,17 +713,14 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_event, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { // Event failed auth with state_at resolved_map.insert( - event.event_id(), + pdu.event_id, Err("Event has failed auth check with state at the event".into()), ); continue; @@ -733,14 +731,20 @@ pub async fn send_transaction_message_route<'a>( // Gather the forward extremities and resolve let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states = vec![]; + let mut fork_states: Vec>> = vec![]; for id in &forward_extrems { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); - let state = db.rooms.state_full(&pdu.room_id, &state_hash)?; + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + fork_states.push(state); } else { // This is probably an error?? @@ -776,7 +780,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|map| { map.into_iter() - .map(|(k, v)| (k, v.event_id)) + .map(|(k, v)| (k, v.event_id.clone())) .collect::>() }) .collect::>(), @@ -785,7 +789,7 @@ pub async fn send_transaction_message_route<'a>( ) { Ok(res) => res .into_iter() - .map(|(k, v)| (k, db.rooms.get_pdu(&v).unwrap().unwrap())) + .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), Err(e) => panic!("{:?}", e), } @@ -795,20 +799,20 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks - .into_iter() - .map(|(k, v)| (k, v.convert_for_state_res())) - .collect(), + state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
{ // Soft fail - resolved_map.insert(event.event_id(), Err("Event has been soft failed".into())); + resolved_map.insert( + event.event_id().clone(), + Err("Event has been soft failed".into()), + ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id(), Ok(())); + resolved_map.insert(event.event_id().clone(), Ok(())); } } From 63af3d3da06d1fdb4d8e8be3637d2814efba799d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 3 Jan 2021 17:26:17 -0500 Subject: [PATCH 18/62] Step 5 in /send just fetches state from incoming server --- Cargo.lock | 78 ++++++------- src/database/rooms.rs | 29 +---- src/pdu.rs | 2 +- src/server_server.rs | 264 +++++++++++++++++++++++------------------- 4 files changed, 186 insertions(+), 187 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b05a3c4..227e822 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] @@ -44,9 +44,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assign" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" +checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-trait" @@ -369,9 +369,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "either" @@ -576,11 +576,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -834,9 +834,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" @@ -1046,9 +1046,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", @@ -1149,9 +1149,9 @@ checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.31" +version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -1178,9 +1178,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.59" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe" +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" dependencies = [ "autocfg", "cc", @@ -1407,7 +1407,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1452,7 +1452,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] @@ -1494,25 +1494,25 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "redox_syscall", "rust-argon2", ] [[package]] name = "ref-cast" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -2005,9 +2005,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" dependencies = [ "itoa", "ryu", @@ -2088,9 +2088,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" +checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" [[package]] name = "socket2" @@ -2111,9 +2111,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" dependencies = [ "version_check", ] @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#9b96204571521e216a618d102459d662c52a2210" +source = 
"git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" dependencies = [ "itertools", "maplit", @@ -2189,9 +2189,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.55" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a571a711dddd09019ccc628e1b17fe87c59b09d513c06c026877aa708334f37a" +checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" dependencies = [ "proc-macro2", "quote", @@ -2214,18 +2214,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" dependencies = [ "proc-macro2", "quote", @@ -2752,9 +2752,9 @@ dependencies = [ [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f0129c6..ef76c39 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -802,32 +802,9 @@ impl Rooms { ))?)? 
.map(Arc::new); event_auth::valid_membership_change( - // TODO this is a bit of a hack but not sure how to have a type - // declared in `state_res` crate easily convert to/from conduit::PduEvent - &Arc::new(PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key: Some(state_key.clone()), - prev_events, - depth: (prev_events.len() as u32).into(), - auth_events: auth_events - .into_iter() - .map(|(_, pdu)| pdu.event_id) - .collect(), - redacts, - unsigned: unsigned - .map_or_else(BTreeMap::new, |m| m.into_iter().collect()), - hashes: ruma::events::pdu::EventHash { - sha256: "aaa".to_owned(), - }, - signatures: BTreeMap::new(), - }), + Some(state_key.as_str()), + &sender, + content.clone(), prev_event, None, // TODO: third party invite &auth_events diff --git a/src/pdu.rs b/src/pdu.rs index c764700..2997317 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -229,7 +229,7 @@ impl PduEvent { pub fn from_id_val( event_id: &EventId, - json: CanonicalJsonObject, + mut json: CanonicalJsonObject, ) -> Result { json.insert( "event_id".to_string(), diff --git a/src/server_server.rs b/src/server_server.rs index 58d85b1..3c4308c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -11,13 +11,15 @@ use ruma::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + serde::Raw, + signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; @@ -578,32 +580,13 @@ pub async fn send_transaction_message_route<'a>( let mut pub_key_map = BTreeMap::new(); pub_key_map.insert("domain".to_string(), pub_key_set); - let value = - match ruma::signatures::verify_event(&pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => { - resolved_map - .insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - } - } else { - value - } - } - Err(_e) => { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - }; - - let pdu = serde_json::from_value::( - serde_json::to_value(&value).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("all ruma pdus are conduit pdus"); + let pdu = match signature_and_hash_check(&pub_key_map, value) { + Ok(pdu) => pdu, + Err(e) => { + resolved_map.insert(event_id, Err(e)); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? 
{ @@ -619,7 +602,10 @@ pub async fn send_transaction_message_route<'a>( .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) .flatten(); - // 4. + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // the auth events that would be correct for this pdu. Put another way we should use the auth events + // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( &pdu.room_id, &pdu.kind, @@ -627,6 +613,12 @@ pub async fn send_transaction_message_route<'a>( pdu.state_key.as_deref(), pdu.content.clone(), )?; + + let mut event_map: state_res::EventMap> = auth_events + .iter() + .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .collect(); + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, @@ -635,7 +627,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), - None, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -646,66 +638,38 @@ pub async fn send_transaction_message_route<'a>( continue; } - let mut previous_states: Vec>> = vec![]; - for id in &pdu.prev_events { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? + let server_name = body.body.origin.clone(); + let (state_at_event, incoming_auth_events): (StateMap>, _) = match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) + .await? .into_iter() - .map(|((et, sk), ev)| ((et, Some(sk)), Arc::new(ev))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); - previous_states.push(state); - } else { - // fetch the state - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } - } - } - // 5. Passes authorization rules based on the state at the event, otherwise it is rejected. 
- let state_at_event = if previous_states.is_empty() { - // State is empty - Default::default() - } else if previous_states.len() == 1 { - previous_states[0].clone() - } else { - match state_res::StateResolution::resolve( - &pdu.room_id, - &RoomVersionId::Version6, - &previous_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, v)| (k, v.event_id.clone())) - .collect::>() - }) - .collect::>(), - None, - &db.rooms, - ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), - Err(e) => panic!("{:?}", e), + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await?, + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; } }; @@ -713,8 +677,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event, - None, + state_at_event.clone(), // TODO: could this be &state avoid .clone + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -747,22 +711,7 @@ pub async fn send_transaction_message_route<'a>( fork_states.push(state); } else { - // This is probably an error?? - match db - .sending - .send_federation_request( - &db.globals, - body.body.origin, - get_room_state_ids::v1::Request { - room_id: &pdu.room_id, - event_id: id, - }, - ) - .await - { - Ok(res) => todo!(), - Err(e) => panic!(e), - } + todo!("we don't know of a pdu that is part of our known forks OOPS") } } @@ -773,6 +722,18 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // Add as much as we can to the `event_map` (less DB hits) + event_map.extend( + incoming_auth_events + .into_iter() + .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + ); + event_map.extend( + state_at_event + .into_iter() + .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), + ); + match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, @@ -784,7 +745,7 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - None, + &mut event_map, &db.rooms, ) { Ok(res) => res @@ -819,8 +780,74 @@ pub async fn send_transaction_message_route<'a>( Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } +fn signature_and_hash_check( + pub_key_map: &ruma::signatures::PublicKeyMap, + value: CanonicalJsonObject, +) -> std::result::Result { + let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".into()), + } + } else { + value + } + } + Err(_e) => return Err("Signature verification failed".into()), + }; + + serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// TODO: this needs to add events to the DB in a way that does not +/// effect the state of the room +async fn fetch_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + events: &[EventId], +) -> Result> { + let mut pdus = vec![]; + for id in events { + match db.rooms.get_pdu(id)? 
{ + Some(pdu) => pdus.push(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: id }, + ) + .await + { + Ok(res) => { + let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(pdu) => { + // TODO: add to our DB somehow? + pdus.push(pdu); + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + } + } + Ok(pdus) +} + fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() + db.rooms.get_pdu_leaves(room_id) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -854,20 +881,15 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } + // db.rooms.append_pdu( + // pdu, + // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + // count, + // pdu_id.clone().into(), + // &db.globals, + // &db.account_data, + // &db.admin, + // )?; Ok(()) } From cdeb1236a20e39d0f9635e1c0e7ed40882effd59 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 5 Jan 2021 09:21:41 -0500 Subject: [PATCH 19/62] Fix clippy warnings remove unused imports --- Cargo.lock | 2 +- src/appservice_server.rs | 2 +- src/client_server/membership.rs | 1 - src/database/globals.rs | 4 ++- src/database/sending.rs | 12 ++++----- src/pdu.rs | 7 +---- src/server_server.rs | 45 ++++++++++++++++++++------------- 7 files changed, 39 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 227e822..9dc0b38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#bfadbdf57e26f26c2ea5b2ed50ce3e5f6fb914cd" +source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" dependencies = [ "itertools", "maplit", diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..04f14c0 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -25,7 +25,7 @@ where let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains("?") { + let symbol = if old_path_and_query.contains('?') { "&" } else { "?" 
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index eb44085..4e093c2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -675,7 +675,6 @@ async fn join_room_by_id_helper( .iter() .filter(|id| resolved_events.values().any(|rid| rid == *id)) { - // this is a `state_res::StateEvent` that holds a `ruma::Pdu` let pdu = event_map .get(ev_id) .expect("Found event_id in sorted events that is not in resolved state"); diff --git a/src/database/globals.rs b/src/database/globals.rs index ccd6284..beb7de5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -9,9 +9,11 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; +pub type DestinationCache = Arc, (String, Option)>>>; + #[derive(Clone)] pub struct Globals { - pub actual_destination_cache: Arc, (String, Option)>>>, // actual_destination, host + pub actual_destination_cache: DestinationCache, // actual_destination, host pub(super) globals: sled::Tree, config: Config, keypair: Arc, diff --git a/src/database/sending.rs b/src/database/sending.rs index fd32793..d99c4f3 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -79,7 +79,7 @@ impl Sending { match response { Ok((server, is_appservice)) => { let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -125,7 +125,7 @@ impl Sending { Err((server, is_appservice, e)) => { info!("Couldn't send transaction to {}\n{}", server, e); let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -157,7 +157,7 @@ impl Sending { .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) .map(|server_str| { // Appservices start with a plus - if server_str.starts_with("+") { + if server_str.starts_with('+') { (server_str[1..].to_owned(), true) } else { (server_str, false) @@ -186,7 +186,7 @@ impl Sending { } let mut prefix = if *is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -220,7 +220,7 @@ impl Sending { } pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = "+".as_bytes().to_vec(); + let mut key = b"+".to_vec(); key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); @@ -330,7 +330,7 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with("+") { + let (server, is_appservice) = if server.starts_with('+') { (&server[1..], true) } else { (&*server, false) diff --git a/src/pdu.rs b/src/pdu.rs index 2997317..86fbc9f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,12 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, - time::UNIX_EPOCH, -}; +use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { diff --git a/src/server_server.rs b/src/server_server.rs index 3c4308c..3de3636 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,4 @@ -use crate::{client_server, pdu, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -11,14 +11,13 @@ use ruma::{ 
get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + event::{get_event, get_missing_events, get_room_state_ids}, query::get_profile_information, transactions::send_transaction_message, }, OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - serde::Raw, signatures::{CanonicalJsonObject, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; @@ -220,7 +219,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination( globals: &crate::database::globals::Globals, - destination: &Box, + destination: &ServerName, ) -> (String, Option) { let mut host = None; @@ -594,13 +593,14 @@ pub async fn send_transaction_message_route<'a>( continue; } + let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - - let previous = pdu - .prev_events - .first() - .map(|id| db.rooms.get_pdu(id).expect("todo").map(Arc::new)) - .flatten(); + // Fetch any unknown events or retrieve them from the DB + let previous = + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { + mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not @@ -616,14 +616,14 @@ pub async fn send_transaction_message_route<'a>( let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - auth_events + &auth_events .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect(), @@ -638,7 +638,6 @@ pub async fn send_transaction_message_route<'a>( continue; } - let server_name = body.body.origin.clone(); let (state_at_event, incoming_auth_events): (StateMap>, _) = match db .sending .send_federation_request( @@ -652,8 +651,18 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids) - .await? + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) .collect(); @@ -677,8 +686,8 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous.clone(), - state_at_event.clone(), // TODO: could this be &state avoid .clone - None, // TODO: third party invite + &state_at_event, + None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))? { @@ -760,7 +769,7 @@ pub async fn send_transaction_message_route<'a>( &RoomVersionId::Version6, &event, previous, - state_at_forks, + &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? 
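As an aside: the "no conflicting events in the state we received" sanity check that the /send handler grows in these patches boils down to rejecting any state set in which an (event type, state key) pair occurs twice. A self-contained sketch with simplified types (not the codebase's own structs):

use std::collections::BTreeSet;

/// Returns true if any (event type, state key) pair occurs more than once.
fn has_conflicting_state(state: &[(String, Option<String>)]) -> bool {
    let mut seen = BTreeSet::new();
    // `insert` returns false when the key was already present.
    state.iter().any(|key| !seen.insert(key))
}

fn main() {
    let state = vec![
        ("m.room.create".to_owned(), Some(String::new())),
        ("m.room.member".to_owned(), Some("@alice:example.org".to_owned())),
        ("m.room.member".to_owned(), Some("@alice:example.org".to_owned())),
    ];
    assert!(has_conflicting_state(&state));
}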
From 8de0d9f9ced7c11d24bd38d20f871bae11ed863e Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 08:52:30 -0500 Subject: [PATCH 20/62] Remove StateStore trait from state-res collect events needed --- Cargo.lock | 62 +++++++++--------- Cargo.toml | 2 +- src/client_server/membership.rs | 4 -- src/database/rooms.rs | 100 ++++++++++++++++++---------- src/server_server.rs | 111 +++++++++++++++++++------------- 5 files changed, 163 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9dc0b38..f621d16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,9 +231,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -558,7 +558,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.2", + "pin-project 1.0.3", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -587,13 +587,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] @@ -722,7 +722,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project 1.0.2", + "pin-project 1.0.3", "socket2", "tokio", "tower-service", @@ -1272,11 +1272,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" +checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" dependencies = [ - "pin-project-internal 1.0.2", + "pin-project-internal 1.0.3", ] [[package]] @@ -1292,9 +1292,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" dependencies = [ "proc-macro2", "quote", @@ -1309,9 +1309,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" [[package]] name = "pin-utils" @@ -1416,13 +1416,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" +checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" dependencies = [ "libc", "rand_chacha 0.3.0", - "rand_core 0.6.0", + "rand_core 0.6.1", "rand_hc 0.3.0", ] @@ -1443,7 +1443,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1457,11 +1457,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8b34ba8cfb21243bd8df91854c830ff0d785fff2e82ebd4434c2644cb9ada18" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.0", + "getrandom 0.2.1", ] [[package]] @@ -1479,7 +1479,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.6.0", + "rand_core 0.6.1", ] [[package]] @@ -1569,7 +1569,7 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "serde", "serde_urlencoded", "tokio", @@ -1809,7 +1809,7 @@ version = "0.17.4" source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" dependencies = [ "paste", - "rand 0.8.0", + "rand 0.8.1", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -2028,9 +2028,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +checksum = "971be8f6e4d4a47163b405a3df70d14359186f9ab0f3a3ec37df144ca1ce089f" dependencies = [ "dtoa", "linked-hash-map", @@ -2127,7 +2127,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=event-trait#e5d32e44adb66c5932a81d2c8a8d840abd17c870" +source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" dependencies = [ "itertools", "maplit", @@ -2189,9 +2189,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6" +checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" dependencies = [ "proc-macro2", "quote", @@ -2398,7 +2398,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.1", "tracing-attributes", "tracing-core", ] diff --git a/Cargo.toml b/Cargo.toml index bf74e8a..fdcc4ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "event-trait", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be 
the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 4e093c2..ea14268 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -618,7 +618,6 @@ async fn join_room_by_id_helper( &room_id, &control_events, &mut event_map, - &db.rooms, &event_ids, ); @@ -629,7 +628,6 @@ async fn join_room_by_id_helper( &sorted_control_events, &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); @@ -654,7 +652,6 @@ async fn join_room_by_id_helper( &events_to_sort, power_level, &mut event_map, - &db.rooms, ); let resolved_events = state_res::StateResolution::iterative_auth_check( @@ -663,7 +660,6 @@ async fn join_room_by_id_helper( &sorted_event_ids, &resolved_control_events, &mut event_map, - &db.rooms, ) .expect("iterative auth check failed on resolved events"); diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ef76c39..fe4f23c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -67,40 +67,6 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) } -impl StateStore for Rooms { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> state_res::Result> { - let pid = self - .get_pdu_id(event_id) - .map_err(StateError::custom)? - .ok_or_else(|| { - StateError::NotFound(format!( - "PDU via room_id and event_id not found in the db: {}", - event_id.as_str() - )) - })?; - - serde_json::from_slice( - &self - .pduid_pdu - .get(pid) - .map_err(StateError::custom)? - .ok_or_else(|| StateError::NotFound("PDU via pduid not found in db.".into()))?, - ) - .map_err(Into::into) - .and_then(|pdu: PduEvent| { - // conduit's PDU's always contain a room_id but some - // of ruma's do not so this must be an Option - if pdu.room_id() == room_id { - Ok(Arc::new(pdu)) - } else { - Err(StateError::NotFound( - "Found PDU for incorrect room in db.".into(), - )) - } - }) - } -} - impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. @@ -222,6 +188,72 @@ impl Rooms { Ok(events) } + /// Returns a Vec of the related auth events to the given `event`. + /// + /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. + pub fn auth_events_full( + &self, + room_id: &RoomId, + event_ids: &[EventId], + ) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + if let Some(ev) = self.get_pdu(&ev_id)? { + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) + } + + /// Returns a Vec representing the difference in auth chains of the given `events`. + /// + /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). + pub fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + use std::collections::BTreeSet; + + let mut chains = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_events_full(room_id, &ids)? 
+ .into_iter() + .map(|pdu| pdu.event_id) + .collect::>(); + chains.push(chain); + } + + if let Some(chain) = chains.first() { + let rest = chains.iter().skip(1).flatten().cloned().collect(); + let common = chain.intersection(&rest).collect::>(); + + Ok(chains + .iter() + .flatten() + .filter(|id| !common.contains(&id)) + .cloned() + .collect::>() + .into_iter() + .collect()) + } else { + Ok(vec![]) + } + } + /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. diff --git a/src/server_server.rs b/src/server_server.rs index 3de3636..f68475c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -603,7 +603,7 @@ pub async fn send_transaction_message_route<'a>( }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be get the pdu.auth_events not + // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not // the auth events that would be correct for this pdu. Put another way we should use the auth events // the pdu claims are its auth events let auth_events = db.rooms.get_auth_events( @@ -637,50 +637,56 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 4. - let (state_at_event, incoming_auth_events): (StateMap>, _) = match db - .sending - .send_federation_request( - &db.globals, - server_name.clone(), - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) - .collect(); - - ( - state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) - .await?, + // Step 5. event passes auth based on state at the event + let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = + match db + .sending + .send_federation_request( + &db.globals, + server_name.clone(), + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, ) - } - Err(_) => { - resolved_map.insert( - event.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await + { + Ok(res) => { + let state = + fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .collect(); + + ( + state, + fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + .await? 
+ .into_iter() + .map(Arc::new) + .collect(), + ) + } + Err(_) => { + resolved_map.insert( + event.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; if !state_res::event_auth::auth_check( &RoomVersionId::Version6, @@ -698,6 +704,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + // End of step 5. // The event could still be soft failed append_state_soft(&db, &pdu)?; @@ -724,18 +731,30 @@ pub async fn send_transaction_message_route<'a>( } } - // 6. + // Step 6. event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() } else if fork_states.len() == 1 { fork_states[0].clone() } else { + let auth_events = fork_states + .iter() + .map(|map| { + db.rooms.auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + }) + .collect(); + // Add as much as we can to the `event_map` (less DB hits) event_map.extend( incoming_auth_events .into_iter() - .map(|pdu| (pdu.event_id().clone(), Arc::new(pdu))), + .map(|pdu| (pdu.event_id().clone(), pdu)), ); event_map.extend( state_at_event @@ -754,8 +773,8 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), + &auth_events, &mut event_map, - &db.rooms, ) { Ok(res) => res .into_iter() From 168ae8dca00d480ad28d80e65918853f1802091b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 6 Jan 2021 15:05:09 -0500 Subject: [PATCH 21/62] Fill event_map with all events that will be needed for resolution --- Cargo.lock | 2 +- src/server_server.rs | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f621d16..7a79dbe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -212,8 +212,8 @@ dependencies = [ "js_int", "jsonwebtoken", "log", - "regex", "rand 0.7.3", + "regex", "reqwest", "ring", "rocket", diff --git a/src/server_server.rs b/src/server_server.rs index f68475c..e87c05c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -565,7 +565,7 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this - + // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); // 2. Passes signature checks, otherwise event is dropped. 
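// [Editor's aside, not part of the patch] What this commit is after: `event_map`
// acts as a memoization layer for state resolution, so every PDU that resolution
// might touch is looked up (or fetched) once and then served from memory instead
// of hitting the DB or federation again. A hypothetical sketch of that pattern
// with simplified types:
use std::collections::BTreeMap;
use std::sync::Arc;

struct Pdu {
    event_id: String,
}

fn get_or_fetch<'a>(
    event_map: &'a mut BTreeMap<String, Arc<Pdu>>,
    id: &str,
    fetch: impl FnOnce(&str) -> Pdu, // stands in for a DB read or federation request
) -> &'a Arc<Pdu> {
    event_map
        .entry(id.to_owned())
        .or_insert_with(|| Arc::new(fetch(id)))
}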
@@ -741,16 +741,24 @@ pub async fn send_transaction_message_route<'a>( let auth_events = fork_states .iter() .map(|map| { - db.rooms.auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), - ) + db.rooms + .auth_events_full( + pdu.room_id(), + &map.values() + .map(|pdu| pdu.event_id().clone()) + .collect::>(), + ) + .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) }) - .collect(); + .collect::>>()?; - // Add as much as we can to the `event_map` (less DB hits) + // Add everything we will need to event_map + event_map.extend( + auth_events + .iter() + .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) + .flatten(), + ); event_map.extend( incoming_auth_events .into_iter() @@ -773,7 +781,10 @@ pub async fn send_transaction_message_route<'a>( .collect::>() }) .collect::>(), - &auth_events, + auth_events + .into_iter() + .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) + .collect(), &mut event_map, ) { Ok(res) => res From d0b8d0f5fdaf2ee62b6d14702cda5d2a154c241b Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 12 Jan 2021 08:26:52 -0500 Subject: [PATCH 22/62] Fix signature/hash checks, fetch recursive auth events --- src/client_server/membership.rs | 7 +- src/database/rooms.rs | 4 +- src/pdu.rs | 12 +- src/server_server.rs | 240 +++++++++++++++++++++++++------- 4 files changed, 193 insertions(+), 70 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ea14268..29b6c14 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -681,13 +681,8 @@ async fn join_room_by_id_helper( pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); db.rooms.append_pdu( -<<<<<<< HEAD - &PduEvent::from(&**pdu), - utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), -======= &pdu, - &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), ->>>>>>> 6232d1f (Update state-res, use the new Event trait) + utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fe4f23c..88a772b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -20,7 +20,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use sled::IVec; -use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore}; +use state_res::{event_auth, Event, StateMap}; use std::{ collections::{BTreeMap, HashMap}, @@ -193,7 +193,7 @@ impl Rooms { /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. pub fn auth_events_full( &self, - room_id: &RoomId, + _room_id: &RoomId, event_ids: &[EventId], ) -> Result> { let mut result = BTreeMap::new(); diff --git a/src/pdu.rs b/src/pdu.rs index 86fbc9f..750f9cf 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -4,7 +4,7 @@ use ruma::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, + serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; @@ -286,12 +286,11 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. 
/// -/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`. +/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn process_incoming_pdu( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { - let mut value = - serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); + let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); let event_id = EventId::try_from(&*format!( "${}", @@ -300,11 +299,6 @@ pub(crate) fn process_incoming_pdu( )) .expect("ruma's reference hashes are valid event ids"); - value.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - (event_id, value) } diff --git a/src/server_server.rs b/src/server_server.rs index e87c05c..141d5bb 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,4 +1,5 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; +use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -6,6 +7,7 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ + device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -18,13 +20,14 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - signatures::{CanonicalJsonObject, PublicKeyMap}, + serde::to_canonical_value, + signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use state_res::{Event, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::Debug, net::{IpAddr, SocketAddr}, sync::Arc, @@ -519,6 +522,8 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } + dbg!(&*body); + for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { Ok(edu) => match edu.edu_type.as_str() { @@ -546,6 +551,7 @@ pub async fn send_transaction_message_route<'a>( } "m.presence" => {} "m.receipt" => {} + "m.device_list_update" => {} _ => {} }, Err(_err) => { @@ -565,21 +571,52 @@ pub async fn send_transaction_message_route<'a>( for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this + // We do not add the event_id field to the pdu here because of signature and hashes checks // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. // 3. Passes hash checks, otherwise it is redacted before being processed further. - let keys = db.globals.keypair(); - let mut pub_key_set = BTreeMap::new(); - pub_key_set.insert( - "ed25519:1".to_string(), - String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"), - ); + let server_name = body.body.origin.clone(); let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert("domain".to_string(), pub_key_set); + if let Some(sig) = value.get("signatures") { + match sig { + CanonicalJsonValue::Object(entity) => { + for key in entity.keys() { + // TODO: save this in a DB maybe... 
+ // fetch the public signing key + let res = db + .sending + .send_federation_request( + &db.globals, + Box::::try_from(key.to_string()).unwrap(), + get_server_keys::v2::Request::new(), + ) + .await?; - let pdu = match signature_and_hash_check(&pub_key_map, value) { + pub_key_map.insert( + res.server_key.server_name.to_string(), + res.server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); + } + } + _ => { + resolved_map.insert( + event_id, + Err("`signatures` is not a JSON object".to_string()), + ); + continue; + } + } + } else { + resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); + continue; + } + + let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -587,50 +624,75 @@ pub async fn send_transaction_message_route<'a>( } }; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".into())); continue; } - let server_name = body.body.origin.clone(); let event = Arc::new(pdu.clone()); - // Fetch any unknown events or retrieve them from the DB + dbg!(&*event); + // Fetch any unknown prev_events or retrieve them from the DB let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? { - mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), _ => None, }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not - // the auth events that would be correct for this pdu. Put another way we should use the auth events - // the pdu claims are its auth events - let auth_events = db.rooms.get_auth_events( - &pdu.room_id, - &pdu.kind, - &pdu.sender, - pdu.state_key.as_deref(), - pdu.content.clone(), - )?; + // Recursively gather all auth events checking that the previous auth events are valid. 
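// [Editor's aside, not part of the patch] "Recursively gather all auth events"
// amounts to a depth-first walk over auth_events edges until every ancestor up
// to m.room.create has been visited. A hypothetical sketch over simplified
// string ids, with the per-event lookup injected as a closure:
use std::collections::BTreeMap;

fn auth_chain(
    start: &[String],
    auth_events_of: impl Fn(&str) -> Vec<String>, // DB hit or federation fetch
) -> BTreeMap<String, Vec<String>> {
    let mut found: BTreeMap<String, Vec<String>> = BTreeMap::new();
    let mut stack: Vec<String> = start.to_vec();
    while let Some(id) = stack.pop() {
        if found.contains_key(&id) {
            continue; // already visited, avoids looping on cycles
        }
        let parents = auth_events_of(&id);
        stack.extend(parents.iter().cloned());
        found.insert(id, parents);
    }
    found
}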
+ let auth_events: Vec = + match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) + .await + { + Ok(events) => events, + Err(_) => { + resolved_map.insert( + pdu.event_id, + Err("Failed to recursively gather auth events".into()), + ); + continue; + } + }; let mut event_map: state_res::EventMap> = auth_events .iter() - .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) .collect(); - if !state_res::event_auth::auth_check( + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &event, previous.clone(), - &auth_events - .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect(), + &pdu.auth_events + .iter() + .map(|id| { + event_map + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + Error::Conflict( + "Auth event not found, event failed recursive auth checks.", + ) + }) + }) + .collect::>>()?, None, // TODO: third party invite ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { + .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( pdu.event_id, Err("Event has failed auth check with auth events".into()), @@ -816,31 +878,92 @@ pub async fn send_transaction_message_route<'a>( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, -) -> std::result::Result { - let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".into()), +) -> std::result::Result { + Ok( + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + error!("CONTENT HASH FAILED"); + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value } - } else { - value } - } - Err(_e) => return Err("Signature verification failed".into()), - }; - - serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }, ) - .map_err(|_| "Deserialization failed for JSON value".into()) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have missing events it fails. +async fn fetch_check_auth_events( + db: &Database, + origin: Box, + key_map: &PublicKeyMap, + event_ids: &[EventId], +) -> Result> { + let mut result = BTreeMap::new(); + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains_key(&ev_id) { + continue; + } + + let ev = match db.rooms.get_pdu(&ev_id)? 
{ + Some(pdu) => pdu, + None => match db + .sending + .send_federation_request( + &db.globals, + origin.clone(), + get_event::v1::Request { event_id: &ev_id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + match signature_and_hash_check(key_map, value) { + Ok(mut val) => { + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map") + } + Err(e) => { + // TODO: I would assume we just keep going + error!("{:?}", e); + continue; + } + } + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }; + stack.extend(ev.auth_events()); + result.insert(ev.event_id().clone(), ev); + } + + Ok(result.into_iter().map(|(_, v)| v).collect()) } /// TODO: this needs to add events to the DB in a way that does not @@ -865,10 +988,21 @@ async fn fetch_events( .await { Ok(res) => { - let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); match signature_and_hash_check(key_map, value) { - Ok(pdu) => { + Ok(mut val) => { // TODO: add to our DB somehow? + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id) + .expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val) + .expect("CanonicalJsonObj is a valid JsonValue"), + ) + .expect("Pdu is valid Canonical JSON Map"); + pdus.push(pdu); } Err(e) => { @@ -898,7 +1032,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; db.rooms.append_pdu( pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db.globals, From d108a735a45e6fe9b0fcda00b6d0ebbeff043f4a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 22 Dec 2020 12:45:35 -0500 Subject: [PATCH 23/62] State resolution outline for /send --- src/server_server.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/server_server.rs b/src/server_server.rs index 141d5bb..31d6467 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1067,6 +1067,59 @@ fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { Ok(()) } +fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { + todo!() +} + +fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + +/// TODO: This should not write to the current room state (roomid_statehash) +fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + 
pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + db.rooms.append_pdu( + pdu, + &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &db.globals, + &db.account_data, + &db.admin, + )?; + + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + + Ok(()) +} + #[cfg_attr( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") From 52392628e9eff02ed0db65481c551840ba879405 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 14:39:56 -0500 Subject: [PATCH 24/62] Convert uses of Box to a ref --- src/client_server/alias.rs | 2 +- src/client_server/directory.rs | 2 +- src/client_server/media.rs | 4 +- src/client_server/membership.rs | 4 +- src/database/sending.rs | 4 +- src/pdu.rs | 2 +- src/server_server.rs | 117 ++++++++++++++++++++++---------- 7 files changed, 92 insertions(+), 43 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 498e882..0dc40a9 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -70,7 +70,7 @@ pub async fn get_alias_helper( .sending .send_federation_request( &db.globals, - room_alias.server_name().to_owned(), + room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) .await?; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 2bff20c..87d5fc8 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -137,7 +137,7 @@ pub async fn get_public_rooms_filtered_helper( .sending .send_federation_request( &db.globals, - other_server.to_owned(), + other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, since: since.as_deref(), diff --git a/src/client_server/media.rs b/src/client_server/media.rs index f792062..275038a 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -77,7 +77,7 @@ pub async fn get_content_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content::Request { allow_remote: false, server_name: &body.server_name, @@ -126,7 +126,7 @@ pub async fn get_content_thumbnail_route( .sending .send_federation_request( &db.globals, - body.server_name.clone(), + &body.server_name, get_content_thumbnail::Request { allow_remote: false, height: body.height, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 29b6c14..40e4183 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -468,7 +468,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, @@ -547,7 +547,7 @@ async fn join_room_by_id_helper( .sending .send_federation_request( &db.globals, - remote_server.clone(), + remote_server, federation::membership::create_join_event::v2::Request { room_id, event_id: &event_id, diff --git a/src/database/sending.rs b/src/database/sending.rs index d99c4f3..e6cdc76 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -303,7 +303,7 @@ impl Sending { server_server::send_request( &globals, - server.clone(), + &*server, send_transaction_message::v1::Request { origin: globals.server_name(), pdus: &pdu_jsons, @@ -348,7 +348,7 @@ impl Sending { pub async 
fn send_federation_request( &self, globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where diff --git a/src/pdu.rs b/src/pdu.rs index 750f9cf..340ddee 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -287,7 +287,7 @@ impl state_res::Event for PduEvent { /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. -pub(crate) fn process_incoming_pdu( +pub(crate) fn gen_event_id_canonical_json( pdu: &Raw, ) -> (EventId, CanonicalJsonObject) { let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON"); diff --git a/src/server_server.rs b/src/server_server.rs index 31d6467..64e0a05 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -20,11 +20,12 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; -use state_res::{Event, StateMap}; +use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, convert::{TryFrom, TryInto}, @@ -36,7 +37,7 @@ use std::{ pub async fn send_request( globals: &crate::database::globals::Globals, - destination: Box, + destination: &ServerName, request: T, ) -> Result where @@ -50,7 +51,7 @@ where .actual_destination_cache .read() .unwrap() - .get(&destination) + .get(destination) .cloned(); let (actual_destination, host) = if let Some(result) = maybe_result { @@ -61,7 +62,7 @@ where .actual_destination_cache .write() .unwrap() - .insert(destination.clone(), result.clone()); + .insert(Box::::from(destination), result.clone()); result }; @@ -278,9 +279,9 @@ async fn find_actual_destination( (actual_destination, host) } -async fn query_srv_record<'a>( +async fn query_srv_record( globals: &crate::database::globals::Globals, - hostname: &'a str, + hostname: &str, ) -> Option { if let Ok(Some(host_port)) = globals .dns_resolver() @@ -572,11 +573,9 @@ pub async fn send_transaction_message_route<'a>( // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks - // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then - let (event_id, value) = crate::pdu::process_incoming_pdu(pdu); - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let server_name = body.body.origin.clone(); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { match sig { @@ -588,7 +587,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - Box::::try_from(key.to_string()).unwrap(), + <&ServerName>::try_from(key.as_str()).unwrap(), get_server_keys::v2::Request::new(), ) .await?; @@ -616,6 +615,9 @@ pub async fn send_transaction_message_route<'a>( continue; } + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
let mut val = match signature_and_hash_check(&pub_key_map, value) { Ok(pdu) => pdu, Err(e) => { @@ -625,15 +627,20 @@ pub async fn send_transaction_message_route<'a>( }; // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type + // to our PduEvent type also finally verifying the first step listed above val.insert( "event_id".to_owned(), to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), ); - let pdu = serde_json::from_value::( + let pdu = match serde_json::from_value::( serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); + ) { + Ok(pdu) => pdu, + Err(_) => { + resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); + continue; + } + }; // If we have no idea about this room skip the PDU if !db.rooms.exists(&pdu.room_id)? { @@ -644,18 +651,15 @@ pub async fn send_transaction_message_route<'a>( let event = Arc::new(pdu.clone()); dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = - match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. // Recursively gather all auth events checking that the previous auth events are valid. let auth_events: Vec = - match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events) - .await - { + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { Ok(events) => events, Err(_) => { resolved_map.insert( @@ -707,7 +711,7 @@ pub async fn send_transaction_message_route<'a>( .sending .send_federation_request( &db.globals, - server_name.clone(), + server_name, get_room_state_ids::v1::Request { room_id: pdu.room_id(), event_id: pdu.event_id(), @@ -716,8 +720,7 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = - fetch_events(&db, server_name.clone(), &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -734,7 +737,7 @@ pub async fn send_transaction_message_route<'a>( ( state, - fetch_events(&db, server_name.clone(), &pub_key_map, &res.auth_chain_ids) + fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) .await? .into_iter() .map(Arc::new) @@ -881,6 +884,52 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +async fn auth_each_event( + db: &Database, + value: CanonicalJsonObject, + event_id: EventId, + pub_key_map: &PublicKeyMap, + server_name: &ServerName, + auth_cache: EventMap>, +) -> std::result::Result { + // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. + // 2. Passes signature checks, otherwise event is dropped. + // 3. Passes hash checks, otherwise it is redacted before being processed further. 
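// [Editor's aside, not part of the patch] The signature/hash check used here has
// three outcomes: everything verifies (keep the event as sent), only the
// signatures verify (the content hash is wrong, so the event is redacted before
// further processing), or verification fails (the event is rejected). A sketch
// of that decision with a hypothetical stand-in for ruma's `Verified` enum:
enum Verified {
    All,        // signatures and content hash both check out
    Signatures, // signatures only; the content hash did not match
}

fn checked_event<T>(
    outcome: Result<Verified, ()>,
    event: T,
    redact: impl FnOnce(T) -> T,
) -> Result<T, &'static str> {
    match outcome {
        Ok(Verified::All) => Ok(event),
        Ok(Verified::Signatures) => Ok(redact(event)),
        Err(()) => Err("Signature verification failed"),
    }
}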
+ let mut val = signature_and_hash_check(&pub_key_map, value)?; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; + + // If we have no idea about this room skip the PDU + if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { + return Err("Room is unknown to this server".into()); + } + + // Fetch any unknown prev_events or retrieve them from the DB + let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + _ => None, + }; + + // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. + // Recursively gather all auth events checking that the previous auth events are valid. + let auth_events: Vec = + match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { + Ok(events) => events, + Err(_) => return Err("Failed to recursively gather auth events".into()), + }; + + Ok(pdu) +} + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -909,7 +958,7 @@ fn signature_and_hash_check( /// events `auth_events`. If the chain is found to have missing events it fails. async fn fetch_check_auth_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, event_ids: &[EventId], ) -> Result> { @@ -929,13 +978,13 @@ async fn fetch_check_auth_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: &ev_id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { val.insert( @@ -970,7 +1019,7 @@ async fn fetch_check_auth_events( /// effect the state of the room async fn fetch_events( db: &Database, - origin: Box, + origin: &ServerName, key_map: &PublicKeyMap, events: &[EventId], ) -> Result> { @@ -982,13 +1031,13 @@ async fn fetch_events( .sending .send_federation_request( &db.globals, - origin.clone(), + origin, get_event::v1::Request { event_id: id }, ) .await { Ok(res) => { - let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu); + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); match signature_and_hash_check(key_map, value) { Ok(mut val) => { // TODO: add to our DB somehow? From 4cf530c55b32c494f1dde191fc07c2bcfed4ceac Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 14 Jan 2021 21:32:22 -0500 Subject: [PATCH 25/62] Abstract event validation/fetching, add outlier and signing key DB trees Fixed the miss named commented out keys in conduit-example.toml. 
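For context on the signing-key tree added below: each entry is keyed by the origin server name, a 0xff separator, and the key set's big-endian "valid until" timestamp, so outdated key sets can be pruned by scanning the server-name prefix. A minimal sketch of just that key layout (hypothetical helper name, not part of the patch):

fn signing_key_storage_key(origin: &str, valid_until_ms: u64) -> Vec<u8> {
    let mut key = origin.as_bytes().to_vec();
    key.push(0xff); // separator, same convention as the other sled trees
    key.extend_from_slice(&valid_until_ms.to_be_bytes());
    key
}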
--- conduit-example.toml | 4 +- src/database.rs | 10 +- src/database/globals.rs | 77 ++++- src/database/rooms.rs | 97 ++---- src/error.rs | 7 +- src/main.rs | 1 + src/server_server.rs | 632 +++++++++++++++++++--------------------- 7 files changed, 415 insertions(+), 413 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index b82da2c..bb3ae33 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -23,11 +23,11 @@ port = 6167 max_request_size = 20_000_000 # in bytes # Disable registration. No new users will be able to register on this server -#allow_registration = true +#allow_registration = false # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work -#allow_encryption = true +#allow_encryption = false #allow_federation = false #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 diff --git a/src/database.rs b/src/database.rs index 8fcffd9..ea65d6f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -22,7 +22,7 @@ use std::fs::remove_dir_all; use std::sync::{Arc, RwLock}; use tokio::sync::Semaphore; -#[derive(Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Config { server_name: Box, database_path: String, @@ -102,7 +102,12 @@ impl Database { let (admin_sender, admin_receiver) = mpsc::unbounded(); let db = Self { - globals: globals::Globals::load(db.open_tree("global")?, config).await?, + globals: globals::Globals::load( + db.open_tree("global")?, + db.open_tree("servertimeout_signingkey")?, + config, + ) + .await?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, @@ -155,6 +160,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index beb7de5..7eb162b 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,10 @@ use crate::{database::Config, utils, Error, Result}; use log::error; -use ruma::ServerName; -use std::collections::HashMap; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + ServerName, ServerSigningKeyId, +}; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; @@ -20,10 +23,15 @@ pub struct Globals { reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, + pub(super) servertimeout_signingkey: sled::Tree, // ServerName -> algorithm:key + pubkey } impl Globals { - pub async fn load(globals: sled::Tree, config: Config) -> Result { + pub async fn load( + globals: sled::Tree, + server_keys: sled::Tree, + config: Config, + ) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? 
.expect("utils::generate_keypair always returns Some"); @@ -82,6 +90,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, + servertimeout_signingkey: server_keys, }) } @@ -139,4 +148,66 @@ impl Globals { pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { self.jwt_decoding_key.as_ref() } + + /// TODO: the key valid until timestamp is only honored in room version > 4 + /// Remove the outdated keys and insert the new ones. + /// + /// This doesn't actually check that the keys provided are newer than the old set. + pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { + // Remove outdated keys + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, _) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + + if now > valid_until { + self.servertimeout_signingkey.remove(k)?; + } + } + + let mut key = origin.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice( + &(keys + .valid_until_ts + .duration_since(std::time::UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64) + .to_be_bytes(), + ); + + self.servertimeout_signingkey.insert( + key, + serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), + )?; + Ok(()) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + pub fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result> { + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { + let (k, bytes) = item?; + let valid_until = k + .splitn(2, |&b| b == 0xff) + .nth(1) + .map(crate::utils::u64_from_bytes) + .ok_or_else(|| Error::bad_database("Invalid signing keys."))? + .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + // If these keys are still valid use em! + if valid_until > now { + return serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys")); + } + } + Ok(BTreeMap::default()) + } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 88a772b..81abd62 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -65,6 +65,9 @@ pub struct Rooms { /// The state for a given state hash. pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) + + /// Any pdu that has passed the steps up to auth with auth_events. + pub(super) eventid_outlierpdu: sled::Tree, } impl Rooms { @@ -188,72 +191,6 @@ impl Rooms { Ok(events) } - /// Returns a Vec of the related auth events to the given `event`. - /// - /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`. - pub fn auth_events_full( - &self, - _room_id: &RoomId, - event_ids: &[EventId], - ) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - if let Some(ev) = self.get_pdu(&ev_id)? 
{ - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) - } - - /// Returns a Vec representing the difference in auth chains of the given `events`. - /// - /// Each inner `Vec` of `event_ids` represents a state set (state at each forward extremity). - pub fn auth_chain_diff( - &self, - room_id: &RoomId, - event_ids: Vec>, - ) -> Result> { - use std::collections::BTreeSet; - - let mut chains = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self - .auth_events_full(room_id, &ids)? - .into_iter() - .map(|pdu| pdu.event_id) - .collect::>(); - chains.push(chain); - } - - if let Some(chain) = chains.first() { - let rest = chains.iter().skip(1).flatten().cloned().collect(); - let common = chain.intersection(&rest).collect::>(); - - Ok(chains - .iter() - .flatten() - .filter(|id| !common.contains(&id)) - .cloned() - .collect::>() - .into_iter() - .collect()) - } else { - Ok(vec![]) - } - } - /// Generate a new StateHash. /// /// A unique hash made from hashing all PDU ids of the state joined with 0xff. @@ -475,6 +412,31 @@ impl Rooms { Ok(()) } + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + /// Returns true if the event_id was previously inserted. + pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + let res = self + .eventid_outlierpdu + .insert( + event_id.as_bytes(), + &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + ) + .map(|op| op.is_some())?; + Ok(res) + } + /// Creates a new persisted data unit and adds it to a room. /// /// By this point the incoming event should be fully authenticated, no auth happens @@ -516,6 +478,9 @@ impl Rooms { } } + // We no longer keep this pdu as an outlier + self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; // Mark as read first so the sending client doesn't get a notification even if appending diff --git a/src/error.rs b/src/error.rs index c57843c..fed545c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -122,10 +122,9 @@ impl log::Log for ConduitLogger { let output = format!("{} - {}", record.level(), record.args()); if self.enabled(record.metadata()) - && (record - .module_path() - .map_or(false, |path| path.starts_with("conduit::")) - || record + && (record.module_path().map_or(false, |path| { + path.starts_with("conduit::") || path.starts_with("state") + }) || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/main.rs b/src/main.rs index 4cab764..e5c0399 100644 --- a/src/main.rs +++ b/src/main.rs @@ -167,6 +167,7 @@ fn setup_rocket() -> rocket::Rocket { .figment() .extract() .expect("It looks like your config is invalid. 
Please take a look at the error"); + let data = Database::load_or_create(config) .await .expect("config is valid"); diff --git a/src/server_server.rs b/src/server_server.rs index 64e0a05..6907e34 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1,5 +1,4 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma}; -use get_devices::v1::UserDevice; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{error, info, warn}; @@ -7,7 +6,6 @@ use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ federation::{ - device::get_devices, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, @@ -20,7 +18,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::pdu::Pdu, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -28,9 +25,12 @@ use ruma::{ use state_res::{Event, EventMap, StateMap}; use std::{ collections::{BTreeMap, BTreeSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt::Debug, + future::Future, net::{IpAddr, SocketAddr}, + pin::Pin, + result::Result as StdResult, sync::Arc, time::{Duration, SystemTime}, }; @@ -575,6 +575,26 @@ pub async fn send_transaction_message_route<'a>( // We do not add the event_id field to the pdu here because of signature and hashes checks let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + // If we have no idea about this room skip the PDU + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); + continue; + } + }; + if !db.rooms.exists(&room_id)? { + resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); + continue; + } + let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); if let Some(sig) = value.get("signatures") { @@ -583,20 +603,12 @@ pub async fn send_transaction_message_route<'a>( for key in entity.keys() { // TODO: save this in a DB maybe... // fetch the public signing key - let res = db - .sending - .send_federation_request( - &db.globals, - <&ServerName>::try_from(key.as_str()).unwrap(), - get_server_keys::v2::Request::new(), - ) - .await?; + let origin = <&ServerName>::try_from(key.as_str()).unwrap(); + let keys = fetch_signing_keys(&db, origin).await?; pub_key_map.insert( - res.server_key.server_name.to_string(), - res.server_key - .verify_keys - .into_iter() + origin.to_string(), + keys.into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(), ); @@ -615,10 +627,31 @@ pub async fn send_transaction_message_route<'a>( continue; } - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. - let mut val = match signature_and_hash_check(&pub_key_map, value) { + // TODO: make this persist but not a DB Tree... + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. 
This could potentially also be some sort of trie (suffix tree) + // like structure so that once an auth event is known it would know (using indexes maybe) all of + // the auth events that it references. + let mut auth_cache = EventMap::new(); + + // 1. check the server is in the room (optional) + // 2. check content hash, redact if doesn't match + // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events + // 6. persist this event as an outlier + // 7. if not timeline event: stop + let pdu = match validate_event( + &db, + value, + event_id.clone(), + &pub_key_map, + server_name, + // All the auth events gathered will be here + &mut auth_cache, + ) + .await + { Ok(pdu) => pdu, Err(e) => { resolved_map.insert(event_id, Err(e)); @@ -626,59 +659,31 @@ pub async fn send_transaction_message_route<'a>( } }; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = match serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) { - Ok(pdu) => pdu, - Err(_) => { - resolved_map.insert(event_id, Err("Event is not a valid PDU".into())); - continue; - } - }; + let pdu = Arc::new(pdu.clone()); - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id)? { - resolved_map.insert(event_id, Err("Room is unknown to this server".into())); - continue; - } - - let event = Arc::new(pdu.clone()); - dbg!(&*event); // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), + let previous = match fetch_events( + &db, + server_name, + &pub_key_map, + &pdu.prev_events, + &mut auth_cache, + ) + .await + { + Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), _ => None, }; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. 
- let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => { - resolved_map.insert( - pdu.event_id, - Err("Failed to recursively gather auth events".into()), - ); - continue; - } - }; - - let mut event_map: state_res::EventMap> = auth_events + let mut event_map: state_res::EventMap> = auth_cache .iter() - .map(|v| (v.event_id().clone(), Arc::new(v.clone()))) + .map(|(k, v)| (k.clone(), v.clone())) .collect(); // Check that the event passes auth based on the auth_events let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &pdu.auth_events .iter() @@ -696,9 +701,10 @@ pub async fn send_transaction_message_route<'a>( None, // TODO: third party invite ) .map_err(|_e| Error::Conflict("Auth check failed"))?; + if !is_authed { resolved_map.insert( - pdu.event_id, + pdu.event_id().clone(), Err("Event has failed auth check with auth events".into()), ); continue; @@ -720,7 +726,14 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events(&db, server_name, &pub_key_map, &res.pdu_ids).await?; + let state = fetch_events( + &db, + server_name, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, + ) + .await?; // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -732,21 +745,26 @@ pub async fn send_transaction_message_route<'a>( let state = state .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), Arc::new(pdu))) + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); ( state, - fetch_events(&db, server_name, &pub_key_map, &res.auth_chain_ids) - .await? - .into_iter() - .map(Arc::new) - .collect(), + fetch_events( + &db, + server_name, + &pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await? + .into_iter() + .collect(), ) } Err(_) => { resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Fetching state for event failed".into()), ); continue; @@ -755,7 +773,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous.clone(), &state_at_event, None, // TODO: third party invite @@ -764,37 +782,21 @@ pub async fn send_transaction_message_route<'a>( { // Event failed auth with state_at resolved_map.insert( - pdu.event_id, + event_id, Err("Event has failed auth check with state at the event".into()), ); continue; } // End of step 5. - // The event could still be soft failed - append_state_soft(&db, &pdu)?; - // Gather the forward extremities and resolve - let forward_extrems = forward_extremity_ids(&db, &pdu.room_id)?; - let mut fork_states: Vec>> = vec![]; - for id in &forward_extrems { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); - let state = db - .rooms - .state_full(&pdu.room_id, &state_hash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); - - fork_states.push(state); - } else { - todo!("we don't know of a pdu that is part of our known forks OOPS") + let fork_states = match forward_extremity_ids(&db, &pdu) { + Ok(states) => states, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; } - } + }; // Step 6. 
event passes auth based on state of all forks and current room state let state_at_forks = if fork_states.is_empty() { @@ -803,19 +805,47 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { - let auth_events = fork_states - .iter() - .map(|map| { - db.rooms - .auth_events_full( - pdu.room_id(), - &map.values() - .map(|pdu| pdu.event_id().clone()) - .collect::>(), + let mut auth_events = vec![]; + // this keeps track if we error so we can break out of these inner loops + // to continue on with the incoming PDU's + let mut failed = false; + for map in &fork_states { + let mut state_auth = vec![]; + for pdu in map.values() { + let event = match auth_cache.get(pdu.event_id()) { + Some(aev) => aev.clone(), + // We should know about every event at this point but just incase... + None => match fetch_events( + &db, + server_name, + &pub_key_map, + &[pdu.event_id().clone()], + &mut auth_cache, ) - .map(|pdus| pdus.into_iter().map(Arc::new).collect::>()) - }) - .collect::>>()?; + .await + .map(|mut vec| vec.remove(0)) + { + Ok(aev) => aev.clone(), + Err(_) => { + resolved_map.insert( + event_id.clone(), + Err("Event has been soft failed".into()), + ); + failed = true; + break; + } + }, + }; + state_auth.push(event); + } + if failed { + break; + } + auth_events.push(state_auth); + } + if failed { + continue; + } // Add everything we will need to event_map event_map.extend( @@ -862,74 +892,163 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, - &event, + &pdu, previous, &state_at_forks, None, ) .map_err(|_e| Error::Conflict("Auth check failed"))? { - // Soft fail + // Soft fail, we add the event as an outlier. resolved_map.insert( - event.event_id().clone(), + pdu.event_id().clone(), Err("Event has been soft failed".into()), ); } else { append_state(&db, &pdu)?; // Event has passed all auth/stateres checks - resolved_map.insert(event.event_id().clone(), Ok(())); + resolved_map.insert(pdu.event_id().clone(), Ok(())); } } Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } -async fn auth_each_event( - db: &Database, +/// Validate any event that is given to us by another server. +/// +/// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). +/// 2. Passes signature checks, otherwise event is dropped. +/// 3. Passes hash checks, otherwise it is redacted before being processed further. +/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). +/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +fn validate_event<'a>( + db: &'a Database, value: CanonicalJsonObject, event_id: EventId, - pub_key_map: &PublicKeyMap, - server_name: &ServerName, - auth_cache: EventMap>, -) -> std::result::Result { - // Ruma/PduEvent satisfies - 1. Is a valid event, otherwise it is dropped. - // 2. Passes signature checks, otherwise event is dropped. - // 3. Passes hash checks, otherwise it is redacted before being processed further. 
- let mut val = signature_and_hash_check(&pub_key_map, value)?; + pub_key_map: &'a PublicKeyMap, + origin: &'a ServerName, + auth_cache: &'a mut EventMap>, +) -> Pin> + 'a + Send>> { + Box::pin(async move { + let mut val = signature_and_hash_check(&pub_key_map, value)?; - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type also finally verifying the first step listed above - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU".to_string())?; + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type also finally verifying the first step listed above + val.insert( + "event_id".to_owned(), + to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"), + ); + let pdu = serde_json::from_value::( + serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU".to_string())?; - // If we have no idea about this room skip the PDU - if !db.rooms.exists(&pdu.room_id).map_err(|e| e.to_string())? { - return Err("Room is unknown to this server".into()); - } + fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) + .await + .map_err(|_| "Event failed auth chain check".to_string())?; - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))), - _ => None, - }; + db.rooms + .append_pdu_outlier(pdu.event_id(), &pdu) + .map_err(|e| e.to_string())?; - // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected. - // Recursively gather all auth events checking that the previous auth events are valid. - let auth_events: Vec = - match fetch_check_auth_events(&db, server_name, &pub_key_map, &pdu.prev_events).await { - Ok(events) => events, - Err(_) => return Err("Failed to recursively gather auth events".into()), - }; - - Ok(pdu) + Ok(pdu) + }) } +/// Find the event and auth it. +/// +/// 1. Look in the main timeline (pduid_pdu tree) +/// 2. Look at outlier pdu tree +/// 3. Ask origin server over federation +/// 4. TODO: Ask other servers over federation? +async fn fetch_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + events: &[EventId], + auth_cache: &mut EventMap>, +) -> Result>> { + let mut pdus = vec![]; + for id in events { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db.rooms.get_pdu_outlier(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; + + Arc::new(pdu) + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, + }, + }; + pdus.push(pdu); + } + Ok(pdus) +} + +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. 
If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + +/// Search the DB for the signing keys of the given server, if we don't have them +/// fetch them from the server and save to our DB. +async fn fetch_signing_keys( + db: &Database, + origin: &ServerName, +) -> Result> { + match db.globals.signing_keys_for(origin)? { + keys if !keys.is_empty() => Ok(keys), + _ => { + let keys = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?; + db.globals.add_signing_key(origin, &keys.server_key)?; + Ok(keys.server_key.verify_keys) + } + } +} fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -954,122 +1073,29 @@ fn signature_and_hash_check( ) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have missing events it fails. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], -) -> Result> { - let mut result = BTreeMap::new(); - let mut stack = event_ids.to_vec(); +fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { + let mut fork_states = vec![]; + for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + if let Some(id) = db.rooms.get_pdu_id(id)? { + let state_hash = db + .rooms + .pdu_state_hash(&id)? + .expect("found pdu with no statehash"); + let state = db + .rooms + .state_full(&pdu.room_id, &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains_key(&ev_id) { - continue; - } - - let ev = match db.rooms.get_pdu(&ev_id)? 
{ - Some(pdu) => pdu, - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &ev_id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map") - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, - }; - stack.extend(ev.auth_events()); - result.insert(ev.event_id().clone(), ev); - } - - Ok(result.into_iter().map(|(_, v)| v).collect()) -} - -/// TODO: this needs to add events to the DB in a way that does not -/// effect the state of the room -async fn fetch_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - events: &[EventId], -) -> Result> { - let mut pdus = vec![]; - for id in events { - match db.rooms.get_pdu(id)? { - Some(pdu) => pdus.push(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - match signature_and_hash_check(key_map, value) { - Ok(mut val) => { - // TODO: add to our DB somehow? - val.insert( - "event_id".to_owned(), - to_canonical_value(&event_id) - .expect("EventId is a valid CanonicalJsonValue"), - ); - let pdu = serde_json::from_value::( - serde_json::to_value(val) - .expect("CanonicalJsonObj is a valid JsonValue"), - ) - .expect("Pdu is valid Canonical JSON Map"); - - pdus.push(pdu); - } - Err(e) => { - // TODO: I would assume we just keep going - error!("{:?}", e); - continue; - } - } - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + fork_states.push(state); + } else { + return Err(Error::Conflict( + "we don't know of a pdu that is part of our known forks OOPS", + )); } } - Ok(pdus) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - db.rooms.get_pdu_leaves(room_id) + Ok(fork_states) } fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { @@ -1078,9 +1104,12 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. 
+ let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + db.rooms.append_pdu( - pdu, + &pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), @@ -1089,78 +1118,9 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { &db.admin, )?; - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // db.rooms.append_pdu( - // pdu, - // &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - // count, - // pdu_id.clone().into(), - // &db.globals, - // &db.account_data, - // &db.admin, - // )?; - - Ok(()) -} - -fn forward_extremity_ids(db: &Database, room_id: &RoomId) -> Result> { - todo!() -} - -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; - - for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - - Ok(()) -} - -/// TODO: This should not write to the current room state (roomid_statehash) -fn append_state_soft(db: &Database, pdu: &PduEvent) -> Result<()> { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?; - db.rooms.append_pdu( - pdu, - &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, - )?; + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + db.rooms.set_room_state(&pdu.room_id, &statehashid)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From 4b2eb5ab82d6fc80aac2ab78a0c02d3add245743 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 11:05:57 -0500 Subject: [PATCH 26/62] Fix ALL clippy warnings --- src/client_server/account.rs | 78 ++++------------------ src/client_server/membership.rs | 40 ++--------- src/client_server/message.rs | 6 +- src/client_server/profile.rs | 12 +--- src/client_server/redact.rs | 6 +- src/client_server/room.rs | 90 +++++-------------------- src/client_server/state.rs | 6 +- src/database/admin.rs | 6 +- src/database/rooms.rs | 113 ++++++++++++++------------------ src/database/sending.rs | 19 +++--- src/error.rs | 8 ++- src/server_server.rs | 4 +- 12 files changed, 106 insertions(+), 282 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 75544b7..6927a53 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -239,11 +239,7 @@ pub async fn 
register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Make conduit bot join @@ -264,11 +260,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -302,11 +294,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.1 Join Rules @@ -323,11 +311,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -346,11 +330,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -367,11 +347,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 6. Events implied by name and topic @@ -390,11 +366,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( @@ -410,11 +382,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Room alias @@ -436,11 +404,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; @@ -463,11 +427,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.rooms.build_and_append_pdu( PduBuilder { @@ -486,11 +446,7 @@ pub async fn register_route( }, &user_id, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Send welcome message @@ -515,11 +471,7 @@ pub async fn register_route( }, &conduit_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -691,11 +643,7 @@ pub async fn deactivate_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 40e4183..70bb480 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -124,11 +124,7 @@ pub async fn leave_room_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -164,11 +160,7 @@ pub async fn invite_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -220,11 +212,7 @@ pub async fn kick_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -280,11 +268,7 @@ pub async fn ban_user_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -332,11 +316,7 @@ pub async fn unban_user_route( }, &sender_user, &body.room_id, - 
&db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; @@ -685,9 +665,7 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - &db.admin, + &db, )?; if state_events.contains(ev_id) { @@ -717,11 +695,7 @@ async fn join_room_by_id_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index c56cc94..c64c390 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -68,11 +68,7 @@ pub async fn send_message_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.transaction_ids.add_txnid( diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 761443d..21759a8 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -63,11 +63,7 @@ pub async fn set_displayname_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update @@ -160,11 +156,7 @@ pub async fn set_avatar_url_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Presence update diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 212e751..282c35a 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -31,11 +31,7 @@ pub async fn redact_event_route( }, &sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 092e083..631d87b 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -65,11 +65,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 2. Let the room creator join @@ -90,11 +86,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 3. Power levels @@ -135,11 +127,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4. Events set by preset @@ -175,11 +163,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.2 History Visibility @@ -196,11 +180,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 4.3 Guest Access @@ -225,11 +205,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // 5. 
Events listed in initial_state @@ -248,11 +224,7 @@ pub async fn create_room_route( pdu_builder, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -273,11 +245,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -295,11 +263,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -322,11 +286,7 @@ pub async fn create_room_route( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -413,11 +373,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Get the old room federations status @@ -457,11 +413,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Join the new room @@ -482,11 +434,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; // Recommended transferable state events list from the specs @@ -519,11 +467,7 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; } @@ -566,11 +510,7 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; db.flush().await?; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index faa415d..ae5e251 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -284,11 +284,7 @@ pub async fn send_state_event_for_key_helper( }, &sender_user, &room_id, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, )?; Ok(event_id) diff --git a/src/database/admin.rs b/src/database/admin.rs index 1fb1983..501722e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -60,11 +60,7 @@ impl Admin { }, &conduit_user, &conduit_room, - &db.globals, - &db.sending, - &db.admin, - &db.account_data, - &db.appservice, + &db, ) .unwrap(); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 81abd62..d62d4b0 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2,7 +2,7 @@ mod edus; pub use edus::RoomEdus; -use crate::{pdu::PduBuilder, utils, Error, PduEvent, Result}; +use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use log::error; use regex::Regex; use ring::digest; @@ -447,9 +447,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, - globals: &super::globals::Globals, - account_data: &super::account_data::AccountData, - admin: &super::admin::Admin, + db: &Database, ) -> Result<()> { // Make unsigned fields correct. 
This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -486,7 +484,7 @@ impl Rooms { // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count, &globals)?; + .private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; self.pduid_pdu.insert( &pdu_id, @@ -521,8 +519,8 @@ impl Rooms { ) })?, &pdu.sender, - account_data, - globals, + &db.account_data, + &db.globals, )?; } } @@ -540,10 +538,10 @@ impl Rooms { self.tokenids.insert(key, &[])?; } - if body.starts_with(&format!("@conduit:{}: ", globals.server_name())) + if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &format!("#admins:{}", globals.server_name()) + &format!("#admins:{}", db.globals.server_name()) .try_into() .expect("#admins:server_name is a valid room alias"), )? @@ -570,10 +568,11 @@ impl Rooms { ); match parsed_config { Ok(yaml) => { - admin.send(AdminCommand::RegisterAppservice(yaml)); + db.admin + .send(AdminCommand::RegisterAppservice(yaml)); } Err(e) => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( format!( "Could not parse appservice config: {}", @@ -584,7 +583,7 @@ impl Rooms { } } } else { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain( "Expected code block in command body.", ), @@ -592,10 +591,10 @@ impl Rooms { } } "list_appservices" => { - admin.send(AdminCommand::ListAppservices); + db.admin.send(AdminCommand::ListAppservices); } _ => { - admin.send(AdminCommand::SendMessage( + db.admin.send(AdminCommand::SendMessage( message::MessageEventContent::text_plain(format!( "Command: {}, Args: {:?}", command, args @@ -696,17 +695,12 @@ impl Rooms { } /// Creates a new persisted data unit and adds it to a room. 
- #[allow(clippy::too_many_arguments)] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - globals: &super::globals::Globals, - sending: &super::sending::Sending, - admin: &super::admin::Admin, - account_data: &super::account_data::AccountData, - appservice: &super::appservice::Appservice, + db: &Database, ) -> Result { let PduBuilder { event_type, @@ -789,7 +783,7 @@ impl Rooms { if !match event_type { EventType::RoomEncryption => { // Only allow encryption events if it's allowed in the config - globals.allow_encryption() + db.globals.allow_encryption() } EventType::RoomMember => { let prev_event = self @@ -895,13 +889,13 @@ impl Rooms { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(globals.server_name()) + to_canonical_value(db.globals.server_name()) .expect("server name is a valid CanonicalJsonValue"), ); ruma::signatures::hash_and_sign_event( - globals.server_name().as_str(), - globals.keypair(), + db.globals.server_name().as_str(), + db.globals.keypair(), &mut pdu_json, &RoomVersionId::Version6, ) @@ -922,24 +916,16 @@ impl Rooms { // Increment the last index and use that // This is also the next_batch/since value - let count = globals.next_count()?; + let count = db.globals.next_count()?; let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu_id, &pdu, &globals)?; + let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu( - &pdu, - pdu_json, - count, - pdu_id.clone().into(), - globals, - account_data, - admin, - )?; + self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist @@ -948,31 +934,28 @@ impl Rooms { for server in self .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != globals.server_name()) + .filter(|server| &**server != db.globals.server_name()) { - sending.send_pdu(&server, &pdu_id)?; + db.sending.send_pdu(&server, &pdu_id)?; } - for appservice in appservice.iter_all().filter_map(|r| r.ok()) { + for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") .and_then(|users| users.as_sequence()) - .map_or_else( - || Vec::new(), - |users| { - users - .iter() - .map(|users| { - users - .get("regex") - .and_then(|regex| regex.as_str()) - .and_then(|regex| Regex::new(regex).ok()) - }) - .filter_map(|o| o) - .collect::>() - }, - ); + .map_or_else(Vec::new, |users| { + users + .iter() + .map(|users| { + users + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }); let aliases = namespaces .get("aliases") .and_then(|users| users.get("regex")) @@ -989,29 +972,31 @@ impl Rooms { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, globals.server_name()).ok() + UserId::parse_with_server_name(string, db.globals.server_name()).ok() }); - if bridge_user_id.map_or(false, |bridge_user_id| { - self.is_joined(&bridge_user_id, 
room_id).unwrap_or(false) - }) || users.iter().any(|users| { + let user_is_joined = + |bridge_user_id| self.is_joined(&bridge_user_id, room_id).unwrap_or(false); + let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) || pdu.kind == EventType::RoomMember && pdu .state_key .as_ref() .map_or(false, |state_key| users.is_match(&state_key)) - }) || aliases.map_or(false, |aliases| { + }; + let matching_aliases = |aliases: Regex| { room_aliases .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) - }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || self - .room_members(&room_id) - .filter_map(|r| r.ok()) - .any(|member| users.iter().any(|regex| regex.is_match(member.as_str()))) + }; + + if bridge_user_id.map_or(false, user_is_joined) + || users.iter().any(matching_users) + || aliases.map_or(false, matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) { - sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } } } diff --git a/src/database/sending.rs b/src/database/sending.rs index e6cdc76..101daf3 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -149,6 +149,15 @@ impl Sending { let servernamepduid = key.clone(); let mut parts = servernamepduid.splitn(2, |&b| b == 0xff); + let exponential_backoff = |(tries, instant): &(u32, Instant)| { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60*60*24) { + min_elapsed_duration = Duration::from_secs(60*60*24); + } + + instant.elapsed() < min_elapsed_duration + }; if let Some((server, is_appservice, pdu_id)) = utils::string_from_bytes( parts .next() @@ -173,15 +182,7 @@ impl Sending { .map(|pdu_id| (server, is_appservice, pdu_id)) ) .filter(|(server, is_appservice, _)| { - if last_failed_try.get(server).map_or(false, |(tries, instant)| { - // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(60) * *tries * *tries; - if min_elapsed_duration > Duration::from_secs(60*60*24) { - min_elapsed_duration = Duration::from_secs(60*60*24); - } - - instant.elapsed() < min_elapsed_duration - }) { + if last_failed_try.get(server).map_or(false, exponential_backoff) { return false; } diff --git a/src/error.rs b/src/error.rs index fed545c..13efce6 100644 --- a/src/error.rs +++ b/src/error.rs @@ -121,10 +121,12 @@ impl log::Log for ConduitLogger { fn log(&self, record: &log::Record<'_>) { let output = format!("{} - {}", record.level(), record.args()); + let match_mod_path = + |path: &str| path.starts_with("conduit::") || path.starts_with("state"); + if self.enabled(record.metadata()) - && (record.module_path().map_or(false, |path| { - path.starts_with("conduit::") || path.starts_with("state") - }) || record + && (record.module_path().map_or(false, match_mod_path) + || record .module_path() .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying && record.metadata().level() <= log::Level::Warn) diff --git a/src/server_server.rs b/src/server_server.rs index 6907e34..ae59583 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1113,9 +1113,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &db.globals, - &db.account_data, - 
&db.admin, + &db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time From db0aee3318b39d24ac37915ce49018117c0c03f2 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 15 Jan 2021 15:46:47 -0500 Subject: [PATCH 27/62] Use the auth_events for step 6, WIP forward_extremity_ids fn --- src/server_server.rs | 159 ++++++++++++++++++++++++++++--------------- 1 file changed, 104 insertions(+), 55 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ae59583..77f0fa8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -569,7 +569,7 @@ pub async fn send_transaction_message_route<'a>( // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. let mut resolved_map = BTreeMap::new(); - for pdu in &body.pdus { + 'main_pdu_loop: for pdu in &body.pdus { // 1. Is a valid event, otherwise it is dropped. // Ruma/PduEvent/StateEvent satisfies this // We do not add the event_id field to the pdu here because of signature and hashes checks @@ -660,7 +660,6 @@ pub async fn send_transaction_message_route<'a>( }; let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB let previous = match fetch_events( &db, @@ -675,6 +674,7 @@ pub async fn send_transaction_message_route<'a>( _ => None, }; + // [auth_cache] At this point we have the auth chain of the incoming event. let mut event_map: state_res::EventMap> = auth_cache .iter() .map(|(k, v)| (k.clone(), v.clone())) @@ -688,7 +688,7 @@ pub async fn send_transaction_message_route<'a>( &pdu.auth_events .iter() .map(|id| { - event_map + auth_cache .get(id) .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) .ok_or_else(|| { @@ -790,7 +790,15 @@ pub async fn send_transaction_message_route<'a>( // End of step 5. // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids(&db, &pdu) { + let fork_states = match forward_extremity_ids( + &db, + &pdu, + server_name, + &pub_key_map, + &mut auth_cache, + ) + .await + { Ok(states) => states, Err(_) => { resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); @@ -805,47 +813,44 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // TODO: remove this is for current debugging Jan, 15 2021 + let mut number_fetches = 0_u32; let mut auth_events = vec![]; // this keeps track if we error so we can break out of these inner loops // to continue on with the incoming PDU's - let mut failed = false; for map in &fork_states { let mut state_auth = vec![]; - for pdu in map.values() { - let event = match auth_cache.get(pdu.event_id()) { + for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { + let event = match auth_cache.get(auth_id) { Some(aev) => aev.clone(), // We should know about every event at this point but just incase... 
None => match fetch_events( &db, server_name, &pub_key_map, - &[pdu.event_id().clone()], + &[auth_id.clone()], &mut auth_cache, ) .await - .map(|mut vec| vec.remove(0)) - { - Ok(aev) => aev.clone(), + .map(|mut vec| { + number_fetches += 1; + vec.remove(0) + }) { + Ok(aev) => aev, Err(_) => { resolved_map.insert( event_id.clone(), Err("Event has been soft failed".into()), ); - failed = true; - break; + continue 'main_pdu_loop; } }, }; state_auth.push(event); } - if failed { - break; - } auth_events.push(state_auth); } - if failed { - continue; - } + info!("{} event's were not in the auth_cache", number_fetches); // Add everything we will need to event_map event_map.extend( @@ -886,7 +891,13 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) .collect(), - Err(e) => panic!("{:?}", e), + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("State resolution failed, either an event could not be found or deserialization".into()), + ); + continue 'main_pdu_loop; + } } }; @@ -914,6 +925,7 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). @@ -955,6 +967,37 @@ fn validate_event<'a>( }) } +/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events +/// The check in `fetch_check_auth_events` is that a complete chain is found for the +/// events `auth_events`. If the chain is found to have any missing events it fails. +/// +/// The `auth_cache` is filled instead of returning a `Vec`. +async fn fetch_check_auth_events( + db: &Database, + origin: &ServerName, + key_map: &PublicKeyMap, + event_ids: &[EventId], + auth_cache: &mut EventMap>, +) -> Result<()> { + let mut stack = event_ids.to_vec(); + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if auth_cache.contains_key(&ev_id) { + continue; + } + + let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) + .await + .map(|mut vec| vec.remove(0))?; + + stack.extend(ev.auth_events()); + auth_cache.insert(ev.event_id().clone(), ev); + } + Ok(()) +} + /// Find the event and auth it. /// /// 1. Look in the main timeline (pduid_pdu tree) @@ -1000,36 +1043,6 @@ async fn fetch_events( Ok(pdus) } -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have any missing events it fails. -/// -/// The `auth_cache` is filled instead of returning a `Vec`. -async fn fetch_check_auth_events( - db: &Database, - origin: &ServerName, - key_map: &PublicKeyMap, - event_ids: &[EventId], - auth_cache: &mut EventMap>, -) -> Result<()> { - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if auth_cache.contains_key(&ev_id) { - continue; - } - - let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) - .await - .map(|mut vec| vec.remove(0))?; - - stack.extend(ev.auth_events()); - auth_cache.insert(ev.event_id().clone(), ev); - } - Ok(()) -} - /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. 
async fn fetch_signing_keys( @@ -1049,6 +1062,7 @@ async fn fetch_signing_keys( } } } + fn signature_and_hash_check( pub_key_map: &ruma::signatures::PublicKeyMap, value: CanonicalJsonObject, @@ -1073,9 +1087,23 @@ fn signature_and_hash_check( ) } -fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result>>> { +async fn forward_extremity_ids( + db: &Database, + pdu: &PduEvent, + origin: &ServerName, + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + + for incoming_leaf in &pdu.prev_events { + if !current_leaves.contains(incoming_leaf) { + current_leaves.push(incoming_leaf.clone()); + } + } + let mut fork_states = vec![]; - for id in &db.rooms.get_pdu_leaves(pdu.room_id())? { + for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1090,11 +1118,32 @@ fn forward_extremity_ids(db: &Database, pdu: &PduEvent) -> Result Date: Sat, 16 Jan 2021 16:37:20 -0500 Subject: [PATCH 28/62] Fixing the incoming events algorithm (review with time) --- src/database/rooms.rs | 2 +- src/server_server.rs | 237 ++++++++++++++++++++++++------------------ 2 files changed, 138 insertions(+), 101 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d62d4b0..325a2e2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -150,7 +150,7 @@ impl Rooms { } } - /// Returns the last state hash key added to the db. + /// Returns the state hash for this pdu. pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { Ok(self.pduid_statehash.get(pdu_id)?) } diff --git a/src/server_server.rs b/src/server_server.rs index 77f0fa8..0eb7d6f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,6 +5,7 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ + client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -590,6 +591,8 @@ pub async fn send_transaction_message_route<'a>( continue; } }; + + // 1. check the server is in the room (optional) if !db.rooms.exists(&room_id)? { resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); continue; @@ -634,14 +637,13 @@ pub async fn send_transaction_message_route<'a>( // the auth events that it references. let mut auth_cache = EventMap::new(); - // 1. check the server is in the room (optional) // 2. check content hash, redact if doesn't match // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events - // 6. persist this event as an outlier // 7. if not timeline event: stop - let pdu = match validate_event( + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let (pdu, previous) = match validate_event( &db, value, event_id.clone(), @@ -659,59 +661,16 @@ pub async fn send_transaction_message_route<'a>( } }; - let pdu = Arc::new(pdu.clone()); - // Fetch any unknown prev_events or retrieve them from the DB - let previous = match fetch_events( - &db, - server_name, - &pub_key_map, - &pdu.prev_events, - &mut auth_cache, - ) - .await - { - Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)), - _ => None, + let single_prev = if previous.len() == 1 { + previous.first().cloned() + } else { + None }; - // [auth_cache] At this point we have the auth chain of the incoming event. - let mut event_map: state_res::EventMap> = auth_cache - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(); + // 6. persist the event as an outlier. + db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; - // Check that the event passes auth based on the auth_events - let is_authed = state_res::event_auth::auth_check( - &RoomVersionId::Version6, - &pdu, - previous.clone(), - &pdu.auth_events - .iter() - .map(|id| { - auth_cache - .get(id) - .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) - .ok_or_else(|| { - Error::Conflict( - "Auth event not found, event failed recursive auth checks.", - ) - }) - }) - .collect::>>()?, - None, // TODO: third party invite - ) - .map_err(|_e| Error::Conflict("Auth check failed"))?; - - if !is_authed { - resolved_map.insert( - pdu.event_id().clone(), - Err("Event has failed auth check with auth events".into()), - ); - continue; - } - // End of step 4. - - // Step 5. event passes auth based on state at the event + // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -757,9 +716,7 @@ pub async fn send_transaction_message_route<'a>( &res.auth_chain_ids, &mut auth_cache, ) - .await? - .into_iter() - .collect(), + .await?, ) } Err(_) => { @@ -771,10 +728,11 @@ pub async fn send_transaction_message_route<'a>( } }; + // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous.clone(), + single_prev.clone(), &state_at_event, None, // TODO: third party invite ) @@ -787,10 +745,34 @@ pub async fn send_transaction_message_route<'a>( ); continue; } - // End of step 5. + // End of step 10. + + // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it + let current_state = db + .rooms + .room_state_full(pdu.room_id())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(); + + if !state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + single_prev.clone(), + ¤t_state, + None, + ) + .map_err(|_e| Error::Conflict("Auth check failed"))? + { + // Soft fail, we add the event as an outlier. + resolved_map.insert( + pdu.event_id().clone(), + Err("Event has been soft failed".into()), + ); + }; // Gather the forward extremities and resolve - let fork_states = match forward_extremity_ids( + let fork_states = match forward_extremities( &db, &pdu, server_name, @@ -806,7 +788,9 @@ pub async fn send_transaction_message_route<'a>( } }; - // Step 6. event passes auth based on state of all forks and current room state + // 13. 
start state-res with all previous forward extremities minus the ones that are in + // the prev_events of this event plus the new one created by this event and use + // the result as the new room state let state_at_forks = if fork_states.is_empty() { // State is empty Default::default() @@ -852,6 +836,7 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); + let mut event_map = EventMap::new(); // Add everything we will need to event_map event_map.extend( auth_events @@ -904,7 +889,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - previous, + single_prev, &state_at_forks, None, ) @@ -925,14 +910,19 @@ pub async fn send_transaction_message_route<'a>( Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) } +/// An async function that can recursively calls itself. +type AsyncRecursiveResult<'a, T> = Pin> + 'a + Send>>; + /// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// Validate any event that is given to us by another server. /// /// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this). -/// 2. Passes signature checks, otherwise event is dropped. -/// 3. Passes hash checks, otherwise it is redacted before being processed further. -/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively). -/// 5. Once the event has passed all checks it can be added as an outlier to the DB. +/// 2. check content hash, redact if doesn't match +/// 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events +/// 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" +/// 5. reject "due to auth events" if the event doesn't pass auth based on the auth events +/// 7. if not timeline event: stop +/// 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events fn validate_event<'a>( db: &'a Database, value: CanonicalJsonObject, @@ -940,9 +930,24 @@ fn validate_event<'a>( pub_key_map: &'a PublicKeyMap, origin: &'a ServerName, auth_cache: &'a mut EventMap>, -) -> Pin> + 'a + Send>> { +) -> AsyncRecursiveResult<'a, (Arc, Vec>)> { Box::pin(async move { - let mut val = signature_and_hash_check(&pub_key_map, value)?; + let mut val = + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + Ok(ver) => { + if let ruma::signatures::Verified::Signatures = ver { + match ruma::signatures::redact(&value, &RoomVersionId::Version6) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_string()), + } + } else { + value + } + } + Err(_e) => { + return Err("Signature verification failed".to_string()); + } + }; // Now that we have checked the signature and hashes we can add the eventID and convert // to our PduEvent type also finally verifying the first step listed above @@ -959,11 +964,42 @@ fn validate_event<'a>( .await .map_err(|_| "Event failed auth chain check".to_string())?; - db.rooms - .append_pdu_outlier(pdu.event_id(), &pdu) + let pdu = Arc::new(pdu.clone()); + + // 8. fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let previous = fetch_events(&db, origin, &pub_key_map, &pdu.prev_events, auth_cache) + .await .map_err(|e| e.to_string())?; - Ok(pdu) + // Check that the event passes auth based on the auth_events + let is_authed = state_res::event_auth::auth_check( + &RoomVersionId::Version6, + &pdu, + if previous.len() == 1 { + previous.first().cloned() + } else { + None + }, + &pdu.auth_events + .iter() + .map(|id| { + auth_cache + .get(id) + .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone())) + .ok_or_else(|| { + "Auth event not found, event failed recursive auth checks.".to_string() + }) + }) + .collect::, _>>()?, + None, // TODO: third party invite + ) + .map_err(|_e| "Auth check failed".to_string())?; + + if !is_authed { + return Err("Event has failed auth check with auth events".to_string()); + } + + Ok((pdu, previous)) }) } @@ -990,7 +1026,10 @@ async fn fetch_check_auth_events( let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await - .map(|mut vec| vec.remove(0))?; + .map(|mut vec| { + vec.pop() + .ok_or_else(|| Error::Conflict("Event was not found in fetch_events")) + })??; stack.extend(ev.auth_events()); auth_cache.insert(ev.event_id().clone(), ev); @@ -1028,11 +1067,12 @@ async fn fetch_events( { Ok(res) => { let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; - Arc::new(pdu) + pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, @@ -1063,31 +1103,11 @@ async fn fetch_signing_keys( } } -fn signature_and_hash_check( - pub_key_map: &ruma::signatures::PublicKeyMap, - value: CanonicalJsonObject, -) -> std::result::Result { - Ok( - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { - Ok(ver) => { - if let ruma::signatures::Verified::Signatures = ver { - error!("CONTENT HASH FAILED"); - match ruma::signatures::redact(&value, &RoomVersionId::Version6) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_string()), - } - } else { - value - } - } - Err(_e) => { - return Err("Signature verification failed".to_string()); - } - }, - ) -} - -async fn forward_extremity_ids( +/// Gather all state snapshots needed to resolve the current state of the room. +/// +/// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) +async fn forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, @@ -1102,6 +1122,8 @@ async fn forward_extremity_ids( } } + let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; let mut fork_states = vec![]; for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { @@ -1109,6 +1131,10 @@ async fn forward_extremity_ids( .rooms .pdu_state_hash(&id)? .expect("found pdu with no statehash"); + + if current_hash.as_ref() == Some(&state_hash) { + includes_current_state = true; + } let state = db .rooms .state_full(&pdu.room_id, &state_hash)? 
@@ -1144,6 +1170,17 @@ async fn forward_extremity_ids( } } + // This guarantees that our current room state is included + if !includes_current_state && current_hash.is_some() { + fork_states.push( + db.rooms + .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect(), + ) + } + Ok(fork_states) } From 7309b2fba99b5c16704829a2729aad20f035ddc8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:08:59 -0500 Subject: [PATCH 29/62] WIP gather and update forward extremities --- src/database/rooms.rs | 18 ++++++ src/pdu.rs | 21 ++++++- src/server_server.rs | 138 ++++++++++++++++++++++++++++-------------- 3 files changed, 132 insertions(+), 45 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 325a2e2..665e328 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,6 +397,24 @@ impl Rooms { Ok(events) } + /// Force an update to the leaves of a room. + pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { + self.roomid_pduleaves.remove(key?)?; + } + + for event_id in event_ids.iter() { + let mut key = prefix.to_owned(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } + /// Replace the leaves of a room with a new event. pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/pdu.rs b/src/pdu.rs index 340ddee..e38410f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -9,7 +9,7 @@ use ruma::{ }; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::{collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, time::UNIX_EPOCH}; #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { @@ -284,6 +284,25 @@ impl state_res::Event for PduEvent { } } +// These impl's allow us to dedup state snapshots when resolving state +// for incoming events (federation/send/{txn}). +impl Eq for PduEvent {} +impl PartialEq for PduEvent { + fn eq(&self, other: &Self) -> bool { + self.event_id == other.event_id + } +} +impl PartialOrd for PduEvent { + fn partial_cmp(&self, other: &Self) -> Option { + self.event_id.partial_cmp(&other.event_id) + } +} +impl Ord for PduEvent { + fn cmp(&self, other: &Self) -> Ordering { + self.event_id.cmp(&other.event_id) + } +} + /// Generates a correct eventId for the incoming pdu. /// /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. 
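
The Eq/Ord impls above order a PduEvent purely by its event_id; they exist so that whole state snapshots can be deduplicated. In the next diff the fork states are collected into a BTreeSet<StateMap<Arc<PduEvent>>>, and a BTreeSet only accepts elements that are Ord, which for a BTreeMap (the StateMap) means both its keys and its Arc<PduEvent> values must be Ord. A minimal sketch of the effect, using a simplified Pdu struct and made-up event IDs in place of the real types:

use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;

// Simplified stand-in for PduEvent, ordered by event_id like the impls above.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Pdu {
    event_id: String,
}

fn main() {
    let create = Arc::new(Pdu { event_id: "$create:example.org".to_owned() });
    let member = Arc::new(Pdu { event_id: "$member:example.org".to_owned() });

    // Two forward extremities that happen to carry the same state snapshot.
    let snapshot_a: BTreeMap<(String, Option<String>), Arc<Pdu>> = vec![
        (("m.room.create".to_owned(), Some(String::new())), create.clone()),
        (("m.room.member".to_owned(), Some("@alice:example.org".to_owned())), member.clone()),
    ]
    .into_iter()
    .collect();
    let snapshot_b = snapshot_a.clone();

    // BTreeSet requires Ord elements; the map is Ord because its keys and its
    // Arc<Pdu> values are Ord, so the duplicate snapshot collapses into one entry.
    let mut fork_states = BTreeSet::new();
    fork_states.insert(snapshot_a);
    fork_states.insert(snapshot_b);
    assert_eq!(fork_states.len(), 1);
}

Identical snapshots reached from different extremities therefore cost only one set entry, so state resolution is not run over the same input twice.
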
diff --git a/src/server_server.rs b/src/server_server.rs index 0eb7d6f..16a1a8e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -5,7 +5,6 @@ use log::{error, info, warn}; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ - client::r0::state, federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ @@ -25,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -600,31 +599,21 @@ pub async fn send_transaction_message_route<'a>( let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); - if let Some(sig) = value.get("signatures") { - match sig { - CanonicalJsonValue::Object(entity) => { - for key in entity.keys() { - // TODO: save this in a DB maybe... - // fetch the public signing key - let origin = <&ServerName>::try_from(key.as_str()).unwrap(); - let keys = fetch_signing_keys(&db, origin).await?; - pub_key_map.insert( - origin.to_string(), - keys.into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(), - ); - } - } - _ => { - resolved_map.insert( - event_id, - Err("`signatures` is not a JSON object".to_string()), - ); - continue; - } - } + if let Some(CanonicalJsonValue::String(sender)) = value.get("sender") { + let sender = + UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); + let origin = sender.server_name(); + + // TODO: this could fail or the server not respond... + let keys = fetch_signing_keys(&db, origin).await?; + + pub_key_map.insert( + origin.to_string(), + keys.into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(), + ); } else { resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); continue; @@ -642,8 +631,9 @@ pub async fn send_transaction_message_route<'a>( // 4. reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events // 7. if not timeline event: stop - // 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let (pdu, previous) = match validate_event( + // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + // the events found in step 8 can be authed/resolved and appended to the DB + let (pdu, previous): (_, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -670,6 +660,9 @@ pub async fn send_transaction_message_route<'a>( // 6. persist the event as an outlier. db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all + // the checks in this list starting at 1. These are not timeline events. + // // Step 10. check the auth of the event passes based on the calculated state of the event let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db @@ -771,8 +764,12 @@ pub async fn send_transaction_message_route<'a>( ); }; - // Gather the forward extremities and resolve - let fork_states = match forward_extremities( + // Step 11. Ensure that the state is derived from the previous current state (i.e. 
we calculated by doing state res + // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) + // + // calculate_forward_extremities takes care of adding the current state if not already in the state sets + // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. + let (mut fork_states, fork_ids) = match calculate_forward_extremities( &db, &pdu, server_name, @@ -788,6 +785,12 @@ pub async fn send_transaction_message_route<'a>( } }; + // add the incoming events to the mix of state snapshots + // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets + fork_states.insert(state_at_event.clone()); + + let fork_states = fork_states.into_iter().collect::>(); + // 13. start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -901,7 +904,9 @@ pub async fn send_transaction_message_route<'a>( Err("Event has been soft failed".into()), ); } else { - append_state(&db, &pdu)?; + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_state(&db, &pdu, &fork_ids)?; + // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } @@ -1106,25 +1111,52 @@ async fn fetch_signing_keys( /// Gather all state snapshots needed to resolve the current state of the room. /// /// Step 11. ensure that the state is derived from the previous current state (i.e. we calculated by doing state res -/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) -async fn forward_extremities( +/// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). +/// +/// The state snapshot of the incoming event __needs__ to be added to the resulting list. +async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, -) -> Result>>> { +) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; + let mut is_incoming_leaf = true; + // Make sure the incoming event is not already a forward extremity + // FIXME: I think this could happen if different servers send us the same event?? + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + + // If the incoming event is already referenced by an existing event + // then do nothing - it's not a candidate to be a new extremity if + // it has been referenced. + if already_referenced(db, pdu)? { + is_incoming_leaf = false; + // This event has been dealt with already?? + } + + // TODO: + // [dendrite] Checks if any other leaves have been referenced and removes them + // but as long as we update the pdu leaves here and for events on our server this + // should not be possible. 
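    // An illustrative walk-through with hypothetical events: if the current
    // leaves of the room are { A, B } and the incoming event E lists B in its
    // prev_events, then B stops being an extremity because E now references it,
    // while A stays since nothing new references it. Once E itself is added as
    // a leaf further down, the extremities become { A, E }.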
+ + // Remove any forward extremities that are referenced by this incoming events prev_events for incoming_leaf in &pdu.prev_events { - if !current_leaves.contains(incoming_leaf) { - current_leaves.push(incoming_leaf.clone()); + if current_leaves.contains(incoming_leaf) { + if let Some(pos) = current_leaves.iter().position(|x| *x == *incoming_leaf) { + current_leaves.remove(pos); + } } } let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + let mut includes_current_state = false; - let mut fork_states = vec![]; + let mut fork_states = BTreeSet::new(); for id in ¤t_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db @@ -1142,8 +1174,10 @@ async fn forward_extremities( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); - fork_states.push(state); + fork_states.insert(state); } else { + error!("Forward extremity not found... {}", id); + let res = db .sending .send_federation_request( @@ -1166,25 +1200,37 @@ async fn forward_extremities( .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); - fork_states.push(state); + fork_states.insert(state); } } + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.push( + fork_states.insert( db.rooms .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(), - ) + ); } - Ok(fork_states) + Ok((fork_states, dbg!(current_leaves))) } -fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { +/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) +fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { + Ok(false) +} + +fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1195,13 +1241,17 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> { let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( - &pdu, + pdu, utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), &db, )?; + // If we update the room leaves after calling append_pdu it will stick since append_pdu + // calls replace_pdu_leaves with only the given event. + db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; + // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From aa7b6b6e09ee2ea4af0fb5b6a5b7e0fd51ddd11b Mon Sep 17 00:00:00 2001 From: Valkum Date: Mon, 22 Feb 2021 19:06:10 +0100 Subject: [PATCH 30/62] Sync paths with CI pipeline due to dockerignore# As the docker ignore file includes the target dir, content in this dir is no accessible to the docker daemon. 
We circumvent this by providing the build artifact in a dir called cached_dir --- tests/Complement.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 24ee9ea..370db7c 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -9,7 +9,7 @@ ARG SCCACHE_ENDPOINT ARG SCCACHE_S3_USE_SSL COPY . . -RUN test -e target/release/conduit || cargo build --release --offline +RUN test -e cached_target/release/conduit || cargo build --release FROM valkum/docker-rust-ci:latest WORKDIR /workdir From 66af1ff6958d2096d549b98d2c830a09652d2f33 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:36:44 -0500 Subject: [PATCH 31/62] Update ruma and rocket to latest git rev and tokio to 1.0 Ruma updated the event signing validation code and there was a dep resolving failure with serde rocket and tokio so I updated rocket latest and tokio 1.0 to fix. --- Cargo.lock | 453 ++++++++++++++++++++++++-------------- Cargo.toml | 6 +- src/client_server/sync.rs | 3 +- 3 files changed, 294 insertions(+), 168 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7a79dbe..0561d0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -148,15 +148,15 @@ checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "bytemuck" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aa2ec95ca3b5c54cf73c91acf06d24f4495d5f1b1c12506ae3483d646177ac" +checksum = "5a4bad0c5981acc24bc09e532f35160f952e35422603f0563cd7a73c2c2e65a0" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -164,6 +164,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + [[package]] name = "cc" version = "1.0.66" @@ -191,7 +197,7 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.43", "winapi 0.3.9", ] @@ -225,7 +231,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio", + "tokio 1.0.2", "trust-dns-resolver", ] @@ -247,7 +253,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.23", + "time 0.2.24", "version_check", ] @@ -402,9 +408,9 @@ dependencies = [ [[package]] name = "figment" -version = "0.9.4" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13e2d266426f89e45fc544117ade84fad2a58ff676f34cc34e123fe4391b856" +checksum = "a3add2ec7727c9584a0ce75ee3c0f54f0ab692c7934450cc3a0287251e3a4f06" dependencies = [ "pear", "serde", @@ -472,9 +478,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = 
"da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" dependencies = [ "futures-channel", "futures-core", @@ -487,9 +493,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" dependencies = [ "futures-core", "futures-sink", @@ -497,15 +503,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" dependencies = [ "futures-core", "futures-task", @@ -514,15 +520,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -532,24 +538,24 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" dependencies = [ "once_cell", ] [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" dependencies = [ "futures-channel", "futures-core", @@ -558,7 +564,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.3", + "pin-project-lite 0.2.4", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -593,7 +599,7 @@ checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.1+wasi-snapshot-preview1", ] [[package]] @@ -624,7 +630,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", 
"futures-sink", @@ -632,8 +638,28 @@ dependencies = [ "http", "indexmap", "slab", - "tokio", - "tokio-util", + "tokio 0.2.24", + "tokio-util 0.3.1", + "tracing", + "tracing-futures", +] + +[[package]] +name = "h2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.0.2", + "tokio-util 0.6.1", "tracing", "tracing-futures", ] @@ -655,9 +681,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -675,11 +701,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -690,7 +716,17 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes", + "bytes 0.5.6", + "http", +] + +[[package]] +name = "http-body" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +dependencies = [ + "bytes 1.0.1", "http", ] @@ -712,19 +748,43 @@ version = "0.13.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.2.7", "http", - "http-body", + "http-body 0.3.1", "httparse", "httpdate", "itoa", - "pin-project 1.0.3", + "pin-project 1.0.4", "socket2", - "tokio", + "tokio 0.2.24", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.0", + "http", + "http-body 0.4.0", + "httparse", + "httpdate", + "itoa", + "pin-project 1.0.4", + "socket2", + "tokio 1.0.2", "tower-service", "tracing", "want", @@ -736,10 +796,10 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ - "bytes", - "hyper", + "bytes 0.5.6", + "hyper 0.13.9", "native-tls", - "tokio", + "tokio 0.2.24", "tokio-tls", ] @@ -897,15 +957,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" [[package]] name = "linked-hash-map" -version = "0.5.3" +version = 
"0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -918,9 +978,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" dependencies = [ "cfg-if 0.1.10", ] @@ -1015,21 +1075,23 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ - "iovec", "libc", - "mio", + "log", + "miow 0.3.6", + "ntapi", + "winapi 0.3.9", ] [[package]] @@ -1044,6 +1106,16 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "miow" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2", + "winapi 0.3.9", +] + [[package]] name = "native-tls" version = "0.2.7" @@ -1073,6 +1145,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-bigint" version = "0.2.6" @@ -1210,7 +1291,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.1.57", "smallvec", "winapi 0.3.9", ] @@ -1272,11 +1353,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a83804639aad6ba65345661744708855f9fbcb71176ea8d28d05aeb11d975e7" +checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" dependencies = [ - "pin-project-internal 1.0.3", + "pin-project-internal 1.0.4", ] [[package]] @@ -1292,9 +1373,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7bcc46b8f73443d15bc1c5fecbb315718491fa9187fa483f0e359323cde8b3a" +checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" dependencies = [ "proc-macro2", "quote", @@ -1309,9 +1390,9 @@ checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = "pin-project-lite" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36743d754ccdf9954c2e352ce2d4b106e024c814f6499c2dadff80da9a442d8" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -1360,9 +1441,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -1416,9 +1497,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24fcd450d3fa2b592732565aa4f17a27a61c65ece4726353e000939b0edee34" +checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1488,6 +1569,15 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" @@ -1495,7 +1585,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ "getrandom 0.1.16", - "redox_syscall", + "redox_syscall 0.1.57", "rust-argon2", ] @@ -1521,9 +1611,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -1533,9 +1623,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -1553,13 +1643,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", - "bytes", + "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body", - "hyper", + "http-body 0.3.1", + "hyper 0.13.9", "hyper-tls", "ipnet", "js-sys", @@ -1569,10 +1659,10 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "serde", "serde_urlencoded", - "tokio", + "tokio 0.2.24", "tokio-tls", "url", "wasm-bindgen", @@ -1609,7 +1699,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "async-trait", "atomic", @@ -1628,8 +1718,8 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.23", - "tokio", + "time 0.2.24", + "tokio 1.0.2", "ubyte", "version_check", "yansi", @@ -1638,7 +1728,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = 
"git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "devise", "glob", @@ -1650,23 +1740,24 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=c24f15c18f02319be83af4f3c1951dc220b52c5e#c24f15c18f02319be83af4f3c1951dc220b52c5e" dependencies = [ "cookie", "either", "http", - "hyper", + "hyper 0.14.2", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", + "pin-project-lite 0.2.4", "ref-cast", "smallvec", "state", - "time 0.2.23", - "tokio", + "time 0.2.24", + "tokio 1.0.2", "tokio-rustls", "uncased", "unicode-xid", @@ -1675,8 +1766,8 @@ dependencies = [ [[package]] name = "ruma" -version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.0.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "js_int", @@ -1693,8 +1784,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "http", "percent-encoding", @@ -1708,8 +1799,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.17.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.17.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1719,8 +1810,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.2.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.2.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "ruma-api", "ruma-common", @@ -1733,8 +1824,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.10.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.10.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "assign", "http", @@ -1752,8 +1843,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "maplit", @@ -1765,8 +1856,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.22.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-common", @@ -1779,8 +1870,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.22.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.22.0-alpha.2" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1790,8 +1881,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.0.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.1.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "js_int", "ruma-api", @@ -1805,11 +1896,11 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "paste", - "rand 0.8.1", + "rand 0.8.2", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1819,8 +1910,8 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.18.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro2", "quote", @@ -1830,16 +1921,13 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.1.1" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" -dependencies = [ - "serde", -] +version = "0.2.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" [[package]] name = "ruma-serde" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "form_urlencoded", "itoa", @@ -1851,8 +1939,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.2.3" -source = "git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.3.0" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1862,8 +1950,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.6.0-dev.1" -source = 
"git+https://github.com/ruma/ruma?rev=210b6dd823ba89c5a44c3c9d913d377c4b54c896#210b6dd823ba89c5a44c3c9d913d377c4b54c896" +version = "0.6.0-alpha.1" +source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" dependencies = [ "base64 0.13.0", "ring", @@ -1902,11 +1990,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "log", "ring", "sct", @@ -1985,18 +2073,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" dependencies = [ "proc-macro2", "quote", @@ -2088,9 +2176,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" @@ -2127,7 +2215,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#d31c88408e7f69f5b0f18141efeaefff6b83637f" +source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" dependencies = [ "itertools", "maplit", @@ -2200,14 +2288,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.2", + "redox_syscall 0.2.4", "remove_dir_all", "winapi 0.3.9", ] @@ -2234,29 +2322,28 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" dependencies = [ "lazy_static", ] [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.23" 
+version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" dependencies = [ "const_fn", "libc", @@ -2311,28 +2398,41 @@ version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "iovec", "lazy_static", + "memchr", + "mio 0.6.23", + "pin-project-lite 0.1.11", + "slab", +] + +[[package]] +name = "tokio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" +dependencies = [ + "autocfg", + "bytes 1.0.1", "libc", "memchr", - "mio", - "mio-uds", + "mio 0.7.7", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite 0.2.4", "signal-hook-registry", - "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" dependencies = [ "proc-macro2", "quote", @@ -2341,14 +2441,24 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio 1.0.2", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "rustls", - "tokio", - "webpki", + "pin-project-lite 0.2.4", + "tokio 1.0.2", ] [[package]] @@ -2358,7 +2468,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio", + "tokio 0.2.24", ] [[package]] @@ -2367,12 +2477,27 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-core", "futures-sink", "log", "pin-project-lite 0.1.11", - "tokio", + "tokio 0.2.24", +] + +[[package]] +name = "tokio-util" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" +dependencies = [ + "bytes 1.0.1", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.2.4", + "tokio 1.0.2", + "tokio-stream", ] [[package]] @@ -2398,7 +2523,7 @@ checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.1", + "pin-project-lite 0.2.4", "tracing-attributes", "tracing-core", ] @@ -2449,7 +2574,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "url", ] @@ -2469,7 +2594,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio", + "tokio 0.2.24", "trust-dns-proto", ] @@ 
-2584,9 +2709,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" diff --git a/Cargo.toml b/Cargo.toml index fdcc4ec..dd37838 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,11 +14,11 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f336e5a172361fc1860461bb03667b1ed2", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18f02319be83af4f3c1951dc220b52c5e", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "210b6dd823ba89c5a44c3c9d913d377c4b54c896" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } @@ -29,7 +29,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", featu # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.23" } +tokio = { version = "1.0.2", features = ["macros", "time"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 494c773..6cd518d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -698,7 +698,8 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! { _ = &mut delay => {} _ = watcher => {} From 88c60605b457d0163b0b8d427e51cd07b0dd1f4c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 18 Jan 2021 19:41:38 -0500 Subject: [PATCH 32/62] Add ability to update room leaves with multiple eventIds Tokio seems a bit broken with Rocket... 
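
For context on the tokio 1.x change in the previous patch: `tokio::time::delay_for` no longer exists, and its replacement `tokio::time::sleep` returns a future that is not `Unpin`, so it must be pinned before it can be polled by mutable reference inside `select!`. The following is a minimal standalone sketch of that pattern, not part of this patch; it assumes tokio's "macros", "time" and a runtime feature are enabled, and `wake_up()` is a hypothetical stand-in for the watcher future used in sync_events_route.

use std::time::Duration;

// Hypothetical stand-in for the watcher future; here it never completes.
async fn wake_up() {
    std::future::pending::<()>().await;
}

#[tokio::main]
async fn main() {
    let duration = Duration::from_secs(30);

    // tokio 1.x: `sleep` replaces `delay_for`; the returned future is !Unpin,
    // so pin it on the stack before polling it by `&mut` in `select!`.
    let delay = tokio::time::sleep(duration);
    tokio::pin!(delay);

    tokio::select! {
        _ = &mut delay => { /* timed out after `duration` */ }
        _ = wake_up() => { /* woken up before the timeout */ }
    }
}
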
--- src/client_server/membership.rs | 2 ++ src/database/rooms.rs | 37 ++++++++++++++++----------------- src/server_server.rs | 7 ++----- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 70bb480..1159185 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -665,6 +665,8 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + // TODO: can we simplify the DAG or should we copy it exactly?? + &pdu.prev_events, &db, )?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 665e328..a3f3aab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -397,8 +397,11 @@ impl Rooms { Ok(events) } - /// Force an update to the leaves of a room. - pub fn force_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + /// Replace the leaves of a room. + /// + /// The provided `event_ids` become the new leaves, this enables an event having multiple + /// `prev_events`. + pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -415,21 +418,6 @@ impl Rooms { Ok(()) } - /// Replace the leaves of a room with a new event. - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_id: &EventId) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for key in self.roomid_pduleaves.scan_prefix(&prefix).keys() { - self.roomid_pduleaves.remove(key?)?; - } - - prefix.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&prefix, event_id.as_bytes())?; - - Ok(()) - } - /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -465,6 +453,7 @@ impl Rooms { mut pdu_json: CanonicalJsonObject, count: u64, pdu_id: IVec, + leaves: &[EventId], db: &Database, ) -> Result<()> { // Make unsigned fields correct. This is not properly documented in the spec, but state @@ -497,7 +486,7 @@ impl Rooms { // We no longer keep this pdu as an outlier self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; - self.replace_pdu_leaves(&pdu.room_id, &pdu.event_id)?; + self.replace_pdu_leaves(&pdu.room_id, leaves)?; // Mark as read first so the sending client doesn't get a notification even if appending // fails @@ -943,7 +932,17 @@ impl Rooms { // pdu without it's state. This is okay because append_pdu can't fail. 
let statehashid = self.append_to_state(&pdu_id, &pdu, &db.globals)?; - self.append_pdu(&pdu, pdu_json, count, pdu_id.clone().into(), db)?; + // remove the + self.append_pdu( + &pdu, + pdu_json, + count, + pdu_id.clone().into(), + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + &[pdu.event_id.clone()], + db, + )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist diff --git a/src/server_server.rs b/src/server_server.rs index 16a1a8e..f782ad5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -24,7 +24,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, fmt::Debug, future::Future, @@ -1245,13 +1245,10 @@ fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> R utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), + &new_room_leaves, &db, )?; - // If we update the room leaves after calling append_pdu it will stick since append_pdu - // calls replace_pdu_leaves with only the given event. - db.rooms.force_pdu_leaves(pdu.room_id(), new_room_leaves)?; - // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist db.rooms.set_room_state(&pdu.room_id, &statehashid)?; From 602edfd8499726e21b51eaa4a4a8927381c876c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 24 Jan 2021 16:05:52 +0100 Subject: [PATCH 33/62] feature: push rule settings --- src/client_server/room.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 631d87b..4adc335 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -220,12 +220,8 @@ pub async fn create_room_route( continue; } - db.rooms.build_and_append_pdu( - pdu_builder, - &sender_user, - &room_id, - &db, - )?; + db.rooms + .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?; } // 6. 
Events implied by name and topic From 05a4c0b325f1b8f1c7d3d5dbb56ee22b6e8af858 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 24 Jan 2021 20:18:40 -0500 Subject: [PATCH 34/62] Finish forward extremity gathering, use resolved state as new snapshot --- src/server_server.rs | 147 +++++++++++++++++++++++-------------------- 1 file changed, 80 insertions(+), 67 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index f782ad5..e733d24 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,6 +18,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -483,34 +484,6 @@ pub async fn get_public_rooms_route( .into()) } -#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] -pub enum PrevEvents { - Sequential(T), - Fork(Vec), -} - -impl IntoIterator for PrevEvents { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - match self { - Self::Sequential(item) => vec![item].into_iter(), - Self::Fork(list) => list.into_iter(), - } - } -} - -impl PrevEvents { - pub fn new(id: &[T]) -> Self { - match id { - [] => panic!("All events must have previous event"), - [single_id] => Self::Sequential(single_id.clone()), - rest => Self::Fork(rest.to_vec()), - } - } -} - #[cfg_attr( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") @@ -605,8 +578,16 @@ pub async fn send_transaction_message_route<'a>( UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); let origin = sender.server_name(); - // TODO: this could fail or the server not respond... - let keys = fetch_signing_keys(&db, origin).await?; + let keys = match fetch_signing_keys(&db, origin).await { + Ok(keys) => keys, + Err(_) => { + resolved_map.insert( + event_id, + Err("Could not find signing keys for this server".to_string()), + ); + continue; + } + }; pub_key_map.insert( origin.to_string(), @@ -769,11 +750,12 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, fork_ids) = match calculate_forward_extremities( + let (mut fork_states, extremities) = match calculate_forward_extremities( &db, &pdu, server_name, &pub_key_map, + current_state, &mut auth_cache, ) .await @@ -791,6 +773,7 @@ pub async fn send_transaction_message_route<'a>( let fork_states = fork_states.into_iter().collect::>(); + let mut update_state = false; // 13. 
start state-res with all previous forward extremities minus the ones that are in // the prev_events of this event plus the new one created by this event and use // the result as the new room state @@ -800,11 +783,12 @@ pub async fn send_transaction_message_route<'a>( } else if fork_states.len() == 1 { fork_states[0].clone() } else { + // We do need to force an update to this rooms state + update_state = true; + // TODO: remove this is for current debugging Jan, 15 2021 let mut number_fetches = 0_u32; let mut auth_events = vec![]; - // this keeps track if we error so we can break out of these inner loops - // to continue on with the incoming PDU's for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { @@ -821,14 +805,12 @@ pub async fn send_transaction_message_route<'a>( .await .map(|mut vec| { number_fetches += 1; - vec.remove(0) + vec.pop() }) { - Ok(aev) => aev, - Err(_) => { - resolved_map.insert( - event_id.clone(), - Err("Event has been soft failed".into()), - ); + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); continue 'main_pdu_loop; } }, @@ -839,20 +821,19 @@ pub async fn send_transaction_message_route<'a>( } info!("{} event's were not in the auth_cache", number_fetches); - let mut event_map = EventMap::new(); // Add everything we will need to event_map - event_map.extend( + auth_cache.extend( auth_events .iter() .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone()))) .flatten(), ); - event_map.extend( + auth_cache.extend( incoming_auth_events .into_iter() .map(|pdu| (pdu.event_id().clone(), pdu)), ); - event_map.extend( + auth_cache.extend( state_at_event .into_iter() .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), @@ -873,7 +854,7 @@ pub async fn send_transaction_message_route<'a>( .into_iter() .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect()) .collect(), - &mut event_map, + &mut auth_cache, ) { Ok(res) => res .into_iter() @@ -905,14 +886,23 @@ pub async fn send_transaction_message_route<'a>( ); } else { // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_state(&db, &pdu, &fork_ids)?; + append_incoming_pdu( + &db, + &pdu, + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; // Event has passed all auth/stateres checks resolved_map.insert(pdu.event_id().clone(), Ok(())); } } - Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into()) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } /// An async function that can recursively calls itself. @@ -1029,6 +1019,7 @@ async fn fetch_check_auth_events( continue; } + // TODO: Batch these async calls so we can wait on multiple at once let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) .await .map(|mut vec| { @@ -1119,6 +1110,7 @@ async fn calculate_forward_extremities( pdu: &PduEvent, origin: &ServerName, pub_key_map: &PublicKeyMap, + current_state: BTreeMap<(EventType, Option), Arc>, auth_cache: &mut EventMap>, ) -> Result<(BTreeSet>>, Vec)> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; @@ -1126,17 +1118,13 @@ async fn calculate_forward_extremities( let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
- if current_leaves.contains(pdu.event_id()) { - is_incoming_leaf = false; - // Not sure what to do here - } - + // // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if already_referenced(db, pdu)? { + if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { is_incoming_leaf = false; - // This event has been dealt with already?? + // Not sure what to do here } // TODO: @@ -1213,29 +1201,54 @@ async fn calculate_forward_extremities( // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { - fork_states.insert( - db.rooms - .state_full(pdu.room_id(), current_hash.as_ref().unwrap())? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(), - ); + fork_states.insert(current_state); } Ok((fork_states, dbg!(current_leaves))) } -/// TODO: we need to know if the event is a prev_event (is this event already referenced in the DAG) -fn already_referenced(_db: &Database, _pdu: &PduEvent) -> Result { - Ok(false) -} - -fn append_state(db: &Database, pdu: &PduEvent, new_room_leaves: &[EventId]) -> Result<()> { +/// Update the room state to be the resolved state and add the fully auth'ed event +/// to the DB. +/// +/// TODO: If we force the state we need to validate all events in that state +/// any events we fetched from another server need to be fully verified? +fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: Option>>, +) -> Result<()> { let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let new = state + .into_iter() + .map(|((ev, k), pdu)| { + Ok(( + ( + ev, + k.ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + db.rooms + .get_pdu_id(pdu.event_id()) + .ok() + .flatten() + .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? + .to_vec(), + )) + }) + .collect::>()?; + + info!("Force update of state for {:?}", pdu); + + db.rooms.force_state(pdu.room_id(), new, &db.globals)?; + } + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 68f60933e6af03889440a5f9c58d10ce67359f21 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:33:41 -0500 Subject: [PATCH 35/62] Resolved state is set as the current room state on incoming events --- src/server_server.rs | 43 +++++++++++++------------------------------ 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e733d24..14a1d0c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -870,36 +870,20 @@ pub async fn send_transaction_message_route<'a>( } }; - if !state_res::event_auth::auth_check( - &RoomVersionId::Version6, + // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + append_incoming_pdu( + &db, &pdu, - single_prev, - &state_at_forks, - None, - ) - .map_err(|_e| Error::Conflict("Auth check failed"))? - { - // Soft fail, we add the event as an outlier. 
- resolved_map.insert( - pdu.event_id().clone(), - Err("Event has been soft failed".into()), - ); - } else { - // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( - &db, - &pdu, - &extremities, - if update_state { - Some(state_at_forks) - } else { - None - }, - )?; + &extremities, + if update_state { + Some(state_at_forks) + } else { + None + }, + )?; - // Event has passed all auth/stateres checks - resolved_map.insert(pdu.event_id().clone(), Ok(())); - } + // Event has passed all auth/stateres checks + resolved_map.insert(pdu.event_id().clone(), Ok(())); } Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) @@ -1210,8 +1194,7 @@ async fn calculate_forward_extremities( /// Update the room state to be the resolved state and add the fully auth'ed event /// to the DB. /// -/// TODO: If we force the state we need to validate all events in that state -/// any events we fetched from another server need to be fully verified? +/// TODO: Since all these events passed state resolution can we trust them to add fn append_incoming_pdu( db: &Database, pdu: &PduEvent, From e0453e2348b5a77ea9ac0b5e40296a303027875c Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 28 Jan 2021 15:50:45 -0500 Subject: [PATCH 36/62] Cleanup dbg prints and error messages --- src/server_server.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 14a1d0c..20d76f1 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -496,7 +496,7 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - dbg!(&*body); + // dbg!(&*body); for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { @@ -1148,8 +1148,6 @@ async fn calculate_forward_extremities( fork_states.insert(state); } else { - error!("Forward extremity not found... {}", id); - let res = db .sending .send_federation_request( @@ -1188,7 +1186,7 @@ async fn calculate_forward_extremities( fork_states.insert(current_state); } - Ok((fork_states, dbg!(current_leaves))) + Ok((fork_states, current_leaves)) } /// Update the room state to be the resolved state and add the fully auth'ed event From 6fd3e1d1ddb2d9707f1713e962ee350a85e07795 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 11:20:33 -0500 Subject: [PATCH 37/62] Append state event that pass resolution to DB, update to tokio 1.1 --- Cargo.lock | 630 +++++++++++++------------------------- Cargo.toml | 12 +- src/client_server/sync.rs | 2 +- src/database.rs | 3 +- src/database/globals.rs | 14 +- src/server_server.rs | 113 +++++-- 6 files changed, 298 insertions(+), 476 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0561d0a..c7381be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,20 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" - [[package]] name = "adler32" version = "1.2.0" @@ -48,6 +33,27 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" +[[package]] +name = "async-stream" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.42" @@ -76,7 +82,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -85,32 +91,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "backtrace" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide 0.4.3", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -142,9 +128,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "f07aa6688c702439a1be0307b6a94dffe1168569e45b9500c1372bc580740d59" [[package]] name = "bytemuck" @@ -158,12 +144,6 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.0.1" @@ -176,12 +156,6 @@ version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ 
-211,7 +185,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64 0.13.0", + "base64", "directories", "http", "image", @@ -279,7 +253,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -288,7 +262,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "const_fn", "crossbeam-utils", "lazy_static", @@ -303,10 +277,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "deflate" version = "0.8.6" @@ -364,7 +344,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -391,7 +371,7 @@ version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -457,25 +437,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.3.12" @@ -564,7 +528,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.4", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -586,20 +550,20 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.1+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -612,12 +576,6 @@ dependencies = [ "weezl", ] -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - [[package]] name = "glob" 
version = "0.3.0" @@ -626,11 +584,11 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" dependencies = [ - "bytes 0.5.6", + "bytes", "fnv", "futures-core", "futures-sink", @@ -696,7 +654,7 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -705,18 +663,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" dependencies = [ - "bytes 0.5.6", + "bytes", "http", ] @@ -744,11 +702,11 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.9" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" dependencies = [ - "bytes 0.5.6", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -792,15 +750,15 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", - "hyper 0.13.9", + "bytes", + "hyper", "native-tls", - "tokio 0.2.24", - "tokio-tls", + "tokio", + "tokio-native-tls", ] [[package]] @@ -853,16 +811,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", + "cfg-if", ] [[package]] @@ -873,7 +822,7 @@ checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ "socket2", "widestring", - "winapi 0.3.9", + "winapi", "winreg 0.6.2", ] @@ -885,9 +834,9 @@ checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" dependencies = [ "either", ] @@ -900,18 +849,15 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" -version = "0.1.20" +version = "0.1.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc797adac5f083b8ff0ca6f6294a999393d76e197c36488e2ef732c4715f6fa3" -dependencies = [ - "byteorder", -] +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" dependencies = [ "wasm-bindgen", ] @@ -925,30 +871,6 @@ dependencies = [ "serde", ] -[[package]] -name = "jsonwebtoken" -version = "7.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" -dependencies = [ - "base64 0.12.3", - "pem", - "ring", - "serde", - "serde_json", - "simple_asn1", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -957,9 +879,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.82" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" +checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" [[package]] name = "linked-hash-map" @@ -978,11 +900,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -1033,16 +955,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.3.7" @@ -1052,35 +964,6 @@ dependencies = [ "adler32", ] -[[package]] -name = "miniz_oxide" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "mio" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" -dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - [[package]] name = "mio" version = "0.7.7" @@ -1089,21 +972,19 @@ checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" dependencies = [ "libc", "log", - "miow 0.3.6", + "miow", "ntapi", - "winapi 0.3.9", + "winapi", ] [[package]] name = "miow" -version = "0.2.2" +version = "0.3.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "socket2", + "winapi", ] [[package]] @@ -1135,14 +1016,12 @@ dependencies = [ ] [[package]] -name = "net2" -version = "0.2.37" +name = "ntapi" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1216,12 +1095,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" - [[package]] name = "once_cell" version = "1.5.2" @@ -1235,7 +1108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "lazy_static", "libc", @@ -1288,12 +1161,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.1.57", "smallvec", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1382,12 +1255,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - [[package]] name = "pin-project-lite" version = "0.2.4" @@ -1415,7 +1282,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide 0.3.7", + "miniz_oxide", ] [[package]] @@ -1497,9 +1364,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -1542,7 +1409,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" dependencies = [ - "getrandom 0.2.1", + "getrandom 0.2.2", ] [[package]] @@ -1633,17 +1500,17 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "fd281b1030aa675fb90aa994d07187645bb3c8fc756ca766e7c3070b439de9de" dependencies = [ - "base64 0.13.0", - "bytes 0.5.6", + "base64", + "bytes", "encoding_rs", "futures-core", "futures-util", @@ -1656,14 +1523,13 @@ dependencies = [ "lazy_static", "log", "mime", - "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.4", + "pin-project-lite", 
"serde", "serde_urlencoded", - "tokio 0.2.24", - "tokio-tls", + "tokio", + "tokio-native-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1693,7 +1559,7 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1752,7 +1618,7 @@ dependencies = [ "parking_lot", "pear", "percent-encoding", - "pin-project-lite 0.2.4", + "pin-project-lite", "ref-cast", "smallvec", "state", @@ -1767,7 +1633,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "js_int", @@ -1785,7 +1651,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "http", "percent-encoding", @@ -1800,7 +1666,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1811,7 +1677,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "ruma-api", "ruma-common", @@ -1825,7 +1691,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "assign", "http", @@ -1844,7 +1710,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "maplit", @@ -1857,7 +1723,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-common", @@ -1871,7 +1737,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = 
"git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1882,7 +1748,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "js_int", "ruma-api", @@ -1897,10 +1763,10 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "paste", - "rand 0.8.2", + "rand 0.8.3", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -1911,7 +1777,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro2", "quote", @@ -1922,12 +1788,12 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "form_urlencoded", "itoa", @@ -1940,7 +1806,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1951,9 +1817,9 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0635b407290abf5f34d726e1e690c92c07c738e5#0635b407290abf5f34d726e1e690c92c07c738e5" +source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ - "base64 0.13.0", + "base64", "ring", "ruma-identifiers", "ruma-serde", @@ -1967,18 +1833,12 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", "crossbeam-utils", ] -[[package]] -name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - [[package]] name = "rustc_version" version = "0.2.3" @@ -1994,7 +1854,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.13.0", + "base64", "log", "ring", "sct", @@ -2014,7 +1874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2073,18 +1933,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bdd36f49e35b61d49efd8aa7fc068fd295961fd2286d0b2ee9a4c7a14e99cc3" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.119" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552954ce79a059ddd5fd68c271592374bd15cab2274970380c000118aeffe1cd" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -2186,9 +2046,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2215,15 +2075,15 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=no-db#ce1607af897ef2e60fb65989f1c47bca4d5024a0" +source = "git+https://github.com/ruma/state-res?rev=791c66d73cf064d09db0cdf767d5fef43a343425#791c66d73cf064d09db0cdf767d5fef43a343425" dependencies = [ "itertools", + "log", "maplit", "ruma", "serde", "serde_json", "thiserror", - "tracing", ] [[package]] @@ -2277,9 +2137,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "syn" -version = "1.0.58" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", @@ -2292,12 +2152,12 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "rand 0.8.2", + "rand 0.8.3", "redox_syscall 0.2.4", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2322,28 +2182,18 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" +checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "time" -version = "0.1.43" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "time" -version = "0.2.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "273d3ed44dca264b0d6b3665e8d48fb515042d42466fad93d2a45b90ec4058f7" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -2351,7 +2201,7 @@ dependencies = [ "stdweb", "time-macros", "version_check", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2379,9 +2229,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -2394,38 +2244,21 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.24" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.11", - "slab", -] - -[[package]] -name = "tokio" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca04cec6ff2474c638057b65798f60ac183e5e79d3448bb7163d36a39cff6ec" +checksum = "8efab2086f17abcddb8f756117665c958feee6b2e39974c2f1600592ab3a4195" dependencies = [ "autocfg", - "bytes 1.0.1", + "bytes", "libc", "memchr", - "mio 0.7.7", + "mio", "num_cpus", "once_cell", - "pin-project-lite 0.2.4", + "pin-project-lite", "signal-hook-registry", "tokio-macros", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2439,6 +2272,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -2457,46 +2300,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" dependencies = [ "futures-core", - "pin-project-lite 0.2.4", - "tokio 1.0.2", -] - -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio 0.2.24", + "pin-project-lite", + "tokio", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "feb971a26599ffd28066d387f109746df178eff14d5ea1e235015c5601967a4b" dependencies = [ - "bytes 0.5.6", + "async-stream", + "bytes", "futures-core", "futures-sink", "log", - "pin-project-lite 0.1.11", - "tokio 0.2.24", -] - -[[package]] -name = "tokio-util" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae4751faa60b9f96dd8344d74592e5a17c0c9a220413dbc6942d14139bbfcc" -dependencies = [ - "bytes 1.0.1", - "futures-core", - "futures-sink", - "log", - 
"pin-project-lite 0.2.4", - "tokio 1.0.2", + "pin-project-lite", + "tokio", "tokio-stream", ] @@ -2511,9 +2331,9 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" @@ -2521,24 +2341,11 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.4", - "tracing-attributes", + "cfg-if", + "pin-project-lite", "tracing-core", ] -[[package]] -name = "tracing-attributes" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tracing-core" version = "0.1.17" @@ -2560,18 +2367,22 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" +checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" dependencies = [ "async-trait", - "backtrace", + "cfg-if", + "data-encoding", "enum-as-inner", - "futures", + "futures-channel", + "futures-io", + "futures-util", "idna", + "ipnet", "lazy_static", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec", "thiserror", "tokio 0.2.24", @@ -2580,17 +2391,17 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" +checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" dependencies = [ - "backtrace", - "cfg-if 0.1.10", - "futures", + "cfg-if", + "futures-util", "ipconfig", "lazy_static", "log", "lru-cache", + "parking_lot", "resolv-conf", "smallvec", "thiserror", @@ -2622,15 +2433,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2709,17 +2511,17 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.1+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -2727,9 +2529,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" 
+version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" dependencies = [ "bumpalo", "lazy_static", @@ -2742,11 +2544,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2754,9 +2556,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2764,9 +2566,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2", "quote", @@ -2777,15 +2579,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" +checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", @@ -2803,9 +2605,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2bb9fc8309084dd7cd651336673844c1d47f8ef6d2091ec160b27f5c4aa277" +checksum = "4a32b378380f4e9869b22f0b5177c68a5519f03b3454fde0b291455ddbae266c" [[package]] name = "widestring" @@ -2813,12 +2615,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -2829,12 +2625,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -2853,7 +2643,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" dependencies = [ - "winapi 0.3.9", + "winapi", ] 
[[package]] @@ -2862,17 +2652,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index dd37838..de6a966 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,24 +18,24 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18 #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0635b407290abf5f34d726e1e690c92c07c738e5" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "no-db", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "1.0.2", features = ["macros", "time"] } +tokio = { version = "1.1.0", features = ["macros", "time", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries log = "0.4.11" # Used for rocket<->ruma conversions -http = "0.2.1" +http = "0.2.3" # Used to find data directory for default db path directories = "3.0.1" @@ -50,7 +50,7 @@ rand = "0.7.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.10.9" +reqwest = "0.11.0" # Used for conduit::Error type thiserror = "1.0.22" # Used to generate thumbnails for images @@ -60,7 +60,7 @@ base64 = "0.13.0" # Used when hashing the state ring = "0.16.19" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.19.6" +trust-dns-resolver = "0.20.0" # Used to find matching events for appservices regex = "1.4.2" # jwt jsonwebtokens diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6cd518d..97b6ad2 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -701,7 +701,7 @@ 
pub async fn sync_events_route( let delay = tokio::time::sleep(duration); tokio::pin!(delay); tokio::select! { - _ = &mut delay => {} + _ = &mut delay, if delay.is_elapsed() => {} _ = watcher => {} } } diff --git a/src/database.rs b/src/database.rs index ea65d6f..b841ab9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -106,8 +106,7 @@ impl Database { db.open_tree("global")?, db.open_tree("servertimeout_signingkey")?, config, - ) - .await?, + )?, users: users::Users { userid_password: db.open_tree("userid_password")?, userid_displayname: db.open_tree("userid_displayname")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 7eb162b..2ed6a9f 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -27,11 +27,7 @@ pub struct Globals { } impl Globals { - pub async fn load( - globals: sled::Tree, - server_keys: sled::Tree, - config: Config, - ) -> Result { + pub fn load(globals: sled::Tree, server_keys: sled::Tree, config: Config) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? .expect("utils::generate_keypair always returns Some"); @@ -83,11 +79,9 @@ impl Globals { config, keypair: Arc::new(keypair), reqwest_client, - dns_resolver: TokioAsyncResolver::tokio_from_system_conf() - .await - .map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, servertimeout_signingkey: server_keys, diff --git a/src/server_server.rs b/src/server_server.rs index 20d76f1..adf3c58 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -25,7 +25,7 @@ use ruma::{ }; use state_res::{Event, EventMap, StateMap}; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashMap}, convert::TryFrom, fmt::Debug, future::Future, @@ -839,7 +839,7 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); - match state_res::StateResolution::resolve( + let res = match state_res::StateResolution::resolve( &pdu.room_id, &RoomVersionId::Version6, &fork_states @@ -856,10 +856,7 @@ pub async fn send_transaction_message_route<'a>( .collect(), &mut auth_cache, ) { - Ok(res) => res - .into_iter() - .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap()))) - .collect(), + Ok(res) => res, Err(_) => { resolved_map.insert( pdu.event_id().clone(), @@ -867,7 +864,29 @@ pub async fn send_transaction_message_route<'a>( ); continue 'main_pdu_loop; } + }; + let mut resolved = BTreeMap::new(); + for (k, id) in res { + // We should know of the event but just incase + let pdu = match auth_cache.get(&id) { + Some(pdu) => pdu.clone(), + None => { + match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache) + .await + .map(|mut vec| vec.pop()) + { + Ok(Some(aev)) => aev, + _ => { + resolved_map + .insert(event_id.clone(), Err("Failed to fetch event".into())); + continue 'main_pdu_loop; + } + } + } + }; + resolved.insert(k, pdu); } + resolved }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). 
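// The resolution step above reduces, per state key, to a cache-first lookup
// with a network fallback; stripped of the error bookkeeping, and using the
// same names as in the hunk above, the per-entry lookup is roughly:
//
//     let pdu = match auth_cache.get(&id) {
//         Some(pdu) => pdu.clone(),
//         None => match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache)
//             .await
//             .map(|mut events| events.pop())
//         {
//             Ok(Some(pdu)) => pdu,
//             // if the event can be found nowhere, give up on this transaction PDU
//             _ => continue 'main_pdu_loop,
//         },
//     };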
@@ -1199,37 +1218,67 @@ fn append_incoming_pdu( new_room_leaves: &[EventId], state: Option>>, ) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + if let Some(state) = state { + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pduid.to_vec(), + ); + } + None => { + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + // TODO: can we use are current state if we just add this event to the end of our + // pduid_pdu tree?? + let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + + db.rooms.append_pdu( + &*pdu, + utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), + count, + pdu_id.clone().into(), + &new_room_leaves, + &db, + )?; + // TODO: is this ok... + db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pdu_id.to_vec(), + ); + } + } + } + + info!("Force update of state for {:?}", pdu); + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + } + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); - // Update the state of the room if needed - // We can tell if we need to do this based on wether state resolution took place or not - if let Some(state) = state { - let new = state - .into_iter() - .map(|((ev, k), pdu)| { - Ok(( - ( - ev, - k.ok_or_else(|| Error::Conflict("State contained non state event"))?, - ), - db.rooms - .get_pdu_id(pdu.event_id()) - .ok() - .flatten() - .ok_or_else(|| Error::Conflict("Resolved state contained unknown event"))? - .to_vec(), - )) - }) - .collect::>()?; - - info!("Force update of state for {:?}", pdu); - - db.rooms.force_state(pdu.room_id(), new, &db.globals)?; - } - // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; From 6661de50ab5069838f60893afea9a421f6f034e3 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Fri, 29 Jan 2021 21:45:33 -0500 Subject: [PATCH 38/62] Fix and integrate outlier tree, build forks after adding event to DB --- src/database.rs | 2 +- src/database/rooms.rs | 95 +++++++----- src/server_server.rs | 334 +++++++++++++++++++++++++----------------- 3 files changed, 263 insertions(+), 168 deletions(-) diff --git a/src/database.rs b/src/database.rs index b841ab9..a9cc362 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, + pduid_outlierpdu: db.open_tree("pduid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index a3f3aab..d459aee 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,9 +27,10 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, + time::Duration, }; -use super::admin::AdminCommand; +use super::{admin::AdminCommand, sending::Sending}; /// The unique identifier of each state group. /// @@ -67,7 +68,7 @@ pub struct Rooms { pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) /// Any pdu that has passed the steps up to auth with auth_events. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) pduid_outlierpdu: sled::Tree, } impl Rooms { @@ -85,13 +86,20 @@ impl Rooms { let mut pduid = room_id.as_bytes().to_vec(); pduid.push(0xff); pduid.extend_from_slice(&pduid_short?); - self.pduid_pdu.get(&pduid)?.map_or_else( - || Err(Error::bad_database("Failed to find PDU in state snapshot.")), - |b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }, - ) + match self.pduid_pdu.get(&pduid)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")), + None => self + .pduid_outlierpdu + .get(pduid)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + } }) .filter_map(|r| r.ok()) .map(|pdu| { @@ -137,12 +145,20 @@ impl Rooms { Ok::<_, Error>(Some(( pdu_id.clone().into(), - serde_json::from_slice::( - &self.pduid_pdu.get(&pdu_id)?.ok_or_else(|| { - Error::bad_database("PDU in state not found in database.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid PDU bytes in room state."))?, + match self.pduid_pdu.get(&pdu_id)? { + Some(b) => serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + None => self + .pduid_outlierpdu + .get(pdu_id)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })??, + }, ))) }) } else { @@ -307,9 +323,12 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? 
{ + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) @@ -328,13 +347,17 @@ impl Rooms { .get(event_id.as_bytes())? .map_or(Ok(None), |pdu_id| { Ok(Some( - serde_json::from_slice(&self.pduid_pdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("eventid_pduid points to nonexistent pdu.") - })?) + serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { + Some(b) => b, + None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, + }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) }) } + /// Returns the pdu. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { @@ -420,23 +443,27 @@ impl Rooms { /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + if let Some(id) = self.eventid_pduid.get(event_id.as_bytes())? { + self.pduid_outlierpdu.get(id)?.map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) + } else { + Ok(None) + } } /// Returns true if the event_id was previously inserted. - pub fn append_pdu_outlier(&self, event_id: &EventId, pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + pub fn append_pdu_outlier(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result { + log::info!("Number of outlier pdu's {}", self.pduid_outlierpdu.len()); + + // we need to be able to find it by event_id + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + let res = self - .eventid_outlierpdu + .pduid_outlierpdu .insert( - event_id.as_bytes(), + pdu_id, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), ) .map(|op| op.is_some())?; @@ -484,7 +511,9 @@ impl Rooms { } // We no longer keep this pdu as an outlier - self.eventid_outlierpdu.remove(pdu.event_id().as_bytes())?; + if let Some(id) = self.eventid_pduid.remove(pdu.event_id().as_bytes())? { + self.pduid_outlierpdu.remove(id)?; + } self.replace_pdu_leaves(&pdu.room_id, leaves)?; diff --git a/src/server_server.rs b/src/server_server.rs index adf3c58..ad0a1a4 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -614,7 +614,7 @@ pub async fn send_transaction_message_route<'a>( // 7. if not timeline event: stop // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous): (_, Vec>) = match validate_event( + let (pdu, previous): (Arc, Vec>) = match validate_event( &db, value, event_id.clone(), @@ -638,69 +638,75 @@ pub async fn send_transaction_message_route<'a>( None }; + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(pdu.event_id(), &pdu)?; + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. 
These are not timeline events. // // Step 10. check the auth of the event passes based on the calculated state of the event - let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = - match db - .sending - .send_federation_request( - &db.globals, + let (mut state_at_event, incoming_auth_events): ( + StateMap>, + Vec>, + ) = match db + .sending + .send_federation_request( + &db.globals, + server_name, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, + ) + .await + { + Ok(res) => { + let state = fetch_events( + &db, server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, + &pub_key_map, + &res.pdu_ids, + &mut auth_cache, ) - .await - { - Ok(res) => { - let state = fetch_events( + .await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + todo!("Server sent us an invalid state") + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect(); + + ( + state, + fetch_events( &db, server_name, &pub_key_map, - &res.pdu_ids, + &res.auth_chain_ids, &mut auth_cache, ) - .await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - ( - state, - fetch_events( - &db, - server_name, - &pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await?, - ) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await?, + ) + } + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( @@ -750,12 +756,25 @@ pub async fn send_transaction_message_route<'a>( // // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. - let (mut fork_states, extremities) = match calculate_forward_extremities( + let extremities = match calculate_forward_extremities(&db, &pdu).await { + Ok(fork_ids) => fork_ids, + Err(_) => { + resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); + continue; + } + }; + + // Now that the event has passed all auth it is added into the timeline, we do have to + // find the leaves otherwise we would do this sooner + append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + + let mut fork_states = match build_forward_extremity_snapshots( &db, - &pdu, + pdu.room_id(), server_name, - &pub_key_map, current_state, + &extremities, + &pub_key_map, &mut auth_cache, ) .await @@ -767,6 +786,9 @@ pub async fn send_transaction_message_route<'a>( } }; + // Make this the state after (since we appended_incoming_pdu this should agree with our servers + // current state). 
+ state_at_event.insert((pdu.kind(), pdu.state_key()), pdu.clone()); // add the incoming events to the mix of state snapshots // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets fork_states.insert(state_at_event.clone()); @@ -840,7 +862,7 @@ pub async fn send_transaction_message_route<'a>( ); let res = match state_res::StateResolution::resolve( - &pdu.room_id, + pdu.room_id(), &RoomVersionId::Version6, &fork_states .into_iter() @@ -865,6 +887,7 @@ pub async fn send_transaction_message_route<'a>( continue 'main_pdu_loop; } }; + let mut resolved = BTreeMap::new(); for (k, id) in res { // We should know of the event but just incase @@ -890,10 +913,9 @@ pub async fn send_transaction_message_route<'a>( }; // Add the event to the DB and update the forward extremities (via roomid_pduleaves). - append_incoming_pdu( + update_resolved_state( &db, - &pdu, - &extremities, + pdu.room_id(), if update_state { Some(state_at_forks) } else { @@ -905,7 +927,10 @@ pub async fn send_transaction_message_route<'a>( resolved_map.insert(pdu.event_id().clone(), Ok(())); } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(send_transaction_message::v1::Response { + pdus: dbg!(resolved_map), + } + .into()) } /// An async function that can recursively calls itself. @@ -1036,13 +1061,14 @@ async fn fetch_check_auth_events( Ok(()) } -/// Find the event and auth it. +/// Find the event and auth it. Once the event is validated (steps 1 - 8) +/// it is appended to the outliers Tree. /// /// 1. Look in the main timeline (pduid_pdu tree) /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation /// 4. TODO: Ask other servers over federation? -async fn fetch_events( +pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, key_map: &PublicKeyMap, @@ -1071,6 +1097,13 @@ async fn fetch_events( .await .map_err(|_| Error::Conflict("Authentication of event failed"))?; + // create the pduid for this event but stick it in the outliers DB + let count = db.globals.next_count()?; + let mut pdu_id = pdu.room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); + + db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; pdu } Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), @@ -1084,7 +1117,7 @@ async fn fetch_events( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. -async fn fetch_signing_keys( +pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, ) -> Result> { @@ -1108,26 +1141,28 @@ async fn fetch_signing_keys( /// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote). /// /// The state snapshot of the incoming event __needs__ to be added to the resulting list. -async fn calculate_forward_extremities( +pub(crate) async fn calculate_forward_extremities( db: &Database, pdu: &PduEvent, - origin: &ServerName, - pub_key_map: &PublicKeyMap, - current_state: BTreeMap<(EventType, Option), Arc>, - auth_cache: &mut EventMap>, -) -> Result<(BTreeSet>>, Vec)> { +) -> Result> { let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?; let mut is_incoming_leaf = true; // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? 
- // + if current_leaves.contains(pdu.event_id()) { + is_incoming_leaf = false; + // Not sure what to do here + } + // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - if current_leaves.contains(pdu.event_id()) || db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - is_incoming_leaf = false; - // Not sure what to do here + // + // We first check if know of the event and then don't include it as a forward + // extremity if it is a timeline event + if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { + is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); } // TODO: @@ -1144,11 +1179,34 @@ async fn calculate_forward_extremities( } } - let current_hash = db.rooms.current_state_hash(pdu.room_id())?; + // Add the incoming event only if it is a leaf, we do this after fetching all the + // state since we know we have already fetched the state of the incoming event so lets + // not do it again! + if is_incoming_leaf { + current_leaves.push(pdu.event_id().clone()); + } + + Ok(current_leaves) +} + +/// This should always be called after the incoming event has been appended to the DB. +/// +/// This guarentees that the incoming event will be in the state sets (at least our servers +/// and the sending server). +pub(crate) async fn build_forward_extremity_snapshots( + db: &Database, + room_id: &RoomId, + origin: &ServerName, + current_state: StateMap>, + current_leaves: &[EventId], + pub_key_map: &PublicKeyMap, + auth_cache: &mut EventMap>, +) -> Result>>> { + let current_hash = db.rooms.current_state_hash(room_id)?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); - for id in ¤t_leaves { + for id in current_leaves { if let Some(id) = db.rooms.get_pdu_id(id)? { let state_hash = db .rooms @@ -1158,14 +1216,21 @@ async fn calculate_forward_extremities( if current_hash.as_ref() == Some(&state_hash) { includes_current_state = true; } - let state = db + + let mut state_before = db .rooms - .state_full(&pdu.room_id, &state_hash)? + .state_full(room_id, &state_hash)? .into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect(); + .collect::>(); - fork_states.insert(state); + // Now it's the state after + if let Some(pdu) = db.rooms.get_pdu_from_id(&id)? { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, Arc::new(pdu)); + } + + fork_states.insert(state_before); } else { let res = db .sending @@ -1173,7 +1238,7 @@ async fn calculate_forward_extremities( &db.globals, origin, get_room_state_ids::v1::Request { - room_id: pdu.room_id(), + room_id, event_id: id, }, ) @@ -1181,41 +1246,38 @@ async fn calculate_forward_extremities( // TODO: This only adds events to the auth_cache, there is for sure a better way to // do this... - fetch_events(&db, origin, &pub_key_map, &res.auth_chain_ids, auth_cache).await?; + fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - let state = fetch_events(&db, origin, &pub_key_map, &res.pdu_ids, auth_cache) + let mut state_before = fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) .await? .into_iter() .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); + .collect::>(); - fork_states.insert(state); + if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) + .await? 
+ .pop() + { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, pdu); + } + + // Now it's the state after + fork_states.insert(state_before); } } - // Add the incoming event only if it is a leaf, we do this after fetching all the - // state since we know we have already fetched the state of the incoming event so lets - // not do it again! - if is_incoming_leaf { - current_leaves.push(pdu.event_id().clone()); - } - // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { fork_states.insert(current_state); } - Ok((fork_states, current_leaves)) + Ok(fork_states) } -/// Update the room state to be the resolved state and add the fully auth'ed event -/// to the DB. -/// -/// TODO: Since all these events passed state resolution can we trust them to add -fn append_incoming_pdu( +pub(crate) fn update_resolved_state( db: &Database, - pdu: &PduEvent, - new_room_leaves: &[EventId], + room_id: &RoomId, state: Option>>, ) -> Result<()> { // Update the state of the room if needed @@ -1236,44 +1298,50 @@ fn append_incoming_pdu( ); } None => { - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - // TODO: can we use are current state if we just add this event to the end of our - // pduid_pdu tree?? - let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &*pdu, - utils::to_canonical_object(&*pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &new_room_leaves, - &db, - )?; - // TODO: is this ok... - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pdu_id.to_vec(), - ); + error!("We didn't append an event as an outlier\n{:?}", pdu); } } } - info!("Force update of state for {:?}", pdu); - - db.rooms - .force_state(pdu.room_id(), new_state, &db.globals)?; + db.rooms.force_state(room_id, new_state, &db.globals)?; } + Ok(()) +} + +/// Append the incoming event setting the state snapshot to the state from the +/// server that sent the event. +pub(crate) fn append_incoming_pdu( + db: &Database, + pdu: &PduEvent, + new_room_leaves: &[EventId], + state: &StateMap>, +) -> Result<()> { + // Update the state of the room if needed + // We can tell if we need to do this based on wether state resolution took place or not + let mut new_state = HashMap::new(); + for ((ev_type, state_k), pdu) in state { + match db.rooms.get_pdu_id(pdu.event_id())? { + Some(pduid) => { + new_state.insert( + ( + ev_type.clone(), + state_k + .clone() + .ok_or_else(|| Error::Conflict("State contained non state event"))?, + ), + pduid.to_vec(), + ); + } + None => { + error!("We didn't append an event as an outlier\n{:?}", pdu); + } + } + } + + db.rooms + .force_state(pdu.room_id(), new_state, &db.globals)?; + let count = db.globals.next_count()?; let mut pdu_id = pdu.room_id.as_bytes().to_vec(); pdu_id.push(0xff); @@ -1281,7 +1349,7 @@ fn append_incoming_pdu( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + let state_hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; db.rooms.append_pdu( pdu, @@ -1292,9 +1360,7 @@ fn append_incoming_pdu( &db, )?; - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - db.rooms.set_room_state(&pdu.room_id, &statehashid)?; + db.rooms.set_room_state(pdu.room_id(), &state_hash)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; From e09be2f7ee31a97b615a86e5bdae8ac75ec93ff6 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 30 Jan 2021 12:43:43 -0500 Subject: [PATCH 39/62] Add incoming event to the current room state then resolve All state snapshots that are used in the resolve call are state after snapshots, they have the event inserted. --- src/server_server.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ad0a1a4..f55b377 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -768,9 +768,11 @@ pub async fn send_transaction_message_route<'a>( // find the leaves otherwise we would do this sooner append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + // This will create the state after any state snapshot it builds + // So current_state will have the incoming event inserted to it let mut fork_states = match build_forward_extremity_snapshots( &db, - pdu.room_id(), + pdu.clone(), server_name, current_state, &extremities, @@ -1195,14 +1197,14 @@ pub(crate) async fn calculate_forward_extremities( /// and the sending server). pub(crate) async fn build_forward_extremity_snapshots( db: &Database, - room_id: &RoomId, + pdu: Arc, origin: &ServerName, - current_state: StateMap>, + mut current_state: StateMap>, current_leaves: &[EventId], pub_key_map: &PublicKeyMap, auth_cache: &mut EventMap>, ) -> Result>>> { - let current_hash = db.rooms.current_state_hash(room_id)?; + let current_hash = db.rooms.current_state_hash(pdu.room_id())?; let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); @@ -1219,7 +1221,7 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut state_before = db .rooms - .state_full(room_id, &state_hash)? + .state_full(pdu.room_id(), &state_hash)? 
.into_iter() .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect::>(); @@ -1238,7 +1240,7 @@ pub(crate) async fn build_forward_extremity_snapshots( &db.globals, origin, get_room_state_ids::v1::Request { - room_id, + room_id: pdu.room_id(), event_id: id, }, ) @@ -1269,6 +1271,9 @@ pub(crate) async fn build_forward_extremity_snapshots( // This guarantees that our current room state is included if !includes_current_state && current_hash.is_some() { + error!("Did not include current state"); + current_state.insert((pdu.kind(), pdu.state_key()), pdu); + fork_states.insert(current_state); } From 64374b4679f6b63dc36d3da7ab5d58753c1980d8 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Feb 2021 12:44:30 -0500 Subject: [PATCH 40/62] Use eventId when saving outliers --- src/client_server/sync.rs | 2 +- src/database.rs | 2 +- src/database/rooms.rs | 76 +++++++++++++++++++++++++-------------- src/server_server.rs | 69 +++++++++++++++++------------------ 4 files changed, 84 insertions(+), 65 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 97b6ad2..6cd518d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -701,7 +701,7 @@ pub async fn sync_events_route( let delay = tokio::time::sleep(duration); tokio::pin!(delay); tokio::select! { - _ = &mut delay, if delay.is_elapsed() => {} + _ = &mut delay => {} _ = watcher => {} } } diff --git a/src/database.rs b/src/database.rs index a9cc362..b841ab9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,7 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - pduid_outlierpdu: db.open_tree("pduid_outlierpdu")?, + eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d459aee..ee8f0ab 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -27,10 +27,9 @@ use std::{ convert::{TryFrom, TryInto}, mem, sync::Arc, - time::Duration, }; -use super::{admin::AdminCommand, sending::Sending}; +use super::admin::AdminCommand; /// The unique identifier of each state group. /// @@ -67,13 +66,16 @@ pub struct Rooms { pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) - /// Any pdu that has passed the steps up to auth with auth_events. - pub(super) pduid_outlierpdu: sled::Tree, + /// RoomId + EventId -> outlier PDU. + /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. + pub(super) eventid_outlierpdu: sled::Tree, } impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. + /// + /// TODO: Should this check for outliers, it does now. pub fn state_full( &self, room_id: &RoomId, @@ -90,7 +92,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .pduid_outlierpdu + .eventid_outlierpdu .get(pduid)? .map(|b| { serde_json::from_slice::(&b) @@ -118,6 +120,8 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + /// + /// TODO: Should this check for outliers, it does now. 
pub fn state_get( &self, room_id: &RoomId, @@ -149,7 +153,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, None => self - .pduid_outlierpdu + .eventid_outlierpdu .get(pdu_id)? .map(|b| { serde_json::from_slice::(&b) @@ -260,6 +264,8 @@ impl Rooms { } }; + // Because of outliers this could also be an eventID but that + // is handled by `state_full` let pdu_id_short = pdu_id .splitn(2, |&b| b == 0xff) .nth(1) @@ -325,9 +331,12 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => self + .eventid_outlierpdu + .get(event_id.as_bytes())? + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -342,6 +351,8 @@ impl Rooms { } /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -349,9 +360,12 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self.pduid_outlierpdu.get(pdu_id)?.ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => self + .eventid_outlierpdu + .get(event_id.as_bytes())? + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })?, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -443,27 +457,34 @@ impl Rooms { /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - if let Some(id) = self.eventid_pduid.get(event_id.as_bytes())? { - self.pduid_outlierpdu.get(id)?.map_or(Ok(None), |pdu| { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) - } else { - Ok(None) - } } /// Returns true if the event_id was previously inserted. - pub fn append_pdu_outlier(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.pduid_outlierpdu.len()); + pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { + log::info!( + "Number of outlier pdu's {:#?}", + self.eventid_outlierpdu + .iter() + .map(|pair| { + let (_k, v) = pair.unwrap(); + serde_json::from_slice::(&v).unwrap() + }) + .collect::>() + ); - // we need to be able to find it by event_id - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &*pdu_id)?; + let mut key = pdu.room_id().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu.event_id().as_bytes()); let res = self - .pduid_outlierpdu + .eventid_outlierpdu .insert( - pdu_id, + &key, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), ) .map(|op| op.is_some())?; @@ -511,9 +532,10 @@ impl Rooms { } // We no longer keep this pdu as an outlier - if let Some(id) = self.eventid_pduid.remove(pdu.event_id().as_bytes())? 
{ - self.pduid_outlierpdu.remove(id)?; - } + let mut key = pdu.room_id().as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pdu.event_id().as_bytes()); + self.eventid_outlierpdu.remove(key)?; self.replace_pdu_leaves(&pdu.room_id, leaves)?; diff --git a/src/server_server.rs b/src/server_server.rs index f55b377..5177f96 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -18,7 +18,6 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, - events::EventType, serde::to_canonical_value, signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap}, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, @@ -638,12 +637,8 @@ pub async fn send_transaction_message_route<'a>( None }; - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; + db.rooms.append_pdu_outlier(&pdu)?; // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -1079,37 +1074,28 @@ pub(crate) async fn fetch_events( ) -> Result>> { let mut pdus = vec![]; for id in events { + // `get_pdu` checks the outliers tree for us let pdu = match db.rooms.get_pdu(&id)? { Some(pdu) => Arc::new(pdu), - None => match db.rooms.get_pdu_outlier(&id)? { - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = - validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|_| Error::Conflict("Authentication of event failed"))?; - // create the pduid for this event but stick it in the outliers DB - let count = db.globals.next_count()?; - let mut pdu_id = pdu.room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - db.rooms.append_pdu_outlier(&pdu_id, &pdu)?; - pdu - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + db.rooms.append_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, }; pdus.push(pdu); @@ -1193,7 +1179,7 @@ pub(crate) async fn calculate_forward_extremities( /// This should always be called after the incoming event has been appended to the DB. /// -/// This guarentees that the incoming event will be in the state sets (at least our servers +/// This guarantees that the incoming event will be in the state sets (at least our servers /// and the sending server). 
pub(crate) async fn build_forward_extremity_snapshots( db: &Database, @@ -1303,7 +1289,18 @@ pub(crate) fn update_resolved_state( ); } None => { - error!("We didn't append an event as an outlier\n{:?}", pdu); + let mut pduid = pdu.room_id().as_bytes().to_vec(); + pduid.push(0xff); + pduid.extend_from_slice(pdu.event_id().as_bytes()); + new_state.insert( + ( + ev_type, + state_k.ok_or_else(|| { + Error::Conflict("State contained non state event") + })?, + ), + pduid, + ); } } } From 591769d5f3fde6314ba84130898dc6202b9b5c98 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Feb 2021 17:02:56 -0500 Subject: [PATCH 41/62] Fiter PDU's before main incoming PDU loop --- src/database/rooms.rs | 11 +---- src/server_server.rs | 103 ++++++++++++++++++++++++++---------------- 2 files changed, 66 insertions(+), 48 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ee8f0ab..6ee29a6 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -466,16 +466,7 @@ impl Rooms { /// Returns true if the event_id was previously inserted. pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { - log::info!( - "Number of outlier pdu's {:#?}", - self.eventid_outlierpdu - .iter() - .map(|pair| { - let (_k, v) = pair.unwrap(); - serde_json::from_slice::(&v).unwrap() - }) - .collect::>() - ); + log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index 5177f96..2cfbc6e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -533,6 +533,54 @@ pub async fn send_transaction_message_route<'a>( } } + let mut resolved_map = BTreeMap::new(); + + let pdus_to_resolve = body + .pdus + .iter() + .filter_map(|pdu| { + // 1. Is a valid event, otherwise it is dropped. + // Ruma/PduEvent/StateEvent satisfies this + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); + + // If we have no idea about this room skip the PDU + let room_id = match value + .get("room_id") + .map(|id| match id { + CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), + _ => None, + }) + .flatten() + { + Some(id) => id, + None => { + resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); + return None; + } + }; + + // 1. check the server is in the room (optional) + match db.rooms.exists(&room_id) { + Ok(true) => {} + _ => { + resolved_map + .insert(event_id, Err("Room is unknown to this server".to_string())); + return None; + } + } + + // If we know of this pdu we don't need to continue processing it + // + // This check is essentially + if let Ok(Some(_)) = db.rooms.get_pdu_id(&event_id) { + return None; + } + + Some((event_id, value)) + }) + .collect::>(); + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere? // SPEC: // Servers MUST strictly enforce the JSON format specified in the appendices. @@ -540,35 +588,7 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - let mut resolved_map = BTreeMap::new(); - 'main_pdu_loop: for pdu in &body.pdus { - // 1. Is a valid event, otherwise it is dropped. 
- // Ruma/PduEvent/StateEvent satisfies this - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu); - - // If we have no idea about this room skip the PDU - let room_id = match value - .get("room_id") - .map(|id| match id { - CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(), - _ => None, - }) - .flatten() - { - Some(id) => id, - None => { - resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string())); - continue; - } - }; - - // 1. check the server is in the room (optional) - if !db.rooms.exists(&room_id)? { - resolved_map.insert(event_id, Err("Room is unknown to this server".to_string())); - continue; - } - + 'main_pdu_loop: for (event_id, value) in pdus_to_resolve { let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -921,13 +941,13 @@ pub async fn send_transaction_message_route<'a>( )?; // Event has passed all auth/stateres checks - resolved_map.insert(pdu.event_id().clone(), Ok(())); } - Ok(send_transaction_message::v1::Response { - pdus: dbg!(resolved_map), + if !resolved_map.is_empty() { + warn!("These PDU's failed {:?}", resolved_map); } - .into()) + + Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) } /// An async function that can recursively calls itself. @@ -1139,6 +1159,7 @@ pub(crate) async fn calculate_forward_extremities( // Make sure the incoming event is not already a forward extremity // FIXME: I think this could happen if different servers send us the same event?? if current_leaves.contains(pdu.event_id()) { + error!("The incoming event is already present in get_pdu_leaves BUG"); is_incoming_leaf = false; // Not sure what to do here } @@ -1147,11 +1168,12 @@ pub(crate) async fn calculate_forward_extremities( // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. // - // We first check if know of the event and then don't include it as a forward - // extremity if it is a timeline event - if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); - } + // We check this in the filter just before the main incoming PDU for loop + // so no already known event can make it this far. + // + // if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { + // is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); + // } // TODO: // [dendrite] Checks if any other leaves have been referenced and removes them @@ -1219,7 +1241,12 @@ pub(crate) async fn build_forward_extremity_snapshots( } fork_states.insert(state_before); + } else if id == pdu.event_id() { + // We add this snapshot after `build_forward_extremity_snapshots` is + // called which we requested from the sending server } else { + error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); + let res = db .sending .send_federation_request( From 74d530ae0eff76bbdd7a130cd17a645b5455676f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 20:00:01 -0500 Subject: [PATCH 42/62] Address review issues, fix forward extremity calc Keep track of all prev_events since if we know that an event is a prev_event it is referenced and does not qualify as a forward extremity. 
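In pseudocode, the leaf calculation this aims for (names are illustrative
only, assuming ruma's EventId and a std HashSet; the real implementation
uses the new `prevevent_parent` tree and `is_pdu_referenced`):

    fn next_leaves(
        current_leaves: Vec<EventId>,
        referenced: &HashSet<EventId>, // every id ever seen as a prev_event
        incoming_id: &EventId,
        incoming_prev_events: &[EventId],
    ) -> Vec<EventId> {
        let mut leaves: Vec<EventId> = current_leaves
            .into_iter()
            // anything the incoming event points at is now referenced,
            // so it stops being a forward extremity
            .filter(|id| !incoming_prev_events.contains(id) && !referenced.contains(id))
            .collect();
        // the incoming event itself only becomes a leaf if nothing references it yet
        if !referenced.contains(incoming_id) {
            leaves.push(incoming_id.clone());
        }
        leaves
    }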
--- src/client_server/push.rs | 5 +- src/database.rs | 3 +- src/database/globals.rs | 8 +- src/database/rooms.rs | 79 +++++---- src/server_server.rs | 341 ++++++++++++++++++-------------------- 5 files changed, 220 insertions(+), 216 deletions(-) diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 667d667..7c3e9d9 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -93,7 +93,10 @@ pub async fn get_pushrule_route( if let Some(rule) = rule { Ok(get_pushrule::Response { rule }.into()) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + Err(Error::BadRequest( + ErrorKind::NotFound, + "Push rule not found.", + )) } } diff --git a/src/database.rs b/src/database.rs index b841ab9..3fb8442 100644 --- a/src/database.rs +++ b/src/database.rs @@ -159,7 +159,8 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - eventid_outlierpdu: db.open_tree("eventid_outlierpdu")?, + roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, + prevevent_parent: db.open_tree("prevevent_parent")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 2ed6a9f..00b4568 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -27,7 +27,11 @@ pub struct Globals { } impl Globals { - pub fn load(globals: sled::Tree, server_keys: sled::Tree, config: Config) -> Result { + pub fn load( + globals: sled::Tree, + servertimeout_signingkey: sled::Tree, + config: Config, + ) -> Result { let bytes = &*globals .update_and_fetch("keypair", utils::generate_keypair)? .expect("utils::generate_keypair always returns Some"); @@ -84,7 +88,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, - servertimeout_signingkey: server_keys, + servertimeout_signingkey, }) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6ee29a6..abe8c65 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -68,7 +68,9 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: sled::Tree, + pub(super) roomeventid_outlierpdu: sled::Tree, + /// RoomId + EventId -> Parent PDU EventId. + pub(super) prevevent_parent: sled::Tree, } impl Rooms { @@ -92,7 +94,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(pduid)? .map(|b| { serde_json::from_slice::(&b) @@ -120,8 +122,6 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - /// - /// TODO: Should this check for outliers, it does now. pub fn state_get( &self, room_id: &RoomId, @@ -153,7 +153,7 @@ impl Rooms { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(pdu_id)? .map(|b| { serde_json::from_slice::(&b) @@ -203,7 +203,7 @@ impl Rooms { &event_type, &state_key .as_deref() - .expect("found a non state event in auth events"), + .ok_or_else(|| Error::bad_database("Saved auth event with no state key."))?, )? 
{ events.insert((event_type, state_key), pdu); } @@ -248,7 +248,7 @@ impl Rooms { let mut prefix = state_hash.to_vec(); prefix.push(0xff); - for ((event_type, state_key), pdu_id) in state { + for ((event_type, state_key), id_long) in state { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); @@ -266,7 +266,7 @@ impl Rooms { // Because of outliers this could also be an eventID but that // is handled by `state_full` - let pdu_id_short = pdu_id + let pdu_id_short = id_long .splitn(2, |&b| b == 0xff) .nth(1) .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; @@ -332,7 +332,7 @@ impl Rooms { serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, None => self - .eventid_outlierpdu + .roomeventid_outlierpdu .get(event_id.as_bytes())? .ok_or_else(|| { Error::bad_database("Event is not in pdu tree or outliers.") @@ -360,12 +360,10 @@ impl Rooms { Ok(Some( serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { Some(b) => b, - None => self - .eventid_outlierpdu - .get(event_id.as_bytes())? - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, + None => match self.roomeventid_outlierpdu.get(event_id.as_bytes())? { + Some(b) => b, + None => return Ok(None), + }, }) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, )) @@ -373,6 +371,8 @@ impl Rooms { } /// Returns the pdu. + /// + /// This does __NOT__ check the outliers `Tree`. pub fn get_pdu_from_id(&self, pdu_id: &IVec) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -436,7 +436,7 @@ impl Rooms { /// Replace the leaves of a room. /// - /// The provided `event_ids` become the new leaves, this enables an event having multiple + /// The provided `event_ids` become the new leaves, this allows a room to have multiple /// `prev_events`. pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); @@ -455,31 +455,42 @@ impl Rooms { Ok(()) } + pub fn is_pdu_referenced(&self, pdu: &PduEvent) -> Result { + let mut key = pdu.room_id().as_bytes().to_vec(); + key.extend_from_slice(pdu.event_id().as_bytes()); + self.prevevent_parent.contains_key(key).map_err(Into::into) + } + /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu + self.roomeventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) } - /// Returns true if the event_id was previously inserted. - pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result { - log::info!("Number of outlier pdu's {}", self.eventid_outlierpdu.len()); + /// Append the PDU as an outlier. + /// + /// Any event given to this will be processed (state-res) on another thread. 
+ pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { + log::info!( + "Number of outlier pdu's {}", + self.roomeventid_outlierpdu.len() + ); let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - let res = self - .eventid_outlierpdu - .insert( - &key, - &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), - ) - .map(|op| op.is_some())?; - Ok(res) + self.eventid_pduid + .insert(pdu.event_id().as_bytes(), key.as_slice())?; + + self.roomeventid_outlierpdu.insert( + &key, + &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), + )?; + Ok(()) } /// Creates a new persisted data unit and adds it to a room. @@ -526,7 +537,15 @@ impl Rooms { let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - self.eventid_outlierpdu.remove(key)?; + self.roomeventid_outlierpdu.remove(key)?; + + // We must keep track of all events that have been referenced. + for leaf in leaves { + let mut key = pdu.room_id().as_bytes().to_vec(); + key.extend_from_slice(leaf.as_bytes()); + self.prevevent_parent + .insert(key, pdu.event_id().as_bytes())?; + } self.replace_pdu_leaves(&pdu.room_id, leaves)?; @@ -541,6 +560,8 @@ impl Rooms { .expect("CanonicalJsonObject is always a valid String"), )?; + // This also replaces the eventid of any outliers with the correct + // pduid, removing the place holder. self.eventid_pduid .insert(pdu.event_id.as_bytes(), &*pdu_id)?; diff --git a/src/server_server.rs b/src/server_server.rs index 2cfbc6e..48d5956 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -571,8 +571,6 @@ pub async fn send_transaction_message_route<'a>( } // If we know of this pdu we don't need to continue processing it - // - // This check is essentially if let Ok(Some(_)) = db.rooms.get_pdu_id(&event_id) { return None; } @@ -664,64 +662,66 @@ pub async fn send_transaction_message_route<'a>( // the checks in this list starting at 1. These are not timeline events. // // Step 10. 
check the auth of the event passes based on the calculated state of the event - let (mut state_at_event, incoming_auth_events): ( - StateMap>, - Vec>, - ) = match db - .sending - .send_federation_request( - &db.globals, - server_name, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: pdu.event_id(), - }, - ) - .await - { - Ok(res) => { - let state = fetch_events( - &db, + // + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event + let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = + match db + .sending + .send_federation_request( + &db.globals, server_name, - &pub_key_map, - &res.pdu_ids, - &mut auth_cache, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: pdu.event_id(), + }, ) - .await?; - // Sanity check: there are no conflicting events in the state we received - let mut seen = BTreeSet::new(); - for ev in &state { - // If the key is already present - if !seen.insert((&ev.kind, &ev.state_key)) { - todo!("Server sent us an invalid state") - } - } - - let state = state - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect(); - - ( - state, - fetch_events( + .await + { + Ok(res) => { + let state = fetch_events( &db, server_name, &pub_key_map, - &res.auth_chain_ids, + &res.pdu_ids, &mut auth_cache, ) - .await?, - ) - } - Err(_) => { - resolved_map.insert( - pdu.event_id().clone(), - Err("Fetching state for event failed".into()), - ); - continue; - } - }; + .await?; + // Sanity check: there are no conflicting events in the state we received + let mut seen = BTreeSet::new(); + for ev in &state { + // If the key is already present + if !seen.insert((&ev.kind, &ev.state_key)) { + error!("Server sent us an invalid state"); + continue; + } + } + + let state = state + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect(); + + ( + state, + fetch_events( + &db, + server_name, + &pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, + ) + .await?, + ) + } + Err(_) => { + resolved_map.insert( + pdu.event_id().clone(), + Err("Fetching state for event failed".into()), + ); + continue; + } + }; // 10. This is the actual auth check for state at the event if !state_res::event_auth::auth_check( @@ -764,6 +764,7 @@ pub async fn send_transaction_message_route<'a>( pdu.event_id().clone(), Err("Event has been soft failed".into()), ); + continue; }; // Step 11. Ensure that the state is derived from the previous current state (i.e. we calculated by doing state res @@ -779,10 +780,6 @@ pub async fn send_transaction_message_route<'a>( } }; - // Now that the event has passed all auth it is added into the timeline, we do have to - // find the leaves otherwise we would do this sooner - append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; - // This will create the state after any state snapshot it builds // So current_state will have the incoming event inserted to it let mut fork_states = match build_forward_extremity_snapshots( @@ -805,10 +802,11 @@ pub async fn send_transaction_message_route<'a>( // Make this the state after (since we appended_incoming_pdu this should agree with our servers // current state). 
- state_at_event.insert((pdu.kind(), pdu.state_key()), pdu.clone()); - // add the incoming events to the mix of state snapshots + let mut state_after = state_at_event.clone(); + state_after.insert((pdu.kind(), pdu.state_key()), pdu.clone()); + // Add the incoming event to the mix of state snapshots // Since we are using a BTreeSet (yea this may be overkill) we guarantee unique state sets - fork_states.insert(state_at_event.clone()); + fork_states.insert(state_after.clone()); let fork_states = fork_states.into_iter().collect::>(); @@ -826,39 +824,27 @@ pub async fn send_transaction_message_route<'a>( update_state = true; // TODO: remove this is for current debugging Jan, 15 2021 - let mut number_fetches = 0_u32; let mut auth_events = vec![]; for map in &fork_states { let mut state_auth = vec![]; for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { let event = match auth_cache.get(auth_id) { Some(aev) => aev.clone(), - // We should know about every event at this point but just incase... - None => match fetch_events( - &db, - server_name, - &pub_key_map, - &[auth_id.clone()], - &mut auth_cache, - ) - .await - .map(|mut vec| { - number_fetches += 1; - vec.pop() - }) { - Ok(Some(aev)) => aev, - _ => { - resolved_map - .insert(event_id.clone(), Err("Failed to fetch event".into())); - continue 'main_pdu_loop; - } - }, + // The only events that haven't been added to the auth cache are + // events we have knowledge of previously + None => { + error!("Event was not present in auth_cache {}", auth_id); + resolved_map.insert( + event_id.clone(), + Err("Event was not present in auth cache".into()), + ); + continue 'main_pdu_loop; + } }; state_auth.push(event); } auth_events.push(state_auth); } - info!("{} event's were not in the auth_cache", number_fetches); // Add everything we will need to event_map auth_cache.extend( @@ -873,7 +859,7 @@ pub async fn send_transaction_message_route<'a>( .map(|pdu| (pdu.event_id().clone(), pdu)), ); auth_cache.extend( - state_at_event + state_after .into_iter() .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); @@ -911,17 +897,12 @@ pub async fn send_transaction_message_route<'a>( let pdu = match auth_cache.get(&id) { Some(pdu) => pdu.clone(), None => { - match fetch_events(&db, server_name, &pub_key_map, &[id], &mut auth_cache) - .await - .map(|mut vec| vec.pop()) - { - Ok(Some(aev)) => aev, - _ => { - resolved_map - .insert(event_id.clone(), Err("Failed to fetch event".into())); - continue 'main_pdu_loop; - } - } + error!("Event was not present in auth_cache {}", id); + resolved_map.insert( + event_id.clone(), + Err("Event was not present in auth cache".into()), + ); + continue 'main_pdu_loop; } }; resolved.insert(k, pdu); @@ -929,7 +910,12 @@ pub async fn send_transaction_message_route<'a>( resolved }; - // Add the event to the DB and update the forward extremities (via roomid_pduleaves). + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + + // Set the new room state to the resolved state update_resolved_state( &db, pdu.room_id(), @@ -1046,8 +1032,6 @@ fn validate_event<'a>( /// TODO: don't add as outlier if event is fetched as a result of gathering auth_events /// The check in `fetch_check_auth_events` is that a complete chain is found for the /// events `auth_events`. If the chain is found to have any missing events it fails. 
-/// -/// The `auth_cache` is filled instead of returning a `Vec`. async fn fetch_check_auth_events( db: &Database, origin: &ServerName, @@ -1073,7 +1057,6 @@ async fn fetch_check_auth_events( })??; stack.extend(ev.auth_events()); - auth_cache.insert(ev.event_id().clone(), ev); } Ok(()) } @@ -1085,6 +1068,9 @@ async fn fetch_check_auth_events( /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation /// 4. TODO: Ask other servers over federation? +/// +/// If the event is unknown to the `auth_cache` it is added. This guarantees that any +/// event we need to know of will be present. pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, @@ -1118,6 +1104,7 @@ pub(crate) async fn fetch_events( Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), }, }; + auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); pdus.push(pdu); } Ok(pdus) @@ -1167,13 +1154,9 @@ pub(crate) async fn calculate_forward_extremities( // If the incoming event is already referenced by an existing event // then do nothing - it's not a candidate to be a new extremity if // it has been referenced. - // - // We check this in the filter just before the main incoming PDU for loop - // so no already known event can make it this far. - // - // if db.rooms.get_pdu_id(pdu.event_id())?.is_some() { - // is_incoming_leaf = db.rooms.get_pdu_outlier(pdu.event_id())?.is_some(); - // } + if db.rooms.is_pdu_referenced(pdu)? { + is_incoming_leaf = false; + } // TODO: // [dendrite] Checks if any other leaves have been referenced and removes them @@ -1217,74 +1200,79 @@ pub(crate) async fn build_forward_extremity_snapshots( let mut includes_current_state = false; let mut fork_states = BTreeSet::new(); for id in current_leaves { - if let Some(id) = db.rooms.get_pdu_id(id)? { - let state_hash = db - .rooms - .pdu_state_hash(&id)? - .expect("found pdu with no statehash"); + match db.rooms.get_pdu_id(id)? { + // We can skip this because it is handled outside of this function + // The current server state and incoming event state are built to be + // the state after. + // This would be the incoming state from the server. + Some(_) if id == pdu.event_id() => {} + Some(pduid) if db.rooms.get_pdu_from_id(&pduid)?.is_some() => { + let state_hash = db + .rooms + .pdu_state_hash(&pduid)? + .expect("found pdu with no statehash"); - if current_hash.as_ref() == Some(&state_hash) { - includes_current_state = true; + if current_hash.as_ref() == Some(&state_hash) { + includes_current_state = true; + } + + let mut state_before = db + .rooms + .state_full(pdu.room_id(), &state_hash)? + .into_iter() + .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) + .collect::>(); + + // Now it's the state after + if let Some(pdu) = db.rooms.get_pdu_from_id(&pduid)? { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, Arc::new(pdu)); + } + + fork_states.insert(state_before); } + _ => { + error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - let mut state_before = db - .rooms - .state_full(pdu.room_id(), &state_hash)? - .into_iter() - .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) - .collect::>(); + let res = db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id: pdu.room_id(), + event_id: id, + }, + ) + .await?; - // Now it's the state after - if let Some(pdu) = db.rooms.get_pdu_from_id(&id)? 
{ - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, Arc::new(pdu)); + // TODO: This only adds events to the auth_cache, there is for sure a better way to + // do this... + fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; + + let mut state_before = + fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) + .await? + .into_iter() + .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) + .collect::>(); + + if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) + .await? + .pop() + { + let key = (pdu.kind.clone(), pdu.state_key()); + state_before.insert(key, pdu); + } + + // Now it's the state after + fork_states.insert(state_before); } - - fork_states.insert(state_before); - } else if id == pdu.event_id() { - // We add this snapshot after `build_forward_extremity_snapshots` is - // called which we requested from the sending server - } else { - error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind()); - - let res = db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id: pdu.room_id(), - event_id: id, - }, - ) - .await?; - - // TODO: This only adds events to the auth_cache, there is for sure a better way to - // do this... - fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?; - - let mut state_before = fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache) - .await? - .into_iter() - .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) - .collect::>(); - - if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache) - .await? - .pop() - { - let key = (pdu.kind.clone(), pdu.state_key()); - state_before.insert(key, pdu); - } - - // Now it's the state after - fork_states.insert(state_before); } } // This guarantees that our current room state is included - if !includes_current_state && current_hash.is_some() { - error!("Did not include current state"); + if !includes_current_state { current_state.insert((pdu.kind(), pdu.state_key()), pdu); fork_states.insert(current_state); @@ -1316,18 +1304,7 @@ pub(crate) fn update_resolved_state( ); } None => { - let mut pduid = pdu.room_id().as_bytes().to_vec(); - pduid.push(0xff); - pduid.extend_from_slice(pdu.event_id().as_bytes()); - new_state.insert( - ( - ev_type, - state_k.ok_or_else(|| { - Error::Conflict("State contained non state event") - })?, - ), - pduid, - ); + error!("We are missing a state event for the current room state."); } } } @@ -1349,9 +1326,9 @@ pub(crate) fn append_incoming_pdu( // Update the state of the room if needed // We can tell if we need to do this based on wether state resolution took place or not let mut new_state = HashMap::new(); - for ((ev_type, state_k), pdu) in state { - match db.rooms.get_pdu_id(pdu.event_id())? { - Some(pduid) => { + for ((ev_type, state_k), state_pdu) in state { + match db.rooms.get_pdu_id(state_pdu.event_id())? 
{ + Some(state_pduid) => { new_state.insert( ( ev_type.clone(), @@ -1359,12 +1336,10 @@ pub(crate) fn append_incoming_pdu( .clone() .ok_or_else(|| Error::Conflict("State contained non state event"))?, ), - pduid.to_vec(), + state_pduid.to_vec(), ); } - None => { - error!("We didn't append an event as an outlier\n{:?}", pdu); - } + None => error!("We are missing a state event for the incoming event snapshot"), } } From 48601142f8afe96042eec0bdade94056f4054a99 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 22:48:43 -0500 Subject: [PATCH 43/62] Use auth_cache to avoid db, save state for every event when joining --- src/client_server/membership.rs | 14 +++------ src/database.rs | 1 + src/database/rooms.rs | 55 ++++++++++++++++++++++++++++++++- src/server_server.rs | 53 ++++++++++++++++++------------- 4 files changed, 90 insertions(+), 33 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 1159185..99c0b62 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -643,8 +643,6 @@ async fn join_room_by_id_helper( ) .expect("iterative auth check failed on resolved events"); - let mut state = HashMap::new(); - // filter the events that failed the auth check keeping the remaining events // sorted correctly for ev_id in sorted_event_ids @@ -660,24 +658,20 @@ async fn join_room_by_id_helper( let mut pdu_id = room_id.as_bytes().to_vec(); pdu_id.push(0xff); pdu_id.extend_from_slice(&count.to_be_bytes()); + + let hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; + db.rooms.append_pdu( &pdu, utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - // TODO: can we simplify the DAG or should we copy it exactly?? &pdu.prev_events, &db, )?; - if state_events.contains(ev_id) { - if let Some(key) = &pdu.state_key { - state.insert((pdu.kind(), key.to_string()), pdu_id); - } - } + db.rooms.set_room_state(room_id, &hash)?; } - - db.rooms.force_state(room_id, state, &db.globals)?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/database.rs b/src/database.rs index 3fb8442..35b7bcd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -161,6 +161,7 @@ impl Database { roomid_statehash: db.open_tree("roomid_statehash")?, roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, + roomeventid_outlierpducount: db.open_tree("roomeventid_outlierpducount")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index abe8c65..43d5f7d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -69,6 +69,10 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. pub(super) roomeventid_outlierpdu: sled::Tree, + /// RoomId + EventId -> count of the last known pdu when the outlier was inserted. + /// This allows us to skip any state snapshots that would for sure not have the outlier. + pub(super) roomeventid_outlierpducount: sled::Tree, + /// RoomId + EventId -> Parent PDU EventId. 
pub(super) prevevent_parent: sled::Tree, } @@ -323,6 +327,15 @@ impl Rooms { .map_or(Ok(None), |pdu_id| self.pdu_count(&pdu_id).map(Some)) } + pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { + self.pduid_pdu + .scan_prefix(room_id.as_bytes()) + .last() + .map(|b| self.pdu_count(&b?.0)) + .transpose() + .map(|op| op.unwrap_or_default()) + } + /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid @@ -490,6 +503,8 @@ impl Rooms { &key, &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), )?; + self.roomeventid_outlierpducount + .insert(&key, &self.latest_pdu_count(pdu.room_id())?.to_be_bytes())?; Ok(()) } @@ -537,7 +552,45 @@ impl Rooms { let mut key = pdu.room_id().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pdu.event_id().as_bytes()); - self.roomeventid_outlierpdu.remove(key)?; + if self.roomeventid_outlierpdu.remove(&key)?.is_some() { + if let Some(state_key) = pdu.state_key.as_deref() { + let mut statekey = pdu.kind().as_ref().as_bytes().to_vec(); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = match self.statekey_short.get(&statekey)? { + Some(short) => utils::u64_from_bytes(&short).map_err(|_| { + Error::bad_database("Invalid short bytes in statekey_short.") + })?, + None => { + error!( + "This event has been inserted into the state snapshot tree previously." + ); + let short = db.globals.next_count()?; + self.statekey_short + .insert(&statekey, &short.to_be_bytes())?; + short + } + }; + + let mut start = pdu.room_id().as_bytes().to_vec(); + start.extend_from_slice( + &self + .roomeventid_outlierpducount + .get(&key)? + .unwrap_or_default(), + ); + for hash in self.pduid_statehash.range(start..).values() { + let mut hash = hash?.to_vec(); + hash.extend_from_slice(&short.to_be_bytes()); + + let _ = self.stateid_pduid.compare_and_swap( + hash, + Some(pdu.event_id().as_bytes()), + Some(pdu_id.as_ref()), + )?; + } + } + } // We must keep track of all events that have been referenced. for leaf in leaves { diff --git a/src/server_server.rs b/src/server_server.rs index 48d5956..780109c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -971,6 +971,7 @@ fn validate_event<'a>( } } Err(_e) => { + error!("{}", _e); return Err("Signature verification failed".to_string()); } }; @@ -988,7 +989,7 @@ fn validate_event<'a>( fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) .await - .map_err(|_| "Event failed auth chain check".to_string())?; + .map_err(|e| e.to_string())?; let pdu = Arc::new(pdu.clone()); @@ -1064,6 +1065,7 @@ async fn fetch_check_auth_events( /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. /// +/// 0. Look in the auth_cache /// 1. Look in the main timeline (pduid_pdu tree) /// 2. Look at outlier pdu tree /// 3. Ask origin server over federation @@ -1080,28 +1082,35 @@ pub(crate) async fn fetch_events( ) -> Result>> { let mut pdus = vec![]; for id in events { - // `get_pdu` checks the outliers tree for us - let pdu = match db.rooms.get_pdu(&id)? 
{ - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|_| Error::Conflict("Authentication of event failed"))?; + let pdu = match auth_cache.get(id) { + Some(pdu) => pdu.clone(), + // `get_pdu` checks the outliers tree for us + None => match db.rooms.get_pdu(&id)? { + Some(pdu) => Arc::new(pdu), + None => match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|e| { + error!("{:?}", e); + Error::Conflict("Authentication of event failed") + })?; - db.rooms.append_pdu_outlier(&pdu)?; - pdu - } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + db.rooms.append_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), + }, }, }; auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); From 8f283510aa93189f6845a2950da32c8fb49fc1f5 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Wed, 3 Feb 2021 23:01:31 -0500 Subject: [PATCH 44/62] Fix unused import clippy warning --- Cargo.lock | 52 ++++++++++++++++++++++++++------- src/client_server/membership.rs | 6 ++-- src/client_server/session.rs | 3 +- src/server_server.rs | 3 +- 4 files changed, 46 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c7381be..2565a35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,6 +97,12 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + [[package]] name = "base64" version = "0.13.0" @@ -172,7 +178,7 @@ dependencies = [ "num-integer", "num-traits", "time 0.1.43", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -185,7 +191,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.1.0" dependencies = [ - "base64", + "base64 0.13.0", "directories", "http", "image", @@ -227,7 +233,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.24", + "time 0.2.25", "version_check", ] @@ -871,6 +877,20 @@ dependencies = [ "serde", ] +[[package]] +name = "jsonwebtoken" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +dependencies = [ + "base64 0.12.3", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1509,7 +1529,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd281b1030aa675fb90aa994d07187645bb3c8fc756ca766e7c3070b439de9de" dependencies = [ - "base64", + "base64 0.13.0", "bytes", "encoding_rs", "futures-core", @@ -1584,8 
+1604,8 @@ dependencies = [ "rocket_http", "serde", "state", - "time 0.2.24", - "tokio 1.0.2", + "time 0.2.25", + "tokio", "ubyte", "version_check", "yansi", @@ -1622,8 +1642,8 @@ dependencies = [ "ref-cast", "smallvec", "state", - "time 0.2.24", - "tokio 1.0.2", + "time 0.2.25", + "tokio", "tokio-rustls", "uncased", "unicode-xid", @@ -1819,7 +1839,7 @@ name = "ruma-signatures" version = "0.6.0-alpha.1" source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" dependencies = [ - "base64", + "base64 0.13.0", "ring", "ruma-identifiers", "ruma-serde", @@ -1833,7 +1853,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", "crossbeam-utils", @@ -1854,7 +1874,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64", + "base64 0.13.0", "log", "ring", "sct", @@ -2189,6 +2209,16 @@ dependencies = [ "once_cell", ] +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "time" version = "0.2.25" diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 99c0b62..211388e 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,9 +21,9 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -use state_res::Event; +// use state_res::Event; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashSet}, convert::TryFrom, iter, sync::Arc, @@ -557,7 +557,7 @@ async fn join_room_by_id_helper( let room_state = send_join_response.room_state.state.iter().map(add_event_id); - let state_events = room_state + let _state_events = room_state .clone() .map(|pdu: Result<(EventId, CanonicalJsonObject)>| Ok(pdu?.0)) .chain(iter::once(Ok(event_id.clone()))) // Add join event we just created diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 1b2583c..f8d64f0 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -117,8 +117,7 @@ pub async fn login_route( let device_exists = body.device_id.as_ref().map_or(false, |device_id| { db.users .all_device_ids(&user_id) - .find(|x| x.as_ref().map_or(false, |v| v == device_id)) - .is_some() + .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); if device_exists { diff --git a/src/server_server.rs b/src/server_server.rs index 780109c..1e81d5e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -800,8 +800,7 @@ pub async fn send_transaction_message_route<'a>( } }; - // Make this the state after (since we appended_incoming_pdu this should agree with our servers - // current state). + // Make this the state after. 
let mut state_after = state_at_event.clone(); state_after.insert((pdu.kind(), pdu.state_key()), pdu.clone()); // Add the incoming event to the mix of state snapshots From 0cc6448dbe1d31d7e4f84f27ab9ca957b69ebe0f Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Tue, 9 Feb 2021 17:58:40 -0500 Subject: [PATCH 45/62] Temp disable rust_2018_idioms for CI --- Cargo.lock | 212 +++++++++++++--------------------------------------- src/main.rs | 2 +- 2 files changed, 53 insertions(+), 161 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2565a35..956e372 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,27 +33,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" -[[package]] -name = "async-stream" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670df70cbc01729f901f94c887814b3c68db038aad1329a418bae178bc5295c" -dependencies = [ - "async-stream-impl", - "futures-core", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3548b8efc9f8e8a5a0a2808c5bd8451a9031b9e5b879a79590304ae928b0a70" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "async-trait" version = "0.1.42" @@ -134,9 +113,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07aa6688c702439a1be0307b6a94dffe1168569e45b9500c1372bc580740d59" +checksum = "099e596ef14349721d9016f6b80dd3419ea1bf289ab9b44df8e4dfd3a005d5d9" [[package]] name = "bytemuck" @@ -195,7 +174,6 @@ dependencies = [ "directories", "http", "image", - "js_int", "jsonwebtoken", "log", "rand 0.7.3", @@ -211,7 +189,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio 1.0.2", + "tokio", "trust-dns-resolver", ] @@ -373,9 +351,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.26" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ "cfg-if", ] @@ -602,28 +580,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 0.2.24", - "tokio-util 0.3.1", - "tracing", - "tracing-futures", -] - -[[package]] -name = "h2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b67e66362108efccd8ac053abafc8b7a8d86a37e6e48fc4f6f7485eb5e9e6a5" -dependencies = [ - "bytes 1.0.1", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 1.0.2", - "tokio-util 0.6.1", + "tokio", + "tokio-util", "tracing", "tracing-futures", ] @@ -684,21 +642,11 @@ dependencies = [ "http", ] -[[package]] -name = "http-body" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" -dependencies = [ - "bytes 1.0.1", - "http", -] - [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name = 
"httpdate" @@ -708,47 +656,23 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.2.7", + "h2", "http", - "http-body 0.3.1", + "http-body", "httparse", "httpdate", "itoa", - "pin-project 1.0.4", + "pin-project 1.0.5", "socket2", - "tokio 0.2.24", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" -dependencies = [ - "bytes 1.0.1", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.0", - "http", - "http-body 0.4.0", - "httparse", - "httpdate", - "itoa", - "pin-project 1.0.4", - "socket2", - "tokio 1.0.2", + "tokio", "tower-service", "tracing", "want", @@ -769,9 +693,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "de910d521f7cc3135c4de8db1cb910e0b5ed1dc6f57c381cd07e8e661ce10094" dependencies = [ "matches", "unicode-bidi", @@ -780,9 +704,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ce04077ead78e39ae8610ad26216aed811996b043d47beed5090db674f9e9b5" +checksum = "293f07a1875fa7e9c5897b51aa68b2d8ed8271b87e1a44cb64b9c3d98aabbc0d" dependencies = [ "bytemuck", "byteorder", @@ -899,9 +823,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff" +checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" [[package]] name = "linked-hash-map" @@ -1007,16 +931,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2", - "winapi 0.3.9", -] - [[package]] name = "native-tls" version = "0.2.7" @@ -1044,15 +958,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -1246,11 +1151,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" dependencies = [ - "pin-project-internal 1.0.4", + "pin-project-internal 1.0.5", ] [[package]] @@ -1266,9 +1171,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.4" +version = 
"1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" dependencies = [ "proc-macro2", "quote", @@ -1535,8 +1440,8 @@ dependencies = [ "futures-core", "futures-util", "http", - "http-body 0.3.1", - "hyper 0.13.9", + "http-body", + "hyper", "hyper-tls", "ipnet", "js-sys", @@ -1569,9 +1474,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -1631,7 +1536,7 @@ dependencies = [ "cookie", "either", "http", - "hyper 0.14.2", + "hyper", "indexmap", "log", "mime", @@ -1973,9 +1878,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" +checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" dependencies = [ "itoa", "ryu", @@ -1996,9 +1901,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "971be8f6e4d4a47163b405a3df70d14359186f9ab0f3a3ec37df144ca1ce089f" +checksum = "bdd2af560da3c1fdc02cb80965289254fc35dff869810061e2d8290ee48848ae" dependencies = [ "dtoa", "linked-hash-map", @@ -2079,9 +1984,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66a8cff4fa24853fdf6b51f75c6d7f8206d7c75cab4e467bcd7f25c2b1febe0" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" dependencies = [ "version_check", ] @@ -2202,9 +2107,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ "once_cell", ] @@ -2274,9 +2179,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8efab2086f17abcddb8f756117665c958feee6b2e39974c2f1600592ab3a4195" +checksum = "e8190d04c665ea9e6b6a0dc45523ade572c088d2e6566244c1122671dbf4ae3a" dependencies = [ "autocfg", "bytes", @@ -2293,9 +2198,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" +checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" dependencies = [ "proc-macro2", "quote", @@ -2319,35 +2224,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.0.2", + "tokio", "webpki", ] -[[package]] -name = "tokio-stream" -version = 
"0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-util" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb971a26599ffd28066d387f109746df178eff14d5ea1e235015c5601967a4b" +checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" dependencies = [ - "async-stream", "bytes", "futures-core", "futures-sink", "log", "pin-project-lite", "tokio", - "tokio-stream", ] [[package]] @@ -2367,9 +2259,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "f7d40a22fd029e33300d8d89a5cc8ffce18bb7c587662f54629e94c9de5487f3" dependencies = [ "cfg-if", "pin-project-lite", @@ -2415,7 +2307,7 @@ dependencies = [ "rand 0.8.3", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "url", ] @@ -2435,7 +2327,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio 0.2.24", + "tokio", "trust-dns-proto", ] @@ -2474,9 +2366,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] diff --git a/src/main.rs b/src/main.rs index e5c0399..9b64506 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,4 @@ -#![warn(rust_2018_idioms)] +// #![warn(rust_2018_idioms)] pub mod appservice_server; pub mod client_server; From a601c29c978c85c6dd346b8395f01581c684ccdc Mon Sep 17 00:00:00 2001 From: Niklas Zender Date: Fri, 26 Feb 2021 14:55:06 +0000 Subject: [PATCH 46/62] Chore: Add Issue Template --- .gitlab/issue_templates/Issue Template.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .gitlab/issue_templates/Issue Template.md diff --git a/.gitlab/issue_templates/Issue Template.md b/.gitlab/issue_templates/Issue Template.md new file mode 100644 index 0000000..e1a0667 --- /dev/null +++ b/.gitlab/issue_templates/Issue Template.md @@ -0,0 +1,15 @@ +# Headline + +### Description + + + + + + + + + + + +/label ~conduit From 0dd8a15c4900ec9ba6fe1764b5ca31c4575bb199 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sat, 27 Feb 2021 16:09:22 -0500 Subject: [PATCH 47/62] Fix leaves not being replaced by correct eventId in membership Update ruma --- Cargo.lock | 62 ++++++++++++++++++++++++--------- Cargo.toml | 5 +-- src/client_server/membership.rs | 2 +- src/database/key_backups.rs | 32 ++++++++++------- 4 files changed, 69 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 956e372..d9dbbf2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1558,7 +1558,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "js_int", @@ -1569,6 +1569,8 @@ dependencies = [ 
"ruma-events", "ruma-federation-api", "ruma-identifiers", + "ruma-identity-service-api", + "ruma-push-gateway-api", "ruma-serde", "ruma-signatures", ] @@ -1576,7 +1578,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "http", "percent-encoding", @@ -1591,7 +1593,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1602,7 +1604,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1616,7 +1618,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "http", @@ -1635,7 +1637,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "maplit", @@ -1648,7 +1650,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-common", @@ -1662,7 +1664,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1673,7 +1675,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ -1688,7 +1690,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = 
"git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "paste", "rand 0.8.3", @@ -1702,7 +1704,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro2", "quote", @@ -1713,12 +1715,40 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" + +[[package]] +name = "ruma-identity-service-api" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" +dependencies = [ + "ruma-api", + "ruma-common", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] + +[[package]] +name = "ruma-push-gateway-api" +version = "0.0.1" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" +dependencies = [ + "js_int", + "ruma-api", + "ruma-common", + "ruma-events", + "ruma-identifiers", + "ruma-serde", + "serde", + "serde_json", +] [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "form_urlencoded", "itoa", @@ -1731,7 +1761,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1742,7 +1772,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=bba442580d6cd7ed990b2b63387eed2238cbadc8#bba442580d6cd7ed990b2b63387eed2238cbadc8" +source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "base64 0.13.0", "ring", @@ -2000,7 +2030,7 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?rev=791c66d73cf064d09db0cdf767d5fef43a343425#791c66d73cf064d09db0cdf767d5fef43a343425" +source = "git+https://github.com/ruma/state-res?branch=main#d34a78c5b66de419862d9e592bde8e0007111ebd" dependencies = [ "itertools", "log", diff --git a/Cargo.toml b/Cargo.toml index de6a966..4a901e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,14 +18,15 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "c24f15c18 #rocket = { git = 
"https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "bba442580d6cd7ed990b2b63387eed2238cbadc8" } +ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } # ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } +# state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 211388e..e3b1827 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -666,7 +666,7 @@ async fn join_room_by_id_helper( utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), count, pdu_id.clone().into(), - &pdu.prev_events, + &[pdu.event_id.clone()], &db, )?; diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index a50e45e..4c65354 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -2,7 +2,7 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, - r0::backup::{BackupAlgorithm, KeyData, Sessions}, + r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, }, RoomId, UserId, }; @@ -129,7 +129,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - key_data: &KeyData, + key_data: &KeyBackupData, globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.to_string().as_bytes().to_vec(); @@ -153,7 +153,7 @@ impl KeyBackups { self.backupkeyid_backup.insert( &key, - &*serde_json::to_string(&key_data).expect("KeyData::to_string always works"), + &*serde_json::to_string(&key_data).expect("KeyBackupData::to_string always works"), )?; Ok(()) @@ -182,13 +182,17 @@ impl KeyBackups { .to_string()) } - pub fn get_all(&self, user_id: &UserId, version: &str) -> Result> { + pub fn get_all( + &self, + user_id: &UserId, + version: &str, + ) -> Result> { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::::new(); 
+ let mut rooms = BTreeMap::::new(); for result in self.backupkeyid_backup.scan_prefix(&prefix).map(|r| { let (key, value) = r?; @@ -211,15 +215,16 @@ impl KeyBackups { ) .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid room id."))?; - let key_data = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid."))?; + let key_data = serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + })?; Ok::<_, Error>((room_id, session_id, key_data)) }) { let (room_id, session_id, key_data) = result?; rooms .entry(room_id) - .or_insert_with(|| Sessions { + .or_insert_with(|| RoomKeyBackup { sessions: BTreeMap::new(), }) .sessions @@ -234,7 +239,7 @@ impl KeyBackups { user_id: &UserId, version: &str, room_id: &RoomId, - ) -> BTreeMap { + ) -> BTreeMap { let mut prefix = user_id.to_string().as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -257,7 +262,7 @@ impl KeyBackups { })?; let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyData in backupkeyid_backup is invalid.") + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") })?; Ok::<_, Error>((session_id, key_data)) @@ -272,7 +277,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - ) -> Result> { + ) -> Result> { let mut key = user_id.to_string().as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -284,8 +289,9 @@ impl KeyBackups { self.backupkeyid_backup .get(&key)? .map(|value| { - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid.")) + serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + }) }) .transpose() } From f3253f2033691ec47719335d8e0c04b684c48899 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Sun, 28 Feb 2021 18:53:17 -0500 Subject: [PATCH 48/62] Move comments about Rooms trees to doc comments --- src/database/rooms.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 43d5f7d..4ad499c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -63,8 +63,11 @@ pub struct Rooms { /// Remember the state hash at events in the past. pub(super) pduid_statehash: sled::Tree, /// The state for a given state hash. - pub(super) statekey_short: sled::Tree, // StateKey = EventType + StateKey, Short = Count - pub(super) stateid_pduid: sled::Tree, // StateId = StateHash + Short, PduId = Count (without roomid) + /// + /// StateKey = EventType + StateKey, Short = Count + pub(super) statekey_short: sled::Tree, + /// StateId = StateHash + Short, PduId = Count (without roomid) + pub(super) stateid_pduid: sled::Tree, /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. 
@@ -583,11 +586,11 @@ impl Rooms { let mut hash = hash?.to_vec(); hash.extend_from_slice(&short.to_be_bytes()); - let _ = self.stateid_pduid.compare_and_swap( + let _ = dbg!(self.stateid_pduid.compare_and_swap( hash, Some(pdu.event_id().as_bytes()), Some(pdu_id.as_ref()), - )?; + )?); } } } @@ -921,12 +924,12 @@ impl Rooms { content.clone(), prev_event, None, // TODO: third party invite - &auth_events + dbg!(&auth_events .iter() .map(|((ty, key), pdu)| { Ok(((ty.clone(), key.clone()), Arc::new(pdu.clone()))) }) - .collect::>>()?, + .collect::>>()?), ) .map_err(|e| { log::error!("{}", e); From c9f4ff5cf8e20dba0e6dfc24de6acb83458e3b2d Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Mar 2021 08:23:28 -0500 Subject: [PATCH 49/62] Ask multiple servers for keys when not known or sending server failed --- src/database/rooms.rs | 2 +- src/server_server.rs | 61 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 53 insertions(+), 10 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4ad499c..992c97c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1605,7 +1605,7 @@ impl Rooms { }) } - /// Returns an iterator over all joined members of a room. + /// Returns an iterator of all servers participating in this room. pub fn room_servers(&self, room_id: &RoomId) -> impl Iterator>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index 1e81d5e..58c4b33 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -8,8 +8,8 @@ use ruma::{ federation::{ directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{ - get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys, - VerifyKey, + get_remote_server_keys, get_server_keys, + get_server_version::v1 as get_server_version, ServerSigningKeys, VerifyKey, }, event::{get_event, get_missing_events, get_room_state_ids}, query::get_profile_information, @@ -575,7 +575,7 @@ pub async fn send_transaction_message_route<'a>( return None; } - Some((event_id, value)) + Some((event_id, room_id, value)) }) .collect::>(); @@ -586,7 +586,7 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - 'main_pdu_loop: for (event_id, value) in pdus_to_resolve { + 'main_pdu_loop: for (event_id, room_id, value) in pdus_to_resolve { let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); @@ -595,7 +595,7 @@ pub async fn send_transaction_message_route<'a>( UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); let origin = sender.server_name(); - let keys = match fetch_signing_keys(&db, origin).await { + let keys = match fetch_signing_keys(&db, &room_id, origin).await { Ok(keys) => keys, Err(_) => { resolved_map.insert( @@ -1122,18 +1122,61 @@ pub(crate) async fn fetch_events( /// fetch them from the server and save to our DB. pub(crate) async fn fetch_signing_keys( db: &Database, + room_id: &RoomId, origin: &ServerName, ) -> Result> { match db.globals.signing_keys_for(origin)? 
{ keys if !keys.is_empty() => Ok(keys), _ => { - let keys = db + match db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) .await - .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?; - db.globals.add_signing_key(origin, &keys.server_key)?; - Ok(keys.server_key.verify_keys) + { + Ok(keys) => { + db.globals.add_signing_key(origin, &keys.server_key)?; + Ok(keys.server_key.verify_keys) + } + _ => { + for server in db.rooms.room_servers(room_id) { + let server = server?; + if let Ok(keys) = db + .sending + .send_federation_request( + &db.globals, + &server, + get_remote_server_keys::v2::Request::new( + &server, + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ), + ) + .await + { + let keys: Vec = keys.server_keys; + let key = keys.into_iter().fold(None, |mut key, next| { + if let Some(verified) = &key { + // rustc cannot elide this type for some reason + let v: &ServerSigningKeys = verified; + if v.verify_keys + .iter() + .zip(next.verify_keys.iter()) + .all(|(a, b)| a.1.key == b.1.key) + { + } + } else { + key = Some(next) + } + key + }); + } + } + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) + } + } } } } From 79c9de98cd6699df8647b70ef24d9dd0889a497a Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Mon, 1 Mar 2021 09:17:53 -0500 Subject: [PATCH 50/62] Add trusted_servers, filter servers to query keys by trusted_servers --- src/database.rs | 2 ++ src/database/globals.rs | 4 ++++ src/server_server.rs | 16 +++++++++++++--- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/database.rs b/src/database.rs index 35b7bcd..20cc7e1 100644 --- a/src/database.rs +++ b/src/database.rs @@ -39,6 +39,8 @@ pub struct Config { #[serde(default = "false_fn")] allow_federation: bool, jwt_secret: Option, + #[serde(default = "Vec::new")] + trusted_servers: Vec>, } fn false_fn() -> bool { diff --git a/src/database/globals.rs b/src/database/globals.rs index 00b4568..3c65e74 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -139,6 +139,10 @@ impl Globals { self.config.allow_federation } + pub fn trusted_servers(&self) -> &[Box] { + &self.config.trusted_servers + } + pub fn dns_resolver(&self) -> &TokioAsyncResolver { &self.dns_resolver } diff --git a/src/server_server.rs b/src/server_server.rs index 58c4b33..dcd72f7 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1138,7 +1138,9 @@ pub(crate) async fn fetch_signing_keys( Ok(keys.server_key.verify_keys) } _ => { - for server in db.rooms.room_servers(room_id) { + for server in db.rooms.room_servers(room_id).filter( + |ser| matches!(ser, Ok(s) if db.globals.trusted_servers().contains(s)), + ) { let server = server?; if let Ok(keys) = db .sending @@ -1154,8 +1156,9 @@ pub(crate) async fn fetch_signing_keys( ) .await { + let mut trust = 0; let keys: Vec = keys.server_keys; - let key = keys.into_iter().fold(None, |mut key, next| { + let key = keys.iter().fold(None, |mut key, next| { if let Some(verified) = &key { // rustc cannot elide this type for some reason let v: &ServerSigningKeys = verified; @@ -1164,12 +1167,19 @@ pub(crate) async fn fetch_signing_keys( .zip(next.verify_keys.iter()) .all(|(a, b)| a.1.key == b.1.key) { + trust += 1; } } else { - key = Some(next) + key = Some(next.clone()) } key }); + + if trust == (keys.len() - 1) && key.is_some() { + let k = key.unwrap(); + db.globals.add_signing_key(origin, &k)?; + return Ok(k.verify_keys); + } } } 
Err(Error::BadServerResponse( From dd68031b692065469b4fde959e5751df2005f10e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 2 Mar 2021 14:32:30 +0100 Subject: [PATCH 51/62] improvement: implement /receipt --- src/client_server/read_marker.rs | 52 ++++++++++++++++++++++++++++---- src/main.rs | 2 +- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 0c4ec1a..f7d3712 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -2,7 +2,8 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; use ruma::{ api::client::{ - error::ErrorKind, r0::capabilities::get_capabilities, r0::read_marker::set_read_marker, + error::ErrorKind, + r0::{read_marker::set_read_marker, receipt::create_receipt}, }, events::{AnyEphemeralRoomEvent, AnyEvent, EventType}, }; @@ -83,13 +84,52 @@ pub async fn set_read_marker_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") )] -pub async fn set_receipt_route( +pub async fn create_receipt_route( db: State<'_, Database>, - body: Ruma, -) -> ConduitResult { - let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + db.rooms.edus.private_read_set( + &body.room_id, + &sender_user, + db.rooms + .get_pdu_count(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, + &db.globals, + )?; + + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(SystemTime::now()), + }, + ); + let mut receipt_content = BTreeMap::new(); + receipt_content.insert( + body.event_id.to_owned(), + ruma::events::receipt::Receipts { + read: Some(user_receipts), + }, + ); + + db.rooms.edus.readreceipt_update( + &sender_user, + &body.room_id, + AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt( + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )), + &db.globals, + )?; db.flush().await?; - Ok(set_read_marker::Response.into()) + Ok(create_receipt::Response.into()) } diff --git a/src/main.rs b/src/main.rs index 65434a5..d5f1f4e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -90,7 +90,7 @@ fn setup_rocket() -> rocket::Rocket { client_server::get_backup_key_sessions_route, client_server::get_backup_keys_route, client_server::set_read_marker_route, - client_server::set_receipt_route, + client_server::create_receipt_route, client_server::create_typing_event_route, client_server::create_room_route, client_server::redact_event_route, From 278751eb23cd524fb489634905612e4939c1501c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 2 Mar 2021 14:36:48 +0100 Subject: [PATCH 52/62] improvement: use transaction ids for federation requests --- src/database/sending.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index fd32793..8c487e1 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -9,6 +9,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; use log::info; +use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; 
use ruma::{ api::{appservice, federation, OutgoingRequest}, @@ -229,6 +230,13 @@ impl Sending { Ok(()) } + fn calculate_hash(keys: &[IVec]) -> Vec { + // We only hash the pdu's event ids, not the whole pdu + let bytes = keys.join(&0xff); + let hash = digest::digest(&digest::SHA256, &bytes); + hash.as_ref().to_owned() + } + async fn handle_event( server: Box, is_appservice: bool, @@ -266,7 +274,10 @@ impl Sending { .unwrap(), // TODO: handle error appservice::event::push_events::v1::Request { events: &pdu_jsons, - txn_id: &utils::random_string(16), + txn_id: &base64::encode_config( + Self::calculate_hash(&pdu_ids), + base64::URL_SAFE_NO_PAD, + ), }, ) .await @@ -309,7 +320,10 @@ impl Sending { pdus: &pdu_jsons, edus: &[], origin_server_ts: SystemTime::now(), - transaction_id: &utils::random_string(16), + transaction_id: &base64::encode_config( + Self::calculate_hash(&pdu_ids), + base64::URL_SAFE_NO_PAD, + ), }, ) .await From f7713fdf2e470ba437cf858faf8c306649d91fbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 26 Feb 2021 13:24:07 +0100 Subject: [PATCH 53/62] fix: sending code got stuck sometimes --- src/database/sending.rs | 78 ++++++++++++++++++++++++++++++----------- src/ruma_wrapper.rs | 4 +-- 2 files changed, 59 insertions(+), 23 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index 8c487e1..dfb7fa9 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -8,7 +8,7 @@ use std::{ use crate::{appservice_server, server_server, utils, Error, PduEvent, Result}; use federation::transactions::send_transaction_message; -use log::info; +use log::{info, warn}; use ring::digest; use rocket::futures::stream::{FuturesUnordered, StreamExt}; use ruma::{ @@ -36,6 +36,7 @@ impl Sending { ) { let servernamepduids = self.servernamepduids.clone(); let servercurrentpdus = self.servercurrentpdus.clone(); + let maximum_requests = self.maximum_requests.clone(); let rooms = rooms.clone(); let globals = globals.clone(); let appservice = appservice.clone(); @@ -44,23 +45,43 @@ impl Sending { let mut futures = FuturesUnordered::new(); // Retry requests we could not finish yet - let mut current_transactions = HashMap::new(); + let mut current_transactions = HashMap::<(Box, bool), Vec>::new(); - for (server, pdu, is_appservice) in servercurrentpdus + for (key, server, pdu, is_appservice) in servercurrentpdus .iter() .filter_map(|r| r.ok()) .filter_map(|(key, _)| Self::parse_servercurrentpdus(key).ok()) - .filter(|(_, pdu, _)| !pdu.is_empty()) // Skip reservation key - .take(50) - // This should not contain more than 50 anyway { - current_transactions + if pdu.is_empty() { + // Remove old reservation key + servercurrentpdus.remove(key).unwrap(); + continue; + } + + let entry = current_transactions .entry((server, is_appservice)) - .or_insert_with(Vec::new) - .push(pdu); + .or_insert_with(Vec::new); + + if entry.len() > 30 { + warn!("Dropping some current pdus because too many were queued. 
This should not happen."); + servercurrentpdus.remove(key).unwrap(); + continue; + } + + entry.push(pdu); } for ((server, is_appservice), pdus) in current_transactions { + // Create new reservation + let mut prefix = if is_appservice { + "+".as_bytes().to_vec() + } else { + Vec::new() + }; + prefix.extend_from_slice(server.as_bytes()); + prefix.push(0xff); + servercurrentpdus.insert(prefix, &[]).unwrap(); + futures.push(Self::handle_event( server, is_appservice, @@ -68,6 +89,7 @@ impl Sending { &globals, &rooms, &appservice, + &maximum_requests, )); } @@ -106,7 +128,7 @@ impl Sending { .map(|k| { k.subslice(prefix.len(), k.len() - prefix.len()) }) - .take(50) + .take(30) .collect::>(); if !new_pdus.is_empty() { @@ -117,7 +139,7 @@ impl Sending { servernamepduids.remove(¤t_key).unwrap(); } - futures.push(Self::handle_event(server, is_appservice, new_pdus, &globals, &rooms, &appservice)); + futures.push(Self::handle_event(server, is_appservice, new_pdus, &globals, &rooms, &appservice, &maximum_requests)); } else { servercurrentpdus.remove(&prefix).unwrap(); // servercurrentpdus with the prefix should be empty now @@ -194,15 +216,17 @@ impl Sending { prefix.extend_from_slice(server.as_bytes()); prefix.push(0xff); - servercurrentpdus + if servercurrentpdus .compare_and_swap(prefix, Option::<&[u8]>::None, Some(&[])) // Try to reserve - == Ok(Ok(())) + == Ok(Ok(())) { true } else { + false + } }) { servercurrentpdus.insert(&key, &[]).unwrap(); servernamepduids.remove(&key).unwrap(); - futures.push(Self::handle_event(server, is_appservice, vec![pdu_id.into()], &globals, &rooms, &appservice)); + futures.push(Self::handle_event(server, is_appservice, vec![pdu_id.into()], &globals, &rooms, &appservice, &maximum_requests)); } } } @@ -244,6 +268,7 @@ impl Sending { globals: &super::globals::Globals, rooms: &super::rooms::Rooms, appservice: &super::appservice::Appservice, + maximum_requests: &Semaphore, ) -> std::result::Result<(Box, bool), (Box, bool, Error)> { if is_appservice { let pdu_jsons = pdu_ids @@ -266,7 +291,9 @@ impl Sending { }) .filter_map(|r| r.ok()) .collect::>(); - appservice_server::send_request( + + let permit = maximum_requests.acquire().await; + let response = appservice_server::send_request( &globals, appservice .get_registration(server.as_str()) @@ -282,7 +309,11 @@ impl Sending { ) .await .map(|_response| (server.clone(), is_appservice)) - .map_err(|e| (server, is_appservice, e)) + .map_err(|e| (server, is_appservice, e)); + + drop(permit); + + response } else { let pdu_jsons = pdu_ids .iter() @@ -312,7 +343,8 @@ impl Sending { .filter_map(|r| r.ok()) .collect::>(); - server_server::send_request( + let permit = maximum_requests.acquire().await; + let response = server_server::send_request( &globals, server.clone(), send_transaction_message::v1::Request { @@ -328,12 +360,17 @@ impl Sending { ) .await .map(|_response| (server.clone(), is_appservice)) - .map_err(|e| (server, is_appservice, e)) + .map_err(|e| (server, is_appservice, e)); + + drop(permit); + + response } } - fn parse_servercurrentpdus(key: IVec) -> Result<(Box, IVec, bool)> { - let mut parts = key.splitn(2, |&b| b == 0xff); + fn parse_servercurrentpdus(key: IVec) -> Result<(IVec, Box, IVec, bool)> { + let key2 = key.clone(); + let mut parts = key2.splitn(2, |&b| b == 0xff); let server = parts.next().expect("splitn always returns one element"); let pdu = parts .next() @@ -351,6 +388,7 @@ impl Sending { }; Ok::<_, Error>(( + key, Box::::try_from(server).map_err(|_| { Error::bad_database("Invalid server string in 
server_currenttransaction") })?, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 45fcc7f..898561f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -82,9 +82,7 @@ where registration .get("as_token") .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| { - dbg!(token.as_deref()) == dbg!(Some(as_token)) - }) + .map_or(false, |as_token| token.as_deref() == Some(as_token)) }) { match T::METADATA.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { From 4155a47db1e365b0b2875c419fb1ba1e584587e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 28 Feb 2021 12:41:03 +0100 Subject: [PATCH 54/62] feat: opentelemetry/jaeger support --- Cargo.lock | 249 ++++++++++++++++++++++++++-- Cargo.toml | 6 + conduit-example.toml | 4 + src/client_server/account.rs | 5 + src/client_server/alias.rs | 3 + src/client_server/backup.rs | 14 ++ src/client_server/capabilities.rs | 1 + src/client_server/config.rs | 2 + src/client_server/context.rs | 1 + src/client_server/device.rs | 5 + src/client_server/directory.rs | 4 + src/client_server/filter.rs | 2 + src/client_server/keys.rs | 6 + src/client_server/media.rs | 4 + src/client_server/membership.rs | 11 ++ src/client_server/message.rs | 2 + src/client_server/mod.rs | 1 + src/client_server/presence.rs | 1 + src/client_server/profile.rs | 5 + src/client_server/push.rs | 10 ++ src/client_server/read_marker.rs | 2 + src/client_server/redact.rs | 1 + src/client_server/room.rs | 3 + src/client_server/search.rs | 1 + src/client_server/session.rs | 4 + src/client_server/state.rs | 5 + src/client_server/sync.rs | 5 +- src/client_server/tag.rs | 3 + src/client_server/thirdparty.rs | 1 + src/client_server/to_device.rs | 1 + src/client_server/typing.rs | 1 + src/client_server/unversioned.rs | 1 + src/client_server/user_directory.rs | 1 + src/client_server/voip.rs | 1 + src/database.rs | 2 + src/database/account_data.rs | 1 + src/database/rooms.rs | 16 ++ src/database/rooms/edus.rs | 5 + src/database/sending.rs | 6 + src/database/users.rs | 5 + src/error.rs | 68 +------- src/main.rs | 52 ++++-- src/pdu.rs | 34 ++-- src/ruma_wrapper.rs | 4 +- src/server_server.rs | 14 ++ 45 files changed, 457 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78ff405..c8d48dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,6 +30,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "arrayref" version = "0.3.6" @@ -212,7 +221,9 @@ dependencies = [ "js_int", "jsonwebtoken", "log", - "rand", + "opentelemetry", + "opentelemetry-jaeger", + "rand 0.7.3", "regex", "reqwest", "ring", @@ -226,6 +237,9 @@ dependencies = [ "state-res", "thiserror", "tokio", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", "trust-dns-resolver", ] @@ -595,6 +609,17 @@ dependencies = [ "wasi 0.9.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", +] + [[package]] name = "gif" version = "0.11.1" @@ -795,6 +820,12 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "integer-encoding" +version = "1.1.7" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" + [[package]] name = "iovec" version = "0.1.4" @@ -945,6 +976,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.8" @@ -1189,6 +1229,44 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "514d24875c140ed269eecc2d1b56d7b71b573716922a763c317fb1b1b4b58f15" +dependencies = [ + "async-trait", + "futures", + "js-sys", + "lazy_static", + "percent-encoding", + "pin-project 1.0.2", + "rand 0.8.3", + "thiserror", +] + +[[package]] +name = "opentelemetry-jaeger" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5677b3a361784aff6e2b1b30dbdb5f85f4ec57ff2ced41d9a481ad70a9d0b57" +dependencies = [ + "async-trait", + "lazy_static", + "opentelemetry", + "thiserror", + "thrift", +] + +[[package]] +name = "ordered-float" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +dependencies = [ + "num-traits", +] + [[package]] name = "parking_lot" version = "0.11.1" @@ -1406,11 +1484,23 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom", + "getrandom 0.1.15", "libc", - "rand_chacha", - "rand_core", - "rand_hc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.2", + "rand_hc 0.3.0", ] [[package]] @@ -1420,7 +1510,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.2", ] [[package]] @@ -1429,7 +1529,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom", + "getrandom 0.1.15", +] + +[[package]] +name = "rand_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +dependencies = [ + "getrandom 0.2.2", ] [[package]] @@ -1438,7 +1547,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core", + 
"rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.2", ] [[package]] @@ -1453,7 +1571,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom", + "getrandom 0.1.15", "redox_syscall", "rust-argon2", ] @@ -1490,6 +1608,16 @@ dependencies = [ "thread_local", ] +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", + "regex-syntax", +] + [[package]] name = "regex-syntax" version = "0.6.21" @@ -1582,7 +1710,7 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand", + "rand 0.7.3", "ref-cast", "rocket_codegen", "rocket_http", @@ -1769,7 +1897,7 @@ version = "0.17.4" source = "git+https://github.com/ruma/ruma?rev=ee814aa84934530d76f5e4b275d739805b49bdef#ee814aa84934530d76f5e4b275d739805b49bdef" dependencies = [ "paste", - "rand", + "rand 0.7.3", "ruma-identifiers-macros", "ruma-identifiers-validation", "ruma-serde", @@ -2010,6 +2138,15 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +[[package]] +name = "sharded-slab" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +dependencies = [ + "lazy_static", +] + [[package]] name = "signal-hook-registry" version = "1.2.2" @@ -2194,7 +2331,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ "cfg-if 0.1.10", "libc", - "rand", + "rand 0.7.3", "redox_syscall", "remove_dir_all", "winapi 0.3.9", @@ -2229,6 +2366,28 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "thrift" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" +dependencies = [ + "byteorder", + "integer-encoding", + "log", + "ordered-float", + "threadpool", +] + [[package]] name = "time" version = "0.1.44" @@ -2380,9 +2539,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ "cfg-if 1.0.0", "log", @@ -2393,9 +2552,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" +checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07" dependencies = [ "proc-macro2", "quote", @@ -2421,6 +2580,62 @@ dependencies = [ 
"tracing", ] +[[package]] +name = "tracing-log" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccdf13c28f1654fe806838f28c5b9cb23ca4c0eae71450daa489f50e523ceb1" +dependencies = [ + "opentelemetry", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + +[[package]] +name = "tracing-serde" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ab8966ac3ca27126141f7999361cc97dd6fb4b71da04c02044fa9045d98bb96" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + [[package]] name = "trust-dns-proto" version = "0.19.6" @@ -2434,7 +2649,7 @@ dependencies = [ "idna", "lazy_static", "log", - "rand", + "rand 0.7.3", "smallvec", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index f7fbdc5..9ab5250 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -65,6 +65,12 @@ trust-dns-resolver = "0.19.6" regex = "1.4.2" # jwt jsonwebtokens jsonwebtoken = "7.2.0" +# Performance measurements +tracing = "0.1.25" +opentelemetry = "0.12.0" +tracing-subscriber = "0.2.16" +tracing-opentelemetry = "0.11.0" +opentelemetry-jaeger = "0.11.0" [features] default = ["conduit_bin"] diff --git a/conduit-example.toml b/conduit-example.toml index b82da2c..b1bc618 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -28,8 +28,12 @@ max_request_size = 20_000_000 # in bytes # Disable encryption, so no new encrypted rooms can be created # Note: existing rooms will continue to work #allow_encryption = true + #allow_federation = false +# Enable jaeger to support monitoring and troubleshooting through jaeger +#allow_jaeger = false + #cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #max_concurrent_requests = 4 # How many requests Conduit sends to other servers at the same time #workers = 4 # default: cpu core count * 2 diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 75544b7..044468b 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -40,6 +40,7 @@ const GUEST_NAME_LENGTH: usize = 10; feature = "conduit_bin", get("/_matrix/client/r0/register/available", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_register_available_route( db: State<'_, Database>, body: Ruma>, @@ -82,6 +83,7 @@ pub async fn get_register_available_route( feature = "conduit_bin", post("/_matrix/client/r0/register", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn register_route( db: State<'_, Database>, body: Ruma>, @@ -546,6 +548,7 @@ pub async fn register_route( feature = "conduit_bin", post("/_matrix/client/r0/account/password", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn change_password_route( db: State<'_, Database>, body: Ruma>, @@ -610,6 +613,7 @@ pub async fn change_password_route( feature = 
"conduit_bin", get("/_matrix/client/r0/account/whoami", data = "") )] +#[tracing::instrument(skip(body))] pub async fn whoami_route(body: Ruma) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(whoami::Response { @@ -630,6 +634,7 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult, body: Ruma>, diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 498e882..b8c16d9 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -19,6 +19,7 @@ use rocket::{delete, get, put}; feature = "conduit_bin", put("/_matrix/client/r0/directory/room/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_alias_route( db: State<'_, Database>, body: Ruma>, @@ -39,6 +40,7 @@ pub async fn create_alias_route( feature = "conduit_bin", delete("/_matrix/client/r0/directory/room/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( db: State<'_, Database>, body: Ruma>, @@ -54,6 +56,7 @@ pub async fn delete_alias_route( feature = "conduit_bin", get("/_matrix/client/r0/directory/room/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_alias_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 0f34ba7..f33d0de 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -17,6 +17,7 @@ use rocket::{delete, get, post, put}; feature = "conduit_bin", post("/_matrix/client/unstable/room_keys/version", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_backup_route( db: State<'_, Database>, body: Ruma, @@ -35,6 +36,7 @@ pub async fn create_backup_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn update_backup_route( db: State<'_, Database>, body: Ruma>, @@ -52,6 +54,7 @@ pub async fn update_backup_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/version", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_latest_backup_route( db: State<'_, Database>, body: Ruma, @@ -79,6 +82,7 @@ pub async fn get_latest_backup_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_backup_route( db: State<'_, Database>, body: Ruma>, @@ -105,6 +109,7 @@ pub async fn get_backup_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/version/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_backup_route( db: State<'_, Database>, body: Ruma>, @@ -123,6 +128,7 @@ pub async fn delete_backup_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn add_backup_keys_route( db: State<'_, Database>, body: Ruma>, @@ -156,6 +162,7 @@ pub async fn add_backup_keys_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn add_backup_key_sessions_route( db: State<'_, Database>, body: Ruma>, @@ -187,6 +194,7 @@ pub async fn add_backup_key_sessions_route( feature = "conduit_bin", put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn add_backup_key_session_route( db: State<'_, Database>, body: Ruma>, @@ -215,6 +223,7 @@ pub async fn 
add_backup_key_session_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_backup_keys_route( db: State<'_, Database>, body: Ruma>, @@ -230,6 +239,7 @@ pub async fn get_backup_keys_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_backup_key_sessions_route( db: State<'_, Database>, body: Ruma>, @@ -247,6 +257,7 @@ pub async fn get_backup_key_sessions_route( feature = "conduit_bin", get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_backup_key_session_route( db: State<'_, Database>, body: Ruma>, @@ -270,6 +281,7 @@ pub async fn get_backup_key_session_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_backup_keys_route( db: State<'_, Database>, body: Ruma>, @@ -292,6 +304,7 @@ pub async fn delete_backup_keys_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_sessions_route( db: State<'_, Database>, body: Ruma>, @@ -314,6 +327,7 @@ pub async fn delete_backup_key_sessions_route( feature = "conduit_bin", delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_session_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index fa12a08..b4fdf69 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -9,6 +9,7 @@ use rocket::get; /// /// Get information on this server's supported feature set and other relevent capabilities. 
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/capabilities"))] +#[tracing::instrument] pub async fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); available.insert( diff --git a/src/client_server/config.rs b/src/client_server/config.rs index f1d233a..aece96e 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -16,6 +16,7 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( db: State<'_, Database>, body: Ruma>, @@ -49,6 +50,7 @@ pub async fn set_global_account_data_route( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/context.rs b/src/client_server/context.rs index f2a8cd4..cb9aaf9 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -10,6 +10,7 @@ use rocket::get; feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_context_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 86ac511..1950c5c 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -16,6 +16,7 @@ use rocket::{delete, get, post, put}; feature = "conduit_bin", get("/_matrix/client/r0/devices", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_devices_route( db: State<'_, Database>, body: Ruma, @@ -35,6 +36,7 @@ pub async fn get_devices_route( feature = "conduit_bin", get("/_matrix/client/r0/devices/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_device_route( db: State<'_, Database>, body: Ruma>, @@ -53,6 +55,7 @@ pub async fn get_device_route( feature = "conduit_bin", put("/_matrix/client/r0/devices/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn update_device_route( db: State<'_, Database>, body: Ruma>, @@ -78,6 +81,7 @@ pub async fn update_device_route( feature = "conduit_bin", delete("/_matrix/client/r0/devices/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_device_route( db: State<'_, Database>, body: Ruma>, @@ -126,6 +130,7 @@ pub async fn delete_device_route( feature = "conduit_bin", post("/_matrix/client/r0/delete_devices", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index fa5db3a..1c72915 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -31,6 +31,7 @@ use rocket::{get, post, put}; feature = "conduit_bin", post("/_matrix/client/r0/publicRooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma>, @@ -50,6 +51,7 @@ pub async fn get_public_rooms_filtered_route( feature = "conduit_bin", get("/_matrix/client/r0/publicRooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: State<'_, Database>, body: Ruma>, @@ -78,6 +80,7 @@ pub async fn get_public_rooms_route( feature = "conduit_bin", put("/_matrix/client/r0/directory/list/room/<_>", data = "") )] +#[tracing::instrument(skip(db, 
body))] pub async fn set_room_visibility_route( db: State<'_, Database>, body: Ruma>, @@ -107,6 +110,7 @@ pub async fn set_room_visibility_route( feature = "conduit_bin", get("/_matrix/client/r0/directory/list/room/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_room_visibility_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 4513ab4..a08eb34 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -5,6 +5,7 @@ use ruma::api::client::r0::filter::{self, create_filter, get_filter}; use rocket::{get, post}; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] +#[tracing::instrument] pub async fn get_filter_route() -> ConduitResult { // TODO Ok(get_filter::Response::new(filter::IncomingFilterDefinition { @@ -18,6 +19,7 @@ pub async fn get_filter_route() -> ConduitResult { } #[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] +#[tracing::instrument] pub async fn create_filter_route() -> ConduitResult { // TODO Ok(create_filter::Response::new(utils::random_string(10)).into()) diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 8426518..08bb4c6 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -22,6 +22,7 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/keys/upload", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( db: State<'_, Database>, body: Ruma, @@ -70,6 +71,7 @@ pub async fn upload_keys_route( feature = "conduit_bin", post("/_matrix/client/r0/keys/query", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: State<'_, Database>, body: Ruma>, @@ -150,6 +152,7 @@ pub async fn get_keys_route( feature = "conduit_bin", post("/_matrix/client/r0/keys/claim", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: State<'_, Database>, body: Ruma, @@ -183,6 +186,7 @@ pub async fn claim_keys_route( feature = "conduit_bin", post("/_matrix/client/unstable/keys/device_signing/upload", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( db: State<'_, Database>, body: Ruma>, @@ -240,6 +244,7 @@ pub async fn upload_signing_keys_route( feature = "conduit_bin", post("/_matrix/client/unstable/keys/signatures/upload", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( db: State<'_, Database>, body: Ruma, @@ -300,6 +305,7 @@ pub async fn upload_signatures_route( feature = "conduit_bin", get("/_matrix/client/r0/keys/changes", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 156040b..2db4fc6 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -12,6 +12,7 @@ use std::convert::TryInto; const MXC_LENGTH: usize = 32; #[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] +#[tracing::instrument(skip(db))] pub async fn get_media_config_route( db: State<'_, Database>, ) -> ConduitResult { @@ -25,6 +26,7 @@ pub async fn get_media_config_route( feature = "conduit_bin", post("/_matrix/media/r0/upload", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_content_route( db: State<'_, Database>, body: Ruma>, @@ -54,6 +56,7 @@ pub async fn create_content_route( feature = 
"conduit_bin", get("/_matrix/media/r0/download/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_content_route( db: State<'_, Database>, body: Ruma>, @@ -103,6 +106,7 @@ pub async fn get_content_route( feature = "conduit_bin", get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_content_thumbnail_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b459d37..287cfbb 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -36,6 +36,7 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/join", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( db: State<'_, Database>, body: Ruma>, @@ -54,6 +55,7 @@ pub async fn join_room_by_id_route( feature = "conduit_bin", post("/_matrix/client/r0/join/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( db: State<'_, Database>, body: Ruma>, @@ -88,6 +90,7 @@ pub async fn join_room_by_id_or_alias_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/leave", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn leave_room_route( db: State<'_, Database>, body: Ruma>, @@ -140,6 +143,7 @@ pub async fn leave_room_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/invite", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn invite_user_route( db: State<'_, Database>, body: Ruma>, @@ -183,6 +187,7 @@ pub async fn invite_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/kick", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn kick_user_route( db: State<'_, Database>, body: Ruma>, @@ -236,6 +241,7 @@ pub async fn kick_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/ban", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn ban_user_route( db: State<'_, Database>, body: Ruma>, @@ -296,6 +302,7 @@ pub async fn ban_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/unban", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn unban_user_route( db: State<'_, Database>, body: Ruma>, @@ -348,6 +355,7 @@ pub async fn unban_user_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/forget", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn forget_room_route( db: State<'_, Database>, body: Ruma>, @@ -365,6 +373,7 @@ pub async fn forget_room_route( feature = "conduit_bin", get("/_matrix/client/r0/joined_rooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( db: State<'_, Database>, body: Ruma, @@ -385,6 +394,7 @@ pub async fn joined_rooms_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/members", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( db: State<'_, Database>, body: Ruma>, @@ -414,6 +424,7 @@ pub async fn get_member_events_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn joined_members_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 3640730..39a61cb 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -17,6 +17,7 @@ use rocket::{get, put}; feature = 
"conduit_bin", put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_message_event_route( db: State<'_, Database>, body: Ruma>, @@ -88,6 +89,7 @@ pub async fn send_message_event_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/messages", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_message_events_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index 672957b..dd8e7a6 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -75,6 +75,7 @@ const SESSION_ID_LENGTH: usize = 256; #[cfg(feature = "conduit_bin")] #[options("/<_..>")] +#[tracing::instrument] pub async fn options_route() -> ConduitResult { Ok(send_event_to_device::Response.into()) } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 15c746e..175853f 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -10,6 +10,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/presence/<_>/status", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_presence_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 761443d..bd8425a 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -19,6 +19,7 @@ use std::convert::TryInto; feature = "conduit_bin", put("/_matrix/client/r0/profile/<_>/displayname", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( db: State<'_, Database>, body: Ruma>, @@ -102,6 +103,7 @@ pub async fn set_displayname_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>/displayname", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( db: State<'_, Database>, body: Ruma>, @@ -116,6 +118,7 @@ pub async fn get_displayname_route( feature = "conduit_bin", put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( db: State<'_, Database>, body: Ruma>, @@ -199,6 +202,7 @@ pub async fn set_avatar_url_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( db: State<'_, Database>, body: Ruma>, @@ -213,6 +217,7 @@ pub async fn get_avatar_url_route( feature = "conduit_bin", get("/_matrix/client/r0/profile/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_profile_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 667d667..03da73a 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -22,6 +22,7 @@ use rocket::{delete, get, post, put}; feature = "conduit_bin", get("/_matrix/client/r0/pushrules", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( db: State<'_, Database>, body: Ruma, @@ -46,6 +47,7 @@ pub async fn get_pushrules_all_route( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( db: State<'_, Database>, body: Ruma>, @@ -101,6 +103,7 @@ pub async fn get_pushrule_route( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_route( 
db: State<'_, Database>, body: Ruma>, @@ -247,6 +250,7 @@ pub async fn set_pushrule_route( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( db: State<'_, Database>, body: Ruma>, @@ -310,6 +314,7 @@ pub async fn get_pushrule_actions_route( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( db: State<'_, Database>, body: Ruma>, @@ -413,6 +418,7 @@ pub async fn set_pushrule_actions_route( feature = "conduit_bin", get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( db: State<'_, Database>, body: Ruma>, @@ -473,6 +479,7 @@ pub async fn get_pushrule_enabled_route( feature = "conduit_bin", put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( db: State<'_, Database>, body: Ruma>, @@ -576,6 +583,7 @@ pub async fn set_pushrule_enabled_route( feature = "conduit_bin", delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( db: State<'_, Database>, body: Ruma>, @@ -666,6 +674,7 @@ pub async fn delete_pushrule_route( } #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/pushers"))] +#[tracing::instrument] pub async fn get_pushers_route() -> ConduitResult { Ok(get_pushers::Response { pushers: Vec::new(), @@ -674,6 +683,7 @@ pub async fn get_pushers_route() -> ConduitResult { } #[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/pushers/set"))] +#[tracing::instrument(skip(db))] pub async fn set_pushers_route(db: State<'_, Database>) -> ConduitResult { db.flush().await?; diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index f7d3712..555b7e7 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -16,6 +16,7 @@ use std::{collections::BTreeMap, time::SystemTime}; feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn set_read_marker_route( db: State<'_, Database>, body: Ruma>, @@ -84,6 +85,7 @@ pub async fn set_read_marker_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_receipt_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 212e751..af277db 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -12,6 +12,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn redact_event_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 092e083..e2c931c 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,6 +22,7 @@ use rocket::{get, post}; feature = "conduit_bin", post("/_matrix/client/r0/createRoom", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn create_room_route( db: State<'_, Database>, body: Ruma>, @@ -350,6 +351,7 @@ pub async fn create_room_route( feature = "conduit_bin", 
get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( db: State<'_, Database>, body: Ruma>, @@ -377,6 +379,7 @@ pub async fn get_room_event_route( feature = "conduit_bin", post("/_matrix/client/r0/rooms/<_room_id>/upgrade", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 5fb87f0..a668a0d 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -11,6 +11,7 @@ use std::collections::BTreeMap; feature = "conduit_bin", post("/_matrix/client/r0/search", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn search_events_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 1b2583c..8c8b643 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -24,6 +24,7 @@ use rocket::{get, post}; /// Get the homeserver's supported login types. One of these should be used as the `type` field /// when logging in. #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] +#[tracing::instrument] pub async fn get_login_types_route() -> ConduitResult { Ok(get_login_types::Response::new(vec![get_login_types::LoginType::Password]).into()) } @@ -42,6 +43,7 @@ pub async fn get_login_types_route() -> ConduitResult feature = "conduit_bin", post("/_matrix/client/r0/login", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn login_route( db: State<'_, Database>, body: Ruma>, @@ -156,6 +158,7 @@ pub async fn login_route( feature = "conduit_bin", post("/_matrix/client/r0/logout", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn logout_route( db: State<'_, Database>, body: Ruma, @@ -183,6 +186,7 @@ pub async fn logout_route( feature = "conduit_bin", post("/_matrix/client/r0/logout/all", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn logout_all_route( db: State<'_, Database>, body: Ruma, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index faa415d..073d94f 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -22,6 +22,7 @@ use rocket::{get, put}; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( db: State<'_, Database>, body: Ruma>, @@ -55,6 +56,7 @@ pub async fn send_state_event_for_key_route( feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( db: State<'_, Database>, body: Ruma>, @@ -96,6 +98,7 @@ pub async fn send_state_event_for_empty_key_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( db: State<'_, Database>, body: Ruma>, @@ -142,6 +145,7 @@ pub async fn get_state_events_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( db: State<'_, Database>, body: Ruma>, @@ -193,6 +197,7 @@ pub async fn get_state_events_for_key_route( feature = "conduit_bin", get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn 
get_state_events_for_empty_key_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 494c773..b4d0520 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -30,6 +30,7 @@ use std::{ feature = "conduit_bin", get("/_matrix/client/r0/sync", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn sync_events_route( db: State<'_, Database>, body: Ruma>, @@ -310,8 +311,7 @@ pub async fn sync_events_route( }; let state_events = if joined_since_last_sync { - db.rooms - .room_state_full(&room_id)? + current_state .into_iter() .map(|(_, pdu)| pdu.to_sync_state_event()) .collect() @@ -708,6 +708,7 @@ pub async fn sync_events_route( Ok(response.into()) } +#[tracing::instrument(skip(db))] fn share_encrypted_room( db: &Database, sender_user: &UserId, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 7bbf9e8..21264a1 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -13,6 +13,7 @@ use rocket::{delete, get, put}; feature = "conduit_bin", put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn update_tag_route( db: State<'_, Database>, body: Ruma>, @@ -49,6 +50,7 @@ pub async fn update_tag_route( feature = "conduit_bin", delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn delete_tag_route( db: State<'_, Database>, body: Ruma>, @@ -82,6 +84,7 @@ pub async fn delete_tag_route( feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_tags_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index c775e9b..3c07699 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -10,6 +10,7 @@ use std::collections::BTreeMap; feature = "conduit_bin", get("/_matrix/client/r0/thirdparty/protocols") )] +#[tracing::instrument] pub async fn get_protocols_route() -> ConduitResult { warn!("TODO: get_protocols_route"); Ok(get_protocols::Response { diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 5bc001e..460bd05 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -12,6 +12,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index e90746e..4b7feb7 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -10,6 +10,7 @@ use rocket::put; feature = "conduit_bin", put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub fn create_typing_event_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index e51ed56..d25dce6 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -15,6 +15,7 @@ use rocket::get; /// Note: Unstable features are used while developing new features. 
Clients should avoid using /// unstable features in their stable releases #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] +#[tracing::instrument] pub async fn get_supported_versions_route() -> ConduitResult { let mut resp = get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]); diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index 5829364..b358274 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -9,6 +9,7 @@ use rocket::post; feature = "conduit_bin", post("/_matrix/client/r0/user_directory/search", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn search_users_route( db: State<'_, Database>, body: Ruma>, diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 9216f1a..7924a7f 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -6,6 +6,7 @@ use std::time::Duration; use rocket::get; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] +#[tracing::instrument] pub async fn turn_server_route() -> ConduitResult { Ok(get_turn_server_info::Response { username: "".to_owned(), diff --git a/src/database.rs b/src/database.rs index 8fcffd9..6dc9c70 100644 --- a/src/database.rs +++ b/src/database.rs @@ -38,6 +38,8 @@ pub struct Config { allow_encryption: bool, #[serde(default = "false_fn")] allow_federation: bool, + #[serde(default = "false_fn")] + pub allow_jaeger: bool, jwt_secret: Option, } diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 855ebfe..38e6c32 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -74,6 +74,7 @@ impl AccountData { } /// Returns all changes to the account data that happened after `since`. + #[tracing::instrument(skip(self))] pub fn changes_since( &self, room_id: Option<&RoomId>, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b35d006..7e80134 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -108,6 +108,7 @@ impl StateStore for Rooms { impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. + #[tracing::instrument(skip(self))] pub fn state_full( &self, room_id: &RoomId, @@ -145,6 +146,7 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] pub fn state_get( &self, room_id: &RoomId, @@ -186,11 +188,13 @@ impl Rooms { } /// Returns the last state hash key added to the db. + #[tracing::instrument(skip(self))] pub fn pdu_state_hash(&self, pdu_id: &[u8]) -> Result> { Ok(self.pduid_statehash.get(pdu_id)?) } /// Returns the last state hash key added to the db for the given room. + #[tracing::instrument(skip(self))] pub fn current_state_hash(&self, room_id: &RoomId) -> Result> { Ok(self.roomid_statehash.get(room_id.as_bytes())?) } @@ -290,6 +294,7 @@ impl Rooms { } /// Returns the full room state. + #[tracing::instrument(skip(self))] pub fn room_state_full(&self, room_id: &RoomId) -> Result> { if let Some(current_state_hash) = self.current_state_hash(room_id)? { self.state_full(&room_id, ¤t_state_hash) @@ -299,6 +304,7 @@ impl Rooms { } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] pub fn room_state_get( &self, room_id: &RoomId, @@ -313,6 +319,7 @@ impl Rooms { } /// Returns the `count` of this pdu's id. 
+ #[tracing::instrument(skip(self))] pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { Ok( utils::u64_from_bytes(&pdu_id[pdu_id.len() - mem::size_of::()..pdu_id.len()]) @@ -1024,6 +1031,7 @@ impl Rooms { } /// Returns an iterator over all PDUs in a room. + #[tracing::instrument(skip(self))] pub fn all_pdus( &self, user_id: &UserId, @@ -1034,6 +1042,7 @@ impl Rooms { /// Returns a double-ended iterator over all events in a room that happened after the event with id `since` /// in chronological order. + #[tracing::instrument(skip(self))] pub fn pdus_since( &self, user_id: &UserId, @@ -1100,6 +1109,7 @@ impl Rooms { /// Returns an iterator over all events and their token in a room that happened after the event /// with id `from` in chronological order. + #[tracing::instrument(skip(self))] pub fn pdus_after( &self, user_id: &UserId, @@ -1449,6 +1459,7 @@ impl Rooms { )) } + #[tracing::instrument(skip(self))] pub fn get_shared_rooms<'a>( &'a self, users: Vec, @@ -1510,6 +1521,7 @@ impl Rooms { } /// Returns an iterator over all joined members of a room. + #[tracing::instrument(skip(self))] pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1558,6 +1570,7 @@ impl Rooms { } /// Returns an iterator over all invited members of a room. + #[tracing::instrument(skip(self))] pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1582,6 +1595,7 @@ impl Rooms { } /// Returns an iterator over all rooms this user joined. + #[tracing::instrument(skip(self))] pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> { self.userroomid_joined .scan_prefix(user_id.as_bytes()) @@ -1603,6 +1617,7 @@ impl Rooms { } /// Returns an iterator over all rooms a user was invited to. + #[tracing::instrument(skip(self))] pub fn rooms_invited(&self, user_id: &UserId) -> impl Iterator> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1627,6 +1642,7 @@ impl Rooms { } /// Returns an iterator over all rooms a user left. + #[tracing::instrument(skip(self))] pub fn rooms_left(&self, user_id: &UserId) -> impl Iterator> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 29edc2a..8433884 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -71,6 +71,7 @@ impl RoomEdus { } /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + #[tracing::instrument(skip(self))] pub fn readreceipts_since( &self, room_id: &RoomId, @@ -116,6 +117,7 @@ impl RoomEdus { } /// Returns the private read marker. + #[tracing::instrument(skip(self))] pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { let mut key = room_id.to_string().as_bytes().to_vec(); key.push(0xff); @@ -257,6 +259,7 @@ impl RoomEdus { } /// Returns the count of the last typing update in this room. + #[tracing::instrument(skip(self, globals))] pub fn last_typing_update( &self, room_id: &RoomId, @@ -340,6 +343,7 @@ impl RoomEdus { } /// Resets the presence timeout, so the user will stay in their current presence state. 
+ #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( &user_id.to_string().as_bytes(), @@ -430,6 +434,7 @@ impl RoomEdus { } /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. + #[tracing::instrument(skip(self, globals, rooms))] pub fn presence_since( &self, room_id: &RoomId, diff --git a/src/database/sending.rs b/src/database/sending.rs index 8c487e1..2e50710 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -211,6 +211,7 @@ impl Sending { }); } + #[tracing::instrument(skip(self))] pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> { let mut key = server.as_bytes().to_vec(); key.push(0xff); @@ -220,6 +221,7 @@ impl Sending { Ok(()) } + #[tracing::instrument(skip(self))] pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { let mut key = "+".as_bytes().to_vec(); key.extend_from_slice(appservice_id.as_bytes()); @@ -230,6 +232,7 @@ impl Sending { Ok(()) } + #[tracing::instrument] fn calculate_hash(keys: &[IVec]) -> Vec { // We only hash the pdu's event ids, not the whole pdu let bytes = keys.join(&0xff); @@ -237,6 +240,7 @@ impl Sending { hash.as_ref().to_owned() } + #[tracing::instrument(skip(globals, rooms, appservice))] async fn handle_event( server: Box, is_appservice: bool, @@ -359,6 +363,7 @@ impl Sending { )) } + #[tracing::instrument(skip(self, globals))] pub async fn send_federation_request( &self, globals: &crate::database::globals::Globals, @@ -375,6 +380,7 @@ impl Sending { response } + #[tracing::instrument(skip(self, globals))] pub async fn send_appservice_request( &self, globals: &crate::database::globals::Globals, diff --git a/src/database/users.rs b/src/database/users.rs index 9da0776..985647a 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -312,6 +312,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate .get(&user_id.to_string().as_bytes())? 
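As a minimal sketch of how the `#[tracing::instrument]` attributes added throughout these hunks behave (assuming only the `tracing` and `tracing-subscriber` crates; the `Rooms` type and `pdu_count` body below are illustrative stand-ins, not Conduit's own code):

use tracing::info;

struct Rooms;

impl Rooms {
    // `skip(self)` keeps the receiver out of the recorded span fields
    // (useful when it is large or has no meaningful `Debug` output);
    // the remaining arguments are captured via their `Debug` impls.
    #[tracing::instrument(skip(self))]
    pub fn pdu_count(&self, pdu_id: &[u8]) -> u64 {
        // This event is emitted inside the span created by the attribute,
        // so it carries the `pdu_id` field automatically.
        info!("computing pdu count");
        pdu_id.len() as u64
    }
}

fn main() {
    // Any subscriber works; `fmt` simply prints spans and events.
    tracing_subscriber::fmt::init();
    Rooms.pdu_count(b"example");
}

With the `allow_jaeger` path added later in `src/main.rs`, the same spans would instead flow through the `tracing_opentelemetry` layer into the Jaeger pipeline rather than a console subscriber.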
@@ -365,6 +366,7 @@ impl Users { .transpose() } + #[tracing::instrument(skip(self))] pub fn count_one_time_keys( &self, user_id: &UserId, @@ -564,6 +566,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self))] pub fn keys_changed( &self, user_or_room_id: &str, @@ -739,6 +742,7 @@ impl Users { Ok(()) } + #[tracing::instrument(skip(self))] pub fn get_to_device_events( &self, user_id: &UserId, @@ -761,6 +765,7 @@ impl Users { Ok(events) } + #[tracing::instrument(skip(self))] pub fn remove_to_device_events( &self, user_id: &UserId, diff --git a/src/error.rs b/src/error.rs index c57843c..65c5b4f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,14 +1,7 @@ -use std::{collections::HashMap, sync::RwLock, time::Duration, time::Instant}; - use log::error; -use ruma::{ - api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}, - events::room::message, -}; +use ruma::api::client::{error::ErrorKind, r0::uiaa::UiaaInfo}; use thiserror::Error; -use crate::{database::admin::AdminCommand, Database}; - #[cfg(feature = "conduit_bin")] use { crate::RumaResponse, @@ -107,62 +100,3 @@ where .respond_to(r) } } - -pub struct ConduitLogger { - pub db: Database, - pub last_logs: RwLock>, -} - -impl log::Log for ConduitLogger { - fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { - true - } - - fn log(&self, record: &log::Record<'_>) { - let output = format!("{} - {}", record.level(), record.args()); - - if self.enabled(record.metadata()) - && (record - .module_path() - .map_or(false, |path| path.starts_with("conduit::")) - || record - .module_path() - .map_or(true, |path| !path.starts_with("rocket::")) // Rockets logs are annoying - && record.metadata().level() <= log::Level::Warn) - { - let first_line = output - .lines() - .next() - .expect("lines always returns one item"); - - eprintln!("{}", output); - - let mute_duration = match record.metadata().level() { - log::Level::Error => Duration::from_secs(60 * 5), // 5 minutes - log::Level::Warn => Duration::from_secs(60 * 60 * 24), // A day - _ => Duration::from_secs(60 * 60 * 24 * 7), // A week - }; - - if self - .last_logs - .read() - .unwrap() - .get(first_line) - .map_or(false, |i| i.elapsed() < mute_duration) - // Don't post this log again for some time - { - return; - } - - if let Ok(mut_last_logs) = &mut self.last_logs.try_write() { - mut_last_logs.insert(first_line.to_owned(), Instant::now()); - } - - self.db.admin.send(AdminCommand::SendMessage( - message::MessageEventContent::notice_plain(output), - )); - } - } - - fn flush(&self) {} -} diff --git a/src/main.rs b/src/main.rs index d5f1f4e..498cfa6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,21 +11,23 @@ mod push_rules; mod ruma_wrapper; mod utils; +use database::Config; pub use database::Database; -pub use error::{ConduitLogger, Error, Result}; +pub use error::{Error, Result}; pub use pdu::PduEvent; pub use rocket::State; use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use log::LevelFilter; use rocket::figment::{ providers::{Env, Format, Toml}, Figment, }; use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; +use tracing::span; +use tracing_subscriber::{prelude::*, Registry}; -fn setup_rocket() -> rocket::Rocket { +fn setup_rocket() -> (rocket::Rocket, Config) { // Force log level off, so we can use our own logger std::env::set_var("CONDUIT_LOG_LEVEL", "off"); @@ -39,7 +41,12 @@ fn setup_rocket() -> rocket::Rocket { ) .merge(Env::prefixed("CONDUIT_").global()); - rocket::custom(config) + let parsed_config = config + 
.extract::() + .expect("It looks like your config is invalid. Please take a look at the error"); + let parsed_config2 = parsed_config.clone(); + + let rocket = rocket::custom(config) .mount( "/", routes![ @@ -163,30 +170,41 @@ fn setup_rocket() -> rocket::Rocket { bad_json_catcher ]) .attach(AdHoc::on_attach("Config", |rocket| async { - let config = rocket - .figment() - .extract() - .expect("It looks like your config is invalid. Please take a look at the error"); - let data = Database::load_or_create(config) + let data = Database::load_or_create(parsed_config2) .await .expect("config is valid"); data.sending .start_handler(&data.globals, &data.rooms, &data.appservice); - log::set_boxed_logger(Box::new(ConduitLogger { - db: data.clone(), - last_logs: Default::default(), - })) - .unwrap(); - log::set_max_level(LevelFilter::Info); Ok(rocket.manage(data)) - })) + })); + + (rocket, parsed_config) } #[rocket::main] async fn main() { - setup_rocket().launch().await.unwrap(); + let (rocket, config) = setup_rocket(); + + if config.allow_jaeger { + let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() + .with_service_name("conduit") + .install() + .unwrap(); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + Registry::default().with(telemetry).try_init().unwrap(); + + let root = span!(tracing::Level::INFO, "app_start", work_units = 2); + let _enter = root.enter(); + + rocket.launch().await.unwrap(); + } else { + let root = span!(tracing::Level::INFO, "app_start", work_units = 2); + let _enter = root.enter(); + + rocket.launch().await.unwrap(); + } } #[catch(404)] diff --git a/src/pdu.rs b/src/pdu.rs index 75ef492..544c073 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -40,6 +40,7 @@ pub struct PduEvent { } impl PduEvent { + #[tracing::instrument(skip(self))] pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> { self.unsigned.clear(); @@ -86,6 +87,7 @@ impl PduEvent { Ok(()) } + #[tracing::instrument(skip(self))] pub fn to_sync_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, @@ -107,6 +109,7 @@ impl PduEvent { } /// This only works for events that are also AnyRoomEvents. 
+ #[tracing::instrument(skip(self))] pub fn to_any_event(&self) -> Raw { let mut json = json!({ "content": self.content, @@ -128,6 +131,7 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] pub fn to_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, @@ -149,6 +153,7 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] pub fn to_state_event(&self) -> Raw { let json = json!({ "content": self.content, @@ -164,20 +169,27 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] pub fn to_sync_state_event(&self) -> Raw { - let json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "unsigned": self.unsigned, - "state_key": self.state_key, - }); + let json = format!( + r#"{{"content":{},"type":"{}","event_id":"{}","sender":"{}","origin_server_ts":{},"unsigned":{},"state_key":"{}"}}"#, + self.content, + self.kind, + self.event_id, + self.sender, + self.origin_server_ts, + serde_json::to_string(&self.unsigned).expect("Map::to_string always works"), + self.state_key + .as_ref() + .expect("state events have state keys") + ); - serde_json::from_value(json).expect("Raw::from_value always works") + Raw::from_json( + serde_json::value::RawValue::from_string(json).expect("our string is valid json"), + ) } + #[tracing::instrument(skip(self))] pub fn to_stripped_state_event(&self) -> Raw { let json = json!({ "content": self.content, @@ -189,6 +201,7 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } + #[tracing::instrument(skip(self))] pub fn to_member_event(&self) -> Raw> { let json = json!({ "content": self.content, @@ -206,6 +219,7 @@ impl PduEvent { } /// This does not return a full `Pdu` it is only to satisfy ruma's types. 
+ #[tracing::instrument] pub fn convert_to_outgoing_federation_event( mut pdu_json: CanonicalJsonObject, ) -> Raw { diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 45fcc7f..898561f 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -82,9 +82,7 @@ where registration .get("as_token") .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| { - dbg!(token.as_deref()) == dbg!(Some(as_token)) - }) + .map_or(false, |as_token| token.as_deref() == Some(as_token)) }) { match T::METADATA.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { diff --git a/src/server_server.rs b/src/server_server.rs index 3fea4da..4ea9bfe 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -28,6 +28,7 @@ use std::{ time::{Duration, SystemTime}, }; +#[tracing::instrument(skip(globals))] pub async fn send_request( globals: &crate::database::globals::Globals, destination: Box, @@ -194,6 +195,7 @@ where } } +#[tracing::instrument] fn get_ip_with_port(destination_str: String) -> Option { if destination_str.parse::().is_ok() { Some(destination_str) @@ -204,6 +206,7 @@ fn get_ip_with_port(destination_str: String) -> Option { } } +#[tracing::instrument] fn add_port_to_hostname(destination_str: String) -> String { match destination_str.find(':') { None => destination_str.to_owned() + ":8448", @@ -214,6 +217,7 @@ fn add_port_to_hostname(destination_str: String) -> String { /// Returns: actual_destination, host header /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification +#[tracing::instrument(skip(globals))] async fn find_actual_destination( globals: &crate::database::globals::Globals, destination: &Box, @@ -272,6 +276,7 @@ async fn find_actual_destination( (actual_destination, host) } +#[tracing::instrument(skip(globals))] async fn query_srv_record<'a>( globals: &crate::database::globals::Globals, hostname: &'a str, @@ -296,6 +301,7 @@ async fn query_srv_record<'a>( } } +#[tracing::instrument(skip(globals))] pub async fn request_well_known( globals: &crate::database::globals::Globals, destination: &str, @@ -319,6 +325,7 @@ pub async fn request_well_known( } #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] +#[tracing::instrument(skip(db))] pub fn get_server_version_route( db: State<'_, Database>, ) -> ConduitResult { @@ -336,6 +343,7 @@ pub fn get_server_version_route( } #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] +#[tracing::instrument(skip(db))] pub fn get_server_keys_route(db: State<'_, Database>) -> Json { if !db.globals.allow_federation() { // TODO: Use proper types @@ -378,6 +386,7 @@ pub fn get_server_keys_route(db: State<'_, Database>) -> Json { } #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] +#[tracing::instrument(skip(db))] pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json { get_server_keys_route(db) } @@ -386,6 +395,7 @@ pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json feature = "conduit_bin", post("/_matrix/federation/v1/publicRooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: State<'_, Database>, body: Ruma>, @@ -433,6 +443,7 @@ pub async fn get_public_rooms_filtered_route( feature = "conduit_bin", get("/_matrix/federation/v1/publicRooms", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn 
get_public_rooms_route( db: State<'_, Database>, body: Ruma>, @@ -480,6 +491,7 @@ pub async fn get_public_rooms_route( feature = "conduit_bin", put("/_matrix/federation/v1/send/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub async fn send_transaction_message_route<'a>( db: State<'a, Database>, body: Ruma>, @@ -585,6 +597,7 @@ pub async fn send_transaction_message_route<'a>( feature = "conduit_bin", post("/_matrix/federation/v1/get_missing_events/<_>", data = "") )] +#[tracing::instrument(skip(db, body))] pub fn get_missing_events_route<'a>( db: State<'a, Database>, body: Ruma>, @@ -630,6 +643,7 @@ pub fn get_missing_events_route<'a>( feature = "conduit_bin", get("/_matrix/federation/v1/query/profile", data = "") )] +#[tracing::instrument(skip(db, body))] pub fn get_profile_information_route<'a>( db: State<'a, Database>, body: Ruma>, From f2ec2be821b49ad565bdb92e44eabe9510cf5455 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 3 Mar 2021 21:41:26 +0100 Subject: [PATCH 55/62] fix: don't always query aliases of appservices only do so if the alias matches the regex in the registration file --- src/client_server/alias.rs | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index b8c16d9..0a8ad08 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,5 +1,6 @@ use super::State; use crate::{ConduitResult, Database, Error, Ruma}; +use regex::Regex; use ruma::{ api::{ appservice, @@ -86,15 +87,23 @@ pub async fn get_alias_helper( Some(r) => room_id = Some(r), None => { for (_id, registration) in db.appservice.iter_all().filter_map(|r| r.ok()) { - if db - .sending - .send_appservice_request( - &db.globals, - registration, - appservice::query::query_room_alias::v1::Request { room_alias }, - ) - .await - .is_ok() + let aliases = registration + .get("namespaces") + .and_then(|ns| ns.get("aliases")) + .and_then(|users| users.get("regex")) + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()); + + if aliases.map_or(false, |aliases| aliases.is_match(room_alias.as_str())) + && db + .sending + .send_appservice_request( + &db.globals, + registration, + appservice::query::query_room_alias::v1::Request { room_alias }, + ) + .await + .is_ok() { room_id = Some(db.rooms.id_from_alias(&room_alias)?.ok_or_else(|| { Error::bad_config("Appservice lied to us. Room does not exist.") From f4039902d866c4fe9ee400ed65ac52fc6c611b72 Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Wed, 3 Mar 2021 22:38:31 +0100 Subject: [PATCH 56/62] Bump Rocket to current HEAD --- Cargo.lock | 745 ++++++++++++++++++++------------------ Cargo.toml | 4 +- src/client_server/sync.rs | 3 +- src/database/admin.rs | 3 +- 4 files changed, 401 insertions(+), 354 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8d48dc..45ea00c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,18 +2,18 @@ # It is not intended for manual editing. 
[[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] [[package]] name = "adler" -version = "0.2.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "adler32" @@ -53,9 +53,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assign" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af5687fe33aec5e70ef14caac5e0d363e335e5e5d6385fb75978d0c241b1d67" +checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-trait" @@ -96,14 +96,14 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" dependencies = [ "addr2line", "cfg-if 1.0.0", "libc", - "miniz_oxide 0.4.3", + "miniz_oxide 0.4.4", "object", "rustc-demangle", ] @@ -151,21 +151,21 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" [[package]] name = "bytemuck" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aa2ec95ca3b5c54cf73c91acf06d24f4495d5f1b1c12506ae3483d646177ac" +checksum = "bed57e2090563b83ba8f83366628ce535a7584c9afa4c9fc0612a03925c6df58" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bytes" @@ -174,10 +174,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] -name = "cc" -version = "1.0.66" +name = "bytes" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + +[[package]] +name = "cc" +version = "1.0.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" [[package]] name = "cfg-if" @@ -200,7 +206,7 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.43", "winapi 0.3.9", ] @@ -236,28 +242,18 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio", + "tokio 1.2.0", "tracing", "tracing-opentelemetry", "tracing-subscriber", "trust-dns-resolver", ] -[[package]] -name = 
"console_error_panic_hook" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if 0.1.10", - "wasm-bindgen", -] - [[package]] name = "const_fn" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -271,7 +267,7 @@ version = "0.15.0-dev" source = "git+https://github.com/SergioBenitez/cookie-rs.git?rev=1c3ca83#1c3ca838543b60a4448d279dc4b903cc7a2bc22a" dependencies = [ "percent-encoding", - "time 0.2.23", + "time 0.2.25", "version_check", ] @@ -302,12 +298,11 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" dependencies = [ "cfg-if 1.0.0", - "const_fn", "crossbeam-utils", "lazy_static", "memoffset", @@ -316,9 +311,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" dependencies = [ "autocfg", "cfg-if 1.0.0", @@ -393,9 +388,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" +checksum = "88d7ed2934d741c6b37e33e3832298e8850b53fd2d2bea03873375596c7cea4e" [[package]] name = "either" @@ -405,9 +400,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.26" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801bbab217d7f79c0062f4f7205b5d4427c6d1a7bd7aafdd1475f7c59d62b283" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ "cfg-if 1.0.0", ] @@ -426,10 +421,11 @@ dependencies = [ [[package]] name = "figment" -version = "0.9.4" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13e2d266426f89e45fc544117ade84fad2a58ff676f34cc34e123fe4391b856" +checksum = "c38799b106530aa30f774f7fca6d8f7e5f6234a79f427c4fad3c975eaf678931" dependencies = [ + "atomic", "pear", "serde", "toml", @@ -460,9 +456,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", "percent-encoding", @@ -496,9 +492,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" dependencies = [ "futures-channel", "futures-core", @@ -511,9 +507,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" dependencies = [ "futures-core", "futures-sink", @@ -521,15 +517,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" [[package]] name = "futures-executor" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" dependencies = [ "futures-core", "futures-task", @@ -538,15 +534,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" +checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -556,24 +552,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" +checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" -dependencies = [ - "once_cell", -] +checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" dependencies = [ "futures-channel", "futures-core", @@ -582,7 +575,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project 1.0.2", + "pin-project-lite 0.2.5", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -600,11 +593,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi 
0.9.0+wasi-snapshot-preview1", ] @@ -617,7 +610,7 @@ checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] @@ -648,7 +641,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "futures-sink", @@ -656,12 +649,31 @@ dependencies = [ "http", "indexmap", "slab", - "tokio", - "tokio-util", + "tokio 0.2.25", + "tokio-util 0.3.1", "tracing", "tracing-futures", ] +[[package]] +name = "h2" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.2.0", + "tokio-util 0.6.3", + "tracing", +] + [[package]] name = "hashbrown" version = "0.9.1" @@ -670,18 +682,18 @@ checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -699,11 +711,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -714,15 +726,25 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes", + "bytes 0.5.6", + "http", +] + +[[package]] +name = "http-body" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +dependencies = [ + "bytes 1.0.1", "http", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name = "httpdate" @@ -732,23 +754,47 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "hyper" -version = "0.13.9" +version = "0.13.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad767baac13b44d4529fcf58ba2cd0995e36e7b435bc5b039de6f47e880dbf" +checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-channel", "futures-core", 
"futures-util", - "h2", + "h2 0.2.7", "http", - "http-body", + "http-body 0.3.1", "httparse", "httpdate", "itoa", - "pin-project 1.0.2", + "pin-project", "socket2", - "tokio", + "tokio 0.2.25", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.1", + "http", + "http-body 0.4.0", + "httparse", + "httpdate", + "itoa", + "pin-project", + "socket2", + "tokio 1.2.0", "tower-service", "tracing", "want", @@ -760,18 +806,18 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ - "bytes", - "hyper", + "bytes 0.5.6", + "hyper 0.13.10", "native-tls", - "tokio", + "tokio 0.2.25", "tokio-tls", ] [[package]] name = "idna" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" dependencies = [ "matches", "unicode-bidi", @@ -780,9 +826,9 @@ dependencies = [ [[package]] name = "image" -version = "0.23.12" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ce04077ead78e39ae8610ad26216aed811996b043d47beed5090db674f9e9b5" +checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1" dependencies = [ "bytemuck", "byteorder", @@ -797,9 +843,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", "hashbrown", @@ -864,24 +910,21 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "jpeg-decoder" -version = "0.1.20" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc797adac5f083b8ff0ca6f6294a999393d76e197c36488e2ef732c4715f6fa3" -dependencies = [ - "byteorder", -] +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "dc9f84f9b115ce7843d60706df1422a916680bfdfcbdb0447c5614ff9d7e4d78" dependencies = [ "wasm-bindgen", ] @@ -927,15 +970,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "265d751d31d6780a3f956bb5b8022feba2d94eeee5a84ba64f4212eedca42213" [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -948,11 +991,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.11" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", ] [[package]] @@ -1033,9 +1076,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", "autocfg", @@ -1054,21 +1097,23 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "a5dede4e2065b3842b8b0af444119f3aa331cc7cc2dd20388bfb0f5d5a38823a" dependencies = [ - "iovec", "libc", - "mio", + "log", + "miow 0.3.6", + "ntapi", + "winapi 0.3.9", ] [[package]] @@ -1084,10 +1129,20 @@ dependencies = [ ] [[package]] -name = "native-tls" -version = "0.2.6" +name = "miow" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc7939b5edc4e4f86b1b4a04bb1498afaaf871b1a6691838ed06fcb48d3a3f" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2", + "winapi 0.3.9", +] + +[[package]] +name = "native-tls" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", @@ -1103,15 +1158,24 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.36" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cf75f38f16cb05ea017784dc6dbfd354f76c223dba37701734c4f5a9337d02" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-bigint" version = "0.2.6" @@ -1176,24 +1240,24 @@ dependencies = [ [[package]] name = "object" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" [[package]] name = "once_cell" -version = "1.5.2" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" [[package]] name = "openssl" -version = "0.10.30" 
+version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "foreign-types", "lazy_static", "libc", @@ -1208,18 +1272,18 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.12.0+1.1.1h" +version = "111.14.0+1.1.1j" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61" +checksum = "055b569b5bd7e5462a1700f595c7c7d487691d73b5ce064176af7f9f0cbb80a9" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" dependencies = [ "autocfg", "cc", @@ -1240,7 +1304,7 @@ dependencies = [ "js-sys", "lazy_static", "percent-encoding", - "pin-project 1.0.2", + "pin-project", "rand 0.8.3", "thiserror", ] @@ -1280,29 +1344,29 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.5", "smallvec", "winapi 0.3.9", ] [[package]] name = "paste" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7151b083b0664ed58ed669fcdd92f01c3d2fdbf10af4931a301474950b52bfa9" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f612cbd0f9dd03f5dd28a191c48e4148c3b027e41207b32eee130373c6c941" +checksum = "3e61c26b3b7e7ef4bd0b17d2943b4620ca4682721f35a51c7fec1f5ae6325150" dependencies = [ "inlinable_string", "pear_codegen", @@ -1311,9 +1375,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602cf1780ee9bbca663ea75769e05643e16fe87d7c8ac9f4f385a2ed8940a75c" +checksum = "b35ff95312c89207a3770143c628d2788cf4f7dcc230b25d9623e863d5b30b84" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1323,9 +1387,9 @@ dependencies = [ [[package]] name = "pem" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c220d01f863d13d96ca82359d1e81e64a7c6bf0637bcde7b2349630addf0c6" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" dependencies = [ "base64 0.13.0", "once_cell", @@ -1340,38 +1404,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.27" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" dependencies = [ - 
"pin-project-internal 0.4.27", -] - -[[package]] -name = "pin-project" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" -dependencies = [ - "pin-project-internal 1.0.2", + "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.27" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" dependencies = [ "proc-macro2", "quote", @@ -1380,15 +1424,15 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" +checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "0cf491442e4b033ed1c722cb9f0df5fcfcf4de682466c46469c36bc47dc5548a" [[package]] name = "pin-utils" @@ -1404,9 +1448,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "png" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe7f9f1c730833200b134370e1d5098964231af8450bce9b78ee3ab5278b970" +checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" dependencies = [ "bitflags", "crc32fast", @@ -1437,9 +1481,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" @@ -1471,9 +1515,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] @@ -1484,7 +1528,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1529,7 +1573,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] @@ -1565,31 +1609,40 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", - "redox_syscall", + "getrandom 0.1.16", + "redox_syscall 0.1.57", "rust-argon2", ] [[package]] name = "ref-cast" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -1598,9 +1651,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -1620,9 +1673,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "remove_dir_all" @@ -1635,18 +1688,18 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb15d6255c792356a0f578d8a645c677904dc02e862bebe2ecc18e0c01b9a0ce" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "base64 0.13.0", - "bytes", + "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", "http", - "http-body", - "hyper", + "http-body 0.3.1", + "hyper 0.13.10", "hyper-tls", "ipnet", "js-sys", @@ -1656,15 +1709,14 @@ dependencies = [ "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.5", "serde", "serde_urlencoded", - "tokio", + "tokio 0.2.25", "tokio-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test", "web-sys", "winreg 0.7.0", ] @@ -1681,9 +1733,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -1697,7 +1749,7 @@ dependencies = [ [[package]] name = "rocket" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = 
"git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" dependencies = [ "async-trait", "atomic", @@ -1710,14 +1762,14 @@ dependencies = [ "memchr", "num_cpus", "parking_lot", - "rand 0.7.3", + "rand 0.8.3", "ref-cast", "rocket_codegen", "rocket_http", "serde", "state", - "time 0.2.23", - "tokio", + "time 0.2.25", + "tokio 1.2.0", "ubyte", "version_check", "yansi", @@ -1726,7 +1778,7 @@ dependencies = [ [[package]] name = "rocket_codegen" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" dependencies = [ "devise", "glob", @@ -1738,23 +1790,24 @@ dependencies = [ [[package]] name = "rocket_http" version = "0.5.0-dev" -source = "git+https://github.com/SergioBenitez/Rocket.git?rev=1f1f44f336e5a172361fc1860461bb03667b1ed2#1f1f44f336e5a172361fc1860461bb03667b1ed2" +source = "git+https://github.com/SergioBenitez/Rocket.git?rev=93e62c86eddf7cc9a7fc40b044182f83f0d7d92a#93e62c86eddf7cc9a7fc40b044182f83f0d7d92a" dependencies = [ "cookie", "either", "http", - "hyper", + "hyper 0.14.4", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", + "pin-project-lite 0.2.5", "ref-cast", "smallvec", "state", - "time 0.2.23", - "tokio", + "time 0.2.25", + "tokio 1.2.0", "tokio-rustls", "uncased", "unicode-xid", @@ -1990,11 +2043,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "log", "ring", "sct", @@ -2017,12 +2070,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2041,9 +2088,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1759c2e3c8580017a484a7ac56d3abc5a6c1feadf88db2f3633f12ae4268c69" +checksum = "2dfd318104249865096c8da1dfabf09ddbb6d0330ea176812a62ec75e40c4166" dependencies = [ "bitflags", "core-foundation", @@ -2054,9 +2101,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f99b9d5e26d2a71633cc4f2ebae7cc9f874044e0c351a27e17892d76dce5678b" +checksum = "dee48cdde5ed250b0d3252818f646e174ab414036edb884dde62d80a3ac6082d" dependencies = [ "core-foundation-sys", "libc", @@ -2079,18 +2126,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" 
+version = "1.0.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" dependencies = [ "proc-macro2", "quote", @@ -2099,9 +2146,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.60" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" dependencies = [ "itoa", "ryu", @@ -2122,9 +2169,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.14" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7baae0a99f1a324984bcdc5f0718384c1f69775f1c7eec8b859b71b443e3fd7" +checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" dependencies = [ "dtoa", "linked-hash-map", @@ -2149,9 +2196,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" dependencies = [ "libc", ] @@ -2191,19 +2238,18 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", "winapi 0.3.9", ] @@ -2215,9 +2261,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" dependencies = [ "version_check", ] @@ -2314,9 +2360,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.54" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44" +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", @@ -2325,32 +2371,32 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.3", + "redox_syscall 0.2.5", "remove_dir_all", "winapi 0.3.9", ] [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" dependencies = [ "proc-macro2", "quote", @@ -2359,11 +2405,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -2390,20 +2436,19 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "time" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcdaeea317915d59b2b4cd3b5efcd156c309108664277793f5351700c02ce98b" +checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" dependencies = [ "const_fn", "libc", @@ -2439,9 +2484,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -2454,32 +2499,45 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d7ad61edd59bfcc7e80dababf0f4aed2e6d5e0ba1659356ae889752dfc12ff" +checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" dependencies = [ - "bytes", + "bytes 0.5.6", "fnv", "futures-core", "iovec", "lazy_static", + "memchr", + "mio 0.6.23", + "pin-project-lite 0.1.12", + "slab", +] + +[[package]] +name = "tokio" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8190d04c665ea9e6b6a0dc45523ade572c088d2e6566244c1122671dbf4ae3a" +dependencies = [ + "autocfg", + "bytes 1.0.1", "libc", "memchr", - "mio", - "mio-uds", + "mio 0.7.9", "num_cpus", - "pin-project-lite 0.1.11", + "once_cell", + "pin-project-lite 0.2.5", "signal-hook-registry", - "slab", "tokio-macros", "winapi 0.3.9", ] [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" dependencies = [ "proc-macro2", "quote", @@ -2488,13 +2546,12 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "futures-core", "rustls", - "tokio", + "tokio 1.2.0", "webpki", ] @@ -2505,7 +2562,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio", + "tokio 0.2.25", ] [[package]] @@ -2514,28 +2571,42 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes", + "bytes 0.5.6", "futures-core", "futures-sink", "log", - "pin-project-lite 0.1.11", - "tokio", + "pin-project-lite 0.1.12", + "tokio 0.2.25", +] + +[[package]] +name = "tokio-util" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" +dependencies = [ + "bytes 1.0.1", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.2.5", + "tokio 1.2.0", ] [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" @@ -2545,7 +2616,7 @@ checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.0", + "pin-project-lite 0.2.5", "tracing-attributes", "tracing-core", ] @@ -2572,11 +2643,11 @@ dependencies = [ [[package]] name = "tracing-futures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 0.4.27", + "pin-project", "tracing", ] @@ -2652,7 +2723,7 @@ dependencies = [ "rand 0.7.3", "smallvec", "thiserror", - "tokio", + "tokio 0.2.25", "url", ] @@ -2672,7 +2743,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio", + "tokio 0.2.25", "trust-dns-proto", ] @@ -2693,9 +2764,9 @@ dependencies = [ [[package]] name = "uncased" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "369fa7fd7969c5373541d3c9a40dc1b76ce676fc87aba30d87c0ad3b97fad179" +checksum = "300932469d646d39929ffe84ad5c1837beecf602519ef5695e485b472de4082b" dependencies = [ "version_check", ] @@ -2720,9 +2791,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] @@ -2747,9 +2818,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] 
name = "url" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ "form_urlencoded", "idna", @@ -2759,9 +2830,9 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "version_check" @@ -2787,15 +2858,15 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" dependencies = [ "cfg-if 1.0.0", "serde", @@ -2805,9 +2876,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "5b7d8b6942b8bb3a9b0e73fc79b98095a27de6fa247615e59d096754a3bc2aa8" dependencies = [ "bumpalo", "lazy_static", @@ -2820,9 +2891,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "8e67a5806118af01f0d9045915676b22aaebecf4178ae7021bc171dab0b897ab" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -2832,9 +2903,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "e5ac38da8ef716661f0f36c0d8320b89028efe10c7c0afde65baffb496ce0d3b" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2842,9 +2913,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "cc053ec74d454df287b9374ee8abb36ffd5acb95ba87da3ba5b7d3fe20eb401e" dependencies = [ "proc-macro2", "quote", @@ -2855,39 +2926,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" - -[[package]] -name = "wasm-bindgen-test" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0355fa0c1f9b792a09b6dcb6a8be24d51e71e6d74972f9eb4a44c4c004d24a25" -dependencies = [ - "console_error_panic_hook", - "js-sys", 
- "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e07b46b98024c2ba2f9e83a10c2ef0515f057f2da299c1762a2017de80438b" -dependencies = [ - "proc-macro2", - "quote", -] +checksum = "7d6f8ec44822dd71f5f221a5847fb34acd9060535c1211b70a05844c0f6383b1" [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "ec600b26223b2948cedfde2a0aa6756dcf1fef616f43d7b3097aaf53a6c4d92b" dependencies = [ "js-sys", "wasm-bindgen", @@ -2905,9 +2952,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2bb9fc8309084dd7cd651336673844c1d47f8ef6d2091ec160b27f5c4aa277" +checksum = "4a32b378380f4e9869b22f0b5177c68a5519f03b3454fde0b291455ddbae266c" [[package]] name = "widestring" @@ -2979,9 +3026,9 @@ dependencies = [ [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] diff --git a/Cargo.toml b/Cargo.toml index 9ab5250..27394f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" [dependencies] # Used to handle requests # TODO: This can become optional as soon as proper configs are supported -rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "1f1f44f336e5a172361fc1860461bb03667b1ed2", features = ["tls"] } # Used to handle requests +rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86eddf7cc9a7fc40b044182f83f0d7d92a", features = ["tls"] } # Used to handle requests #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers @@ -28,7 +28,7 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-com #state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "0.2.23" } +tokio = { version = "1.2.0" } # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index b4d0520..fac6b15 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -698,7 +698,8 @@ pub async fn sync_events_route( if duration.as_secs() > 30 { duration = Duration::from_secs(30); } - let mut delay = tokio::time::delay_for(duration); + let delay = tokio::time::sleep(duration); + tokio::pin!(delay); tokio::select! { _ = &mut delay => {} _ = watcher => {} diff --git a/src/database/admin.rs b/src/database/admin.rs index 1fb1983..160f55a 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -7,7 +7,6 @@ use ruma::{ events::{room::message, EventType}, UserId, }; -use tokio::select; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), @@ -71,7 +70,7 @@ impl Admin { }; loop { - select! { + tokio::select! 
{ Some(event) = receiver.next() => { match event { AdminCommand::RegisterAppservice(yaml) => { From 437cb5783a04f41fa31c2906ad4596093f703e3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 4 Mar 2021 11:29:13 +0100 Subject: [PATCH 57/62] fix: apply the same appservice sending rules to events coming from federation currently this code is duplicated from database/rooms.rs, when we refactor server_server.rs we should deduplicate it --- src/server_server.rs | 67 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 4ea9bfe..26d7b1d 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2,6 +2,7 @@ use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Resu use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION, HOST}; use log::{info, warn}; +use regex::Regex; use rocket::{get, post, put, response::content::Json, State}; use ruma::{ api::{ @@ -18,6 +19,7 @@ use ruma::{ OutgoingRequest, }, directory::{IncomingFilter, IncomingRoomNetwork}, + events::EventType, EventId, RoomId, ServerName, ServerSigningKeyId, UserId, }; use std::{ @@ -584,9 +586,70 @@ pub async fn send_transaction_message_route<'a>( db.rooms.set_room_state(&room_id, &next_room_state)?; for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } + if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else( + || Vec::new(), + |users| { + users + .iter() + .map(|users| { + users + .get("regex") + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()) + }) + .filter_map(|o| o) + .collect::>() + }, + ); + let aliases = namespaces + .get("aliases") + .and_then(|users| users.get("regex")) + .and_then(|regex| regex.as_str()) + .and_then(|regex| Regex::new(regex).ok()); + let rooms = namespaces + .get("rooms") + .and_then(|rooms| rooms.as_sequence()); + let room_aliases = db.rooms.room_aliases(&room_id); + + let bridge_user_id = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, db.globals.server_name()).ok() + }); + + if bridge_user_id.map_or(false, |bridge_user_id| { + db.rooms + .is_joined(&bridge_user_id, room_id) + .unwrap_or(false) + }) || users.iter().any(|users| { + users.is_match(pdu.sender.as_str()) + || pdu.kind == EventType::RoomMember + && pdu + .state_key + .as_ref() + .map_or(false, |state_key| users.is_match(&state_key)) + }) || aliases.map_or(false, |aliases| { + room_aliases + .filter_map(|r| r.ok()) + .any(|room_alias| aliases.is_match(room_alias.as_str())) + }) || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || db + .rooms + .room_members(&room_id) + .filter_map(|r| r.ok()) + .any(|member| users.iter().any(|regex| regex.is_match(member.as_str()))) + { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + } + } resolved_map.insert(event_id, Ok::<(), String>(())); } From 105f893cf3be42c85f8ada937764f90a7c27e548 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 4 Mar 2021 12:29:08 +0100 Subject: [PATCH 58/62] chore: bump other dependencies to make it work --- Cargo.lock | 505 ++++++++------------------------- Cargo.toml | 28 +- src/client_server/directory.rs | 4 +- src/database/globals.rs 
| 8 +- src/database/rooms/edus.rs | 3 +- src/database/users.rs | 3 +- src/pdu.rs | 3 +- src/utils.rs | 1 + 8 files changed, 134 insertions(+), 421 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45ea00c..51ccff7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,20 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler32" version = "1.2.0" @@ -36,7 +21,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -85,7 +70,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -94,20 +79,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "backtrace" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide 0.4.4", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" @@ -167,12 +138,6 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.0.1" @@ -185,12 +150,6 @@ version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -207,7 +166,7 @@ dependencies = [ "num-integer", "num-traits", "time 0.1.43", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -224,12 +183,11 @@ dependencies = [ "directories", "http", "image", - "js_int", "jsonwebtoken", "log", "opentelemetry", "opentelemetry-jaeger", - "rand 0.7.3", + "rand 0.8.3", "regex", "reqwest", "ring", @@ -242,7 +200,7 @@ dependencies = [ "sled", "state-res", "thiserror", - "tokio 1.2.0", + "tokio", "tracing", "tracing-opentelemetry", "tracing-subscriber", @@ -293,7 +251,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -302,7 +260,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", "lazy_static", "memoffset", @@ -316,10 +274,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "deflate" version = "0.8.6" @@ -377,7 +341,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ "libc", "redox_users", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -404,7 +368,7 @@ version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -471,25 +435,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures" version = "0.3.13" @@ -575,7 +523,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.5", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -597,7 +545,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -608,7 +556,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.10.2+wasi-snapshot-preview1", ] @@ -623,45 +571,19 @@ dependencies = [ "weezl", ] -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - [[package]] name = "glob" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" -[[package]] -name = "h2" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 0.2.25", - "tokio-util 0.3.1", - "tracing", - "tracing-futures", -] - [[package]] name = "h2" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "futures-core", "futures-sink", @@ -669,8 +591,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.2.0", - "tokio-util 0.6.3", + "tokio", + "tokio-util", "tracing", ] @@ -706,7 +628,7 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -715,28 +637,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -dependencies = [ - "bytes 0.5.6", - "http", -] - [[package]] name = "http-body" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" dependencies = [ - "bytes 1.0.1", + "bytes", "http", ] @@ -752,49 +664,25 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" -[[package]] -name = "hyper" -version = "0.13.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" -dependencies = [ - "bytes 0.5.6", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.2.7", - "http", - "http-body 0.3.1", - "httparse", - "httpdate", - "itoa", - "pin-project", - "socket2", - "tokio 0.2.25", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" dependencies = [ - "bytes 1.0.1", + "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.3.1", + "h2", "http", - "http-body 0.4.0", + "http-body", "httparse", "httpdate", "itoa", "pin-project", "socket2", - "tokio 1.2.0", + "tokio", "tower-service", "tracing", "want", @@ -802,15 +690,15 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", - "hyper 0.13.10", + "bytes", + "hyper", "native-tls", - "tokio 0.2.25", - "tokio-tls", + "tokio", + "tokio-native-tls", ] [[package]] @@ -863,7 +751,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -872,15 +760,6 @@ version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", -] - [[package]] name = "ipconfig" version = "0.2.2" @@ -889,7 +768,7 @@ 
checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" dependencies = [ "socket2", "widestring", - "winapi 0.3.9", + "winapi", "winreg 0.6.2", ] @@ -952,16 +831,6 @@ dependencies = [ "simple_asn1", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -995,7 +864,7 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1055,16 +924,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.3.7" @@ -1074,35 +933,6 @@ dependencies = [ "adler32", ] -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "mio" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" -dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - [[package]] name = "mio" version = "0.7.9" @@ -1111,21 +941,9 @@ checksum = "a5dede4e2065b3842b8b0af444119f3aa331cc7cc2dd20388bfb0f5d5a38823a" dependencies = [ "libc", "log", - "miow 0.3.6", + "miow", "ntapi", - "winapi 0.3.9", -] - -[[package]] -name = "miow" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "winapi", ] [[package]] @@ -1135,7 +953,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ "socket2", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1156,24 +974,13 @@ dependencies = [ "tempfile", ] -[[package]] -name = "net2" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", -] - [[package]] name = "ntapi" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1238,12 +1045,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" - [[package]] name = "once_cell" version = 
"1.7.2" @@ -1257,7 +1058,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "lazy_static", "libc", @@ -1348,12 +1149,12 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.2.5", "smallvec", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1364,9 +1165,9 @@ checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" [[package]] name = "pear" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e61c26b3b7e7ef4bd0b17d2943b4620ca4682721f35a51c7fec1f5ae6325150" +checksum = "86ab3a2b792945ed67eadbbdcbd2898f8dd2319392b2a45ac21adea5245cb113" dependencies = [ "inlinable_string", "pear_codegen", @@ -1375,9 +1176,9 @@ dependencies = [ [[package]] name = "pear_codegen" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35ff95312c89207a3770143c628d2788cf4f7dcc230b25d9623e863d5b30b84" +checksum = "620c9c4776ba41b59ab101360c9b1419c0c8c81cd2e6e39fae7109e7425994cb" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", @@ -1422,12 +1223,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - [[package]] name = "pin-project-lite" version = "0.2.5" @@ -1455,7 +1250,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide 0.3.7", + "miniz_oxide", ] [[package]] @@ -1683,37 +1478,36 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "0460542b551950620a3648c6aa23318ac6b3cd779114bd873209e6e8b5eb1c34" dependencies = [ "base64 0.13.0", - "bytes 0.5.6", + "bytes", "encoding_rs", "futures-core", "futures-util", "http", - "http-body 0.3.1", - "hyper 0.13.10", + "http-body", + "hyper", "hyper-tls", "ipnet", "js-sys", "lazy_static", "log", "mime", - "mime_guess", "native-tls", "percent-encoding", - "pin-project-lite 0.2.5", + "pin-project-lite", "serde", "serde_urlencoded", - "tokio 0.2.25", - "tokio-tls", + "tokio", + "tokio-native-tls", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1743,7 +1537,7 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1769,7 +1563,7 @@ dependencies = [ "serde", "state", "time 0.2.25", - "tokio 1.2.0", + "tokio", "ubyte", "version_check", "yansi", @@ -1795,19 +1589,19 @@ dependencies = [ "cookie", "either", "http", - "hyper 0.14.4", + "hyper", "indexmap", "log", "mime", "parking_lot", "pear", "percent-encoding", - "pin-project-lite 0.2.5", + "pin-project-lite", "ref-cast", "smallvec", "state", "time 0.2.25", - "tokio 1.2.0", + "tokio", "tokio-rustls", "uncased", "unicode-xid", @@ -2026,12 +1820,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] 
-name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - [[package]] name = "rustc_version" version = "0.2.3" @@ -2067,7 +1855,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2248,9 +2036,9 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2375,12 +2163,12 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "rand 0.8.3", "redox_syscall 0.2.5", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2441,7 +2229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2456,7 +2244,7 @@ dependencies = [ "stdweb", "time-macros", "version_check", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2497,23 +2285,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" -[[package]] -name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.12", - "slab", -] - [[package]] name = "tokio" version = "1.2.0" @@ -2521,16 +2292,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8190d04c665ea9e6b6a0dc45523ade572c088d2e6566244c1122671dbf4ae3a" dependencies = [ "autocfg", - "bytes 1.0.1", + "bytes", "libc", "memchr", - "mio 0.7.9", + "mio", "num_cpus", "once_cell", - "pin-project-lite 0.2.5", + "pin-project-lite", "signal-hook-registry", "tokio-macros", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2544,6 +2315,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -2551,46 +2332,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls", - "tokio 1.2.0", + "tokio", "webpki", ] -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio 0.2.25", -] - -[[package]] -name = "tokio-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -dependencies = [ - "bytes 0.5.6", - 
"futures-core", - "futures-sink", - "log", - "pin-project-lite 0.1.12", - "tokio 0.2.25", -] - [[package]] name = "tokio-util" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebb7cb2f00c5ae8df755b252306272cd1790d39728363936e01827e11f0b017b" dependencies = [ - "bytes 1.0.1", + "bytes", "futures-core", "futures-sink", "log", - "pin-project-lite 0.2.5", - "tokio 1.2.0", + "pin-project-lite", + "tokio", ] [[package]] @@ -2614,9 +2371,8 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.5", + "cfg-if", + "pin-project-lite", "tracing-attributes", "tracing-core", ] @@ -2641,16 +2397,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "tracing-log" version = "0.1.2" @@ -2709,41 +2455,45 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53861fcb288a166aae4c508ae558ed18b53838db728d4d310aad08270a7d4c2b" +checksum = "98a0381b2864c2978db7f8e17c7b23cca5a3a5f99241076e13002261a8ecbabd" dependencies = [ "async-trait", - "backtrace", + "cfg-if", + "data-encoding", "enum-as-inner", - "futures", + "futures-channel", + "futures-io", + "futures-util", "idna", + "ipnet", "lazy_static", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec", "thiserror", - "tokio 0.2.25", + "tokio", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.19.6" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6759e8efc40465547b0dfce9500d733c65f969a4cbbfbe3ccf68daaa46ef179e" +checksum = "3072d18c10bd621cb00507d59cfab5517862285c353160366e37fbf4c74856e4" dependencies = [ - "backtrace", - "cfg-if 0.1.10", - "futures", + "cfg-if", + "futures-util", "ipconfig", "lazy_static", "log", "lru-cache", + "parking_lot", "resolv-conf", "smallvec", "thiserror", - "tokio 0.2.25", + "tokio", "trust-dns-proto", ] @@ -2771,15 +2521,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2868,7 +2609,7 @@ version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "serde", "serde_json", "wasm-bindgen-macro", @@ -2895,7 +2636,7 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e67a5806118af01f0d9045915676b22aaebecf4178ae7021bc171dab0b897ab" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2962,12 +2703,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -2978,12 +2713,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -3002,7 +2731,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -3011,17 +2740,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 27394f0..0351543 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,41 +28,39 @@ state-res = { git = "https://github.com/ruma/state-res", branch = "timo-spec-com #state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio -tokio = { version = "1.2.0" } +tokio = "1.2.0" # Used for storing data permanently sled = { version = "0.34.6", default-features = false } # Used for emitting log entries -log = "0.4.11" +log = "0.4.14" # Used for rocket<->ruma conversions -http = "0.2.1" +http = "0.2.3" # Used to find data directory for default db path directories = "3.0.1" -# Used for number types for ruma -js_int = "0.1.9" # Used for ruma wrapper -serde_json = { version = "1.0.60", features = ["raw_value"] } +serde_json = { version = "1.0.64", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.8.14" +serde_yaml = "0.8.17" # Used for pdu definition -serde = "1.0.117" +serde = "1.0.123" # Used for secure identifiers -rand = "0.7.3" +rand = "0.8.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.10.9" +reqwest = "0.11.1" # Used for conduit::Error type -thiserror = "1.0.22" +thiserror = "1.0.24" # Used to generate thumbnails for images -image = { version = "0.23.12", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key base64 = "0.13.0" # Used when hashing the state -ring = "0.16.19" +ring = "0.16.20" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.19.6" +trust-dns-resolver = "0.20.0" # Used to find matching events for appservices -regex = "1.4.2" +regex = "1.4.3" # jwt jsonwebtokens jsonwebtoken = "7.2.0" # Performance measurements diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 1c72915..0dadde9 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -21,7 +21,7 @@ use ruma::{ EventType, }, serde::Raw, - ServerName, + ServerName, UInt, }; #[cfg(feature = "conduit_bin")] @@ -128,7 +128,7 @@ pub async fn get_room_visibility_route( pub async fn get_public_rooms_filtered_helper( db: 
&Database, server: Option<&ServerName>, - limit: Option<js_int::UInt>, + limit: Option<UInt>, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, diff --git a/src/database/globals.rs b/src/database/globals.rs index ccd6284..7e924db 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -73,11 +73,9 @@ impl Globals { config, keypair: Arc::new(keypair), reqwest_client, - dns_resolver: TokioAsyncResolver::tokio_from_system_conf() - .await - .map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, actual_destination_cache: Arc::new(RwLock::new(HashMap::new())), jwt_decoding_key, }) diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 8433884..084e4a1 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, @@ -7,7 +6,7 @@ use ruma::{ }, presence::PresenceState, serde::Raw, - RoomId, UserId, + RoomId, UInt, UserId, }; use std::{ collections::HashMap, diff --git a/src/database/users.rs b/src/database/users.rs index 985647a..e5bc16e 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,5 +1,4 @@ use crate::{utils, Error, Result}; -use js_int::UInt; use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,7 @@ use ruma::{ encryption::DeviceKeys, events::{AnyToDeviceEvent, EventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId, }; use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime}; diff --git a/src/pdu.rs b/src/pdu.rs index 544c073..bcf5ffb 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,12 +1,11 @@ use crate::Error; -use js_int::UInt; use ruma::{ events::{ pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, - EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::json; diff --git a/src/utils.rs b/src/utils.rs index c82e6fe..0783567 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -55,6 +55,7 @@ pub fn random_string(length: usize) -> String { thread_rng() .sample_iter(&rand::distributions::Alphanumeric) .take(length) + .map(char::from) .collect() } From 231c6032f465024b336122b54390f10a38d4bd6b Mon Sep 17 00:00:00 2001 From: Rudi Floren Date: Thu, 4 Mar 2021 12:35:12 +0000 Subject: [PATCH 59/62] Make clippy happy (needless-return, etc.)
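These are mechanical cleanups suggested by clippy rather than behaviour changes. As a rough, self-contained Rust sketch of the recurring patterns (illustrative only; the function and variable names below are made up, not taken from this repository):

    // Illustrative sketch of the clippy rewrites applied in this patch; names are invented.

    // clippy::needless_return: rely on the block's tail expression instead of `return`.
    fn strip_appservice_prefix(server: &str) -> Option<&str> {
        if let Some(rest) = server.strip_prefix('+') {
            Some(rest)
        } else {
            None
        }
    }

    fn main() {
        let old_path_and_query = String::from("/_matrix/send/txn?access_token=secret");

        // clippy::single_char_pattern: a char literal instead of a one-character &str.
        let symbol = if old_path_and_query.contains('?') { "&" } else { "?" };

        // clippy::string_lit_as_bytes: a byte-string literal instead of "+".as_bytes().to_vec().
        let mut prefix = b"+".to_vec();
        prefix.push(0xff);

        // clippy::search_is_some: use .any(..) instead of .find(..).is_some().
        let device_ids = ["DEVICEA", "DEVICEB"];
        let device_exists = device_ids.iter().any(|id| *id == "DEVICEB");

        println!("{:?} {} {:?} {}", strip_appservice_prefix("+bridge"), symbol, prefix, device_exists);
    }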
--- src/appservice_server.rs | 2 +- src/client_server/push.rs | 2 +- src/client_server/session.rs | 3 +-- src/database/globals.rs | 3 ++- src/database/rooms.rs | 7 ++++--- src/database/sending.rs | 13 +++++++------ src/server_server.rs | 6 +++--- 7 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 986909b..04f14c0 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -25,7 +25,7 @@ where let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains("?") { + let symbol = if old_path_and_query.contains('?') { "&" } else { "?" diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 03da73a..5403f96 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -95,7 +95,7 @@ pub async fn get_pushrule_route( if let Some(rule) = rule { Ok(get_pushrule::Response { rule }.into()) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.").into()) + Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")) } } diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 8c8b643..7b3acfc 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -119,8 +119,7 @@ pub async fn login_route( let device_exists = body.device_id.as_ref().map_or(false, |device_id| { db.users .all_device_ids(&user_id) - .find(|x| x.as_ref().map_or(false, |v| v == device_id)) - .is_some() + .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); if device_exists { diff --git a/src/database/globals.rs b/src/database/globals.rs index ccd6284..7a0c217 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -9,9 +9,10 @@ use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; +type WellKnownMap = HashMap<Box<ServerName>, (String, Option<String>)>; #[derive(Clone)] pub struct Globals { - pub actual_destination_cache: Arc<RwLock<HashMap<Box<ServerName>, (String, Option<String>)>>>, // actual_destination, host + pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host pub(super) globals: sled::Tree, config: Config, keypair: Arc<ruma::signatures::Ed25519KeyPair>, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7e80134..6ee4f19 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -183,7 +183,7 @@ impl Rooms { ))) }) } else { - return Ok(None); + Ok(None) } } @@ -449,6 +449,7 @@ impl Rooms { /// /// By this point the incoming event should be fully authenticated, no auth happens /// in `append_pdu`.
+ #[allow(clippy::too_many_arguments)] pub fn append_pdu( &self, pdu: &PduEvent, @@ -970,7 +971,7 @@ impl Rooms { .get("users") .and_then(|users| users.as_sequence()) .map_or_else( - || Vec::new(), + Vec::new, |users| { users .iter() .map(|users| { users .get("regex") .and_then(|regex| regex.as_str()) .and_then(|regex| Regex::new(regex).ok()) }) @@ -1002,7 +1003,7 @@ .and_then(|string| { UserId::parse_with_server_name(string, globals.server_name()).ok() }); - + #[allow(clippy::blocks_in_if_conditions)] if bridge_user_id.map_or(false, |bridge_user_id| { self.is_joined(&bridge_user_id, room_id).unwrap_or(false) }) || users.iter().any(|users| { diff --git a/src/database/sending.rs b/src/database/sending.rs index 11034ea..48fe68a 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -102,7 +102,7 @@ impl Sending { match response { Ok((server, is_appservice)) => { let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -148,7 +148,7 @@ impl Sending { Err((server, is_appservice, e)) => { info!("Couldn't send transaction to {}\n{}", server, e); let mut prefix = if is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -180,7 +180,7 @@ impl Sending { .map_err(|_| Error::bad_database("ServerName in servernamepduid bytes are invalid.")) .map(|server_str| { // Appservices start with a plus - if server_str.starts_with("+") { + if server_str.starts_with('+') { (server_str[1..].to_owned(), true) } else { (server_str, false) @@ -196,6 +196,7 @@ impl Sending { .map(|pdu_id| (server, is_appservice, pdu_id)) ) .filter(|(server, is_appservice, _)| { + #[allow(clippy::blocks_in_if_conditions)] if last_failed_try.get(server).map_or(false, |(tries, instant)| { // Fail if a request has failed recently (exponential backoff) let mut min_elapsed_duration = Duration::from_secs(60) * *tries * *tries; @@ -209,7 +210,7 @@ } let mut prefix = if *is_appservice { - "+".as_bytes().to_vec() + b"+".to_vec() } else { Vec::new() }; @@ -247,7 +248,7 @@ #[tracing::instrument(skip(self))] pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = "+".as_bytes().to_vec(); + let mut key = b"+".to_vec(); key.extend_from_slice(appservice_id.as_bytes()); key.push(0xff); key.extend_from_slice(pdu_id); @@ -385,7 +386,7 @@ impl Sending { })?; // Appservices start with a plus - let (server, is_appservice) = if server.starts_with("+") { + let (server, is_appservice) = if server.starts_with('+') { (&server[1..], true) } else { (&*server, false) diff --git a/src/server_server.rs b/src/server_server.rs index 4ea9bfe..261172a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -220,7 +220,7 @@ fn add_port_to_hostname(destination_str: String) -> String { #[tracing::instrument(skip(globals))] async fn find_actual_destination( globals: &crate::database::globals::Globals, - destination: &Box<ServerName>, + destination: &'_ ServerName, ) -> (String, Option<String>) { let mut host = None; @@ -277,9 +277,9 @@ } #[tracing::instrument(skip(globals))] -async fn query_srv_record<'a>( +async fn query_srv_record( globals: &crate::database::globals::Globals, - hostname: &'a str, + hostname: &'_ str, ) -> Option<String> { if let Ok(Some(host_port)) = globals .dns_resolver() From e239014fa3935c523565b21344fec0b926c7fccf Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 4 Mar 2021 08:02:41 -0500 Subject: [PATCH 60/62] Query for the correct server --- src/server_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server_server.rs 
b/src/server_server.rs index dcd72f7..12b60b9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1148,7 +1148,7 @@ pub(crate) async fn fetch_signing_keys( &db.globals, &server, get_remote_server_keys::v2::Request::new( - &server, + origin, SystemTime::now() .checked_add(Duration::from_secs(3600)) .expect("SystemTime to large"), From 0d55964d241c00b36341f1843bb515f9241e8463 Mon Sep 17 00:00:00 2001 From: Devin Ragotzy Date: Thu, 4 Mar 2021 08:45:23 -0500 Subject: [PATCH 61/62] Run nightly cargo fmt --- rustfmt.toml | 2 +- src/database.rs | 8 +++++--- src/database/appservice.rs | 6 ++++-- src/database/globals.rs | 9 +++++---- src/database/sending.rs | 3 +-- src/error.rs | 6 +++++- src/main.rs | 14 +++++++++----- 7 files changed, 30 insertions(+), 18 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 7d2cf54..e86028b 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1 +1 @@ -merge_imports = true +imports_granularity="Crate" diff --git a/src/database.rs b/src/database.rs index 34b74be..bf3e0f0 100644 --- a/src/database.rs +++ b/src/database.rs @@ -17,9 +17,11 @@ use log::info; use rocket::futures::{self, channel::mpsc}; use ruma::{DeviceId, ServerName, UserId}; use serde::Deserialize; -use std::collections::HashMap; -use std::fs::remove_dir_all; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + fs::remove_dir_all, + sync::{Arc, RwLock}, +}; use tokio::sync::Semaphore; #[derive(Clone, Debug, Deserialize)] diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 26ea5b9..764291d 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -1,6 +1,8 @@ use crate::{utils, Error, Result}; -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; #[derive(Clone)] pub struct Appservice { diff --git a/src/database/globals.rs b/src/database/globals.rs index 8d7f104..8c0463d 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,10 +4,11 @@ use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerName, ServerSigningKeyId, }; -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use std::sync::RwLock; -use std::time::Duration; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, RwLock}, + time::Duration, +}; use trust_dns_resolver::TokioAsyncResolver; pub const COUNTER: &str = "c"; diff --git a/src/database/sending.rs b/src/database/sending.rs index 9793971..00073af 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -16,8 +16,7 @@ use ruma::{ ServerName, }; use sled::IVec; -use tokio::select; -use tokio::sync::Semaphore; +use tokio::{select, sync::Semaphore}; #[derive(Clone)] pub struct Sending { diff --git a/src/error.rs b/src/error.rs index 13efce6..d8f10f4 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,8 @@ -use std::{collections::HashMap, sync::RwLock, time::Duration, time::Instant}; +use std::{ + collections::HashMap, + sync::RwLock, + time::{Duration, Instant}, +}; use log::error; use ruma::{ diff --git a/src/main.rs b/src/main.rs index b469f4d..a2c020f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,4 @@ -// #![warn(rust_2018_idioms)] +#![warn(rust_2018_idioms)] pub mod appservice_server; pub mod client_server; @@ -19,11 +19,15 @@ pub use rocket::State; use ruma::api::client::error::ErrorKind; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use rocket::figment::{ - providers::{Env, Format, Toml}, - Figment, +use rocket::{ + catch, catchers, + 
fairing::AdHoc, + figment::{ + providers::{Env, Format, Toml}, + Figment, + }, + routes, Request, }; -use rocket::{catch, catchers, fairing::AdHoc, routes, Request}; use tracing::span; use tracing_subscriber::{prelude::*, Registry}; From 6da40225bb7363b9c76c4574820843faf587b43b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 13 Mar 2021 16:30:12 +0100 Subject: [PATCH 62/62] improvement: make state res actually work --- Cargo.lock | 97 +++++-- Cargo.toml | 11 +- src/client_server/account.rs | 13 +- src/client_server/capabilities.rs | 35 ++- src/client_server/config.rs | 7 +- src/client_server/membership.rs | 189 ++++++-------- src/client_server/sync.rs | 21 +- src/database.rs | 3 +- src/database/globals.rs | 56 ++-- src/database/rooms.rs | 219 +++++++--------- src/database/sending.rs | 7 +- src/main.rs | 2 + src/server_server.rs | 418 +++++++++++++++--------------- 13 files changed, 537 insertions(+), 541 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7538e0..e157565 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -187,6 +187,7 @@ dependencies = [ "log", "opentelemetry", "opentelemetry-jaeger", + "pretty_env_logger", "rand", "regex", "reqwest", @@ -383,6 +384,19 @@ dependencies = [ "syn", ] +[[package]] +name = "env_logger" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "figment" version = "0.10.3" @@ -664,6 +678,15 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +dependencies = [ + "quick-error", +] + [[package]] name = "hyper" version = "0.14.4" @@ -688,6 +711,21 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +dependencies = [ + "futures-util", + "hyper", + "log", + "rustls", + "tokio", + "tokio-rustls", + "webpki", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -1259,6 +1297,16 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +[[package]] +name = "pretty_env_logger" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" +dependencies = [ + "env_logger", + "log", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -1454,6 +1502,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-rustls", "hyper-tls", "ipnet", "js-sys", @@ -1463,14 +1512,17 @@ dependencies = [ "native-tls", "percent-encoding", "pin-project-lite", + "rustls", "serde", "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-rustls", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.7.0", ] @@ -1570,7 +1622,6 @@ dependencies = [ [[package]] name = "ruma" version = "0.0.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" 
dependencies = [ "assign", "js_int", @@ -1590,7 +1641,6 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "http", "percent-encoding", @@ -1605,7 +1655,6 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.17.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1616,7 +1665,6 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.2.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1630,7 +1678,6 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.10.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "assign", "http", @@ -1649,7 +1696,6 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.3.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "maplit", @@ -1662,7 +1708,6 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-common", @@ -1676,7 +1721,6 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.22.0-alpha.2" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1687,7 +1731,6 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.1.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ -1702,7 +1745,6 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "paste", "rand", @@ -1716,7 +1758,6 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.18.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro2", "quote", @@ -1727,12 +1768,10 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" [[package]] name = "ruma-identity-service-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "ruma-api", "ruma-common", @@ -1745,7 +1784,6 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.0.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "js_int", "ruma-api", @@ 
-1760,7 +1798,6 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "form_urlencoded", "itoa", @@ -1773,7 +1810,6 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -1784,7 +1820,6 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.6.0-alpha.1" -source = "git+https://github.com/ruma/ruma?rev=0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc#0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" dependencies = [ "base64 0.13.0", "ring", @@ -2051,7 +2086,6 @@ checksum = "3015a7d0a5fd5105c91c3710d42f9ccf0abfb287d62206484dcc67f9569a6483" [[package]] name = "state-res" version = "0.1.0" -source = "git+https://github.com/ruma/state-res?branch=main#d34a78c5b66de419862d9e592bde8e0007111ebd" dependencies = [ "itertools", "log", @@ -2136,6 +2170,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "termcolor" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +dependencies = [ + "winapi-util", +] + [[package]] name = "thiserror" version = "1.0.24" @@ -2656,6 +2699,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki-roots" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" +dependencies = [ + "webpki", +] + [[package]] name = "weezl" version = "0.1.4" @@ -2684,6 +2736,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 2293b62..9c08776 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,16 +18,16 @@ rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "93e62c86e #rocket = { git = "https://github.com/timokoesters/Rocket.git", branch = "empty_parameters", default-features = false, features = ["tls"] } # Used for matrix spec type definitions and helpers -ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } +#ruma = { git = "https://github.com/ruma/ruma", features = ["rand", "appservice-api", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks", "unstable-exhaustive-types"], rev = "0a10afe6dacc2b7a50a8002c953d10b7fb4e37bc" } # ruma = { git = "https://github.com/DevinR528/ruma", features = ["rand", "client-api", "federation-api", "unstable-exhaustive-types", "unstable-pre-spec", "unstable-synapse-quirks"], branch = "verified-export" } -# ruma = { path = "../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } +ruma = { path = 
"../ruma/ruma", features = ["unstable-exhaustive-types", "rand", "client-api", "federation-api", "unstable-pre-spec", "unstable-synapse-quirks"] } # Used when doing state resolution # state-res = { git = "https://github.com/timokoesters/state-res", branch = "timo-spec-comp", features = ["unstable-pre-spec"] } # TODO: remove the gen-eventid feature -state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } +#state-res = { git = "https://github.com/ruma/state-res", branch = "main", features = ["unstable-pre-spec", "gen-eventid"] } # state-res = { git = "https://github.com/ruma/state-res", rev = "791c66d73cf064d09db0cdf767d5fef43a343425", features = ["unstable-pre-spec", "gen-eventid"] } -# state-res = { path = "../../state-res", features = ["unstable-pre-spec", "gen-eventid"] } +state-res = { path = "../state-res", features = ["unstable-pre-spec", "gen-eventid"] } # Used for long polling and federation sender, should be the same as rocket::tokio tokio = "1.2.0" @@ -50,7 +50,7 @@ rand = "0.8.3" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = "0.11.1" +reqwest = { version = "0.11.1", features = ["rustls-tls"] } # Used for conduit::Error type thiserror = "1.0.24" # Used to generate thumbnails for images @@ -71,6 +71,7 @@ opentelemetry = "0.12.0" tracing-subscriber = "0.2.16" tracing-opentelemetry = "0.11.0" opentelemetry-jaeger = "0.11.0" +pretty_env_logger = "0.4.0" [features] default = ["conduit_bin"] diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 7d3067e..1c6f517 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -455,16 +455,9 @@ pub async fn register_route( db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomMessage, - content: serde_json::to_value(message::MessageEventContent::Text( - message::TextMessageEventContent { - body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), - formatted: Some(message::FormattedBody { - format: message::MessageFormat::Html, - body: "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), - }), - relates_to: None, - new_content: None, - }, + content: serde_json::to_value(message::MessageEventContent::text_html( + "Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), + "Thanks for trying out Conduit! 
This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing /join #conduit:matrix.org. Important: Please don't join any other Matrix rooms over federation without permission from the room's admins. Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index b4fdf69..a3c0db6 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,5 +1,10 @@ use crate::ConduitResult; -use ruma::{api::client::r0::capabilities::get_capabilities, RoomVersionId}; +use ruma::{ + api::client::r0::capabilities::{ + get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, + }, + RoomVersionId, +}; use std::collections::BTreeMap; #[cfg(feature = "conduit_bin")] @@ -12,24 +17,14 @@ use rocket::get; #[tracing::instrument] pub async fn get_capabilities_route() -> ConduitResult { let mut available = BTreeMap::new(); - available.insert( - RoomVersionId::Version5, - get_capabilities::RoomVersionStability::Stable, - ); - available.insert( - RoomVersionId::Version6, - get_capabilities::RoomVersionStability::Stable, - ); + available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); + available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); - Ok(get_capabilities::Response { - capabilities: get_capabilities::Capabilities { - change_password: get_capabilities::ChangePasswordCapability::default(), // enabled by default - room_versions: get_capabilities::RoomVersionsCapability { - default: RoomVersionId::Version6, - available, - }, - custom_capabilities: BTreeMap::new(), - }, - } - .into()) + let mut capabilities = Capabilities::new(); + capabilities.room_versions = RoomVersionsCapability { + default: RoomVersionId::Version6, + available, + }; + + Ok(get_capabilities::Response { capabilities }.into()) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index aece96e..a53b7cd 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -23,7 +23,7 @@ pub async fn set_global_account_data_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let content = serde_json::from_str::(body.data.get()) + let data = serde_json::from_str(body.data.get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); @@ -33,10 +33,7 @@ pub async fn set_global_account_data_route( sender_user, event_type.clone().into(), &BasicEvent { - content: CustomEventContent { - event_type, - json: content, - }, + content: CustomEventContent { event_type, data }, }, &db.globals, )?; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index b7b2d4b..d63fa02 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -4,7 +4,7 @@ use crate::{ pdu::{PduBuilder, PduEvent}, utils, ConduitResult, Database, Error, Result, Ruma, }; -use log::warn; +use log::{info, warn}; use ruma::{ api::{ client::{ @@ -21,11 +21,9 @@ use ruma::{ serde::{to_canonical_value, CanonicalJsonObject, Raw}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; -// use state_res::Event; use std::{ - collections::{BTreeMap, HashSet}, + collections::{BTreeMap, HashMap}, convert::TryFrom, - iter, sync::Arc, 
}; @@ -439,6 +437,7 @@ pub async fn joined_members_route( Ok(joined_members::Response { joined }.into()) } +#[tracing::instrument(skip(db))] async fn join_room_by_id_helper( db: &Database, sender_user: Option<&UserId>, @@ -566,23 +565,22 @@ async fn join_room_by_id_helper( Ok((event_id, value)) }; - let room_state = send_join_response.room_state.state.iter().map(add_event_id); + let count = db.globals.next_count()?; - let _state_events = room_state - .clone() - .map(|pdu: Result<(EventId, CanonicalJsonObject)>| Ok(pdu?.0)) - .chain(iter::once(Ok(event_id.clone()))) // Add join event we just created - .collect::>>()?; + let mut pdu_id = room_id.as_bytes().to_vec(); + pdu_id.push(0xff); + pdu_id.extend_from_slice(&count.to_be_bytes()); - let auth_chain = send_join_response + let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) + .map_err(|_| Error::BadServerResponse("Invalid PDU in send_join response."))?; + + let mut state = HashMap::new(); + + for pdu in send_join_response .room_state - .auth_chain + .state .iter() - .map(add_event_id); - - let mut event_map = room_state - .chain(auth_chain) - .chain(iter::once(Ok((event_id, join_event)))) // Add join event we just created + .map(add_event_id) .map(|r| { let (event_id, value) = r?; PduEvent::from_id_val(&event_id, value.clone()) @@ -592,97 +590,78 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") }) }) - .collect::>>>()?; - - let control_events = event_map - .values() - .filter(|pdu| state_res::is_power_event(pdu)) - .map(|pdu| pdu.event_id.clone()) - .collect::>(); - - // These events are not guaranteed to be sorted but they are resolved according to spec - // we auth them anyways to weed out faulty/malicious server. The following is basically the - // full state resolution algorithm. - let event_ids = event_map.keys().cloned().collect::>(); - - let sorted_control_events = state_res::StateResolution::reverse_topological_power_sort( - &room_id, - &control_events, - &mut event_map, - &event_ids, - ); - - // Auth check each event against the "partial" state created by the preceding events - let resolved_control_events = state_res::StateResolution::iterative_auth_check( - room_id, - &RoomVersionId::Version6, - &sorted_control_events, - &BTreeMap::new(), // We have no "clean/resolved" events to add (these extend the `resolved_control_events`) - &mut event_map, - ) - .expect("iterative auth check failed on resolved events"); - - // This removes the control events that failed auth, leaving the resolved - // to be mainline sorted. In the actual `state_res::StateResolution::resolve` - // function both are removed since these are all events we don't know of - // we must keep track of everything to add to our DB. 
- let events_to_sort = event_map - .keys() - .filter(|id| { - !sorted_control_events.contains(id) - || resolved_control_events.values().any(|rid| *id == rid) - }) - .cloned() - .collect::>(); - - let power_level = - resolved_control_events.get(&(EventType::RoomPowerLevels, Some("".to_string()))); - // Sort the remaining non control events - let sorted_event_ids = state_res::StateResolution::mainline_sort( - room_id, - &events_to_sort, - power_level, - &mut event_map, - ); - - let resolved_events = state_res::StateResolution::iterative_auth_check( - room_id, - &RoomVersionId::Version6, - &sorted_event_ids, - &resolved_control_events, - &mut event_map, - ) - .expect("iterative auth check failed on resolved events"); - - // filter the events that failed the auth check keeping the remaining events - // sorted correctly - for ev_id in sorted_event_ids - .iter() - .filter(|id| resolved_events.values().any(|rid| rid == *id)) { - let pdu = event_map - .get(ev_id) - .expect("Found event_id in sorted events that is not in resolved state"); + let (id, pdu) = pdu?; + info!("adding {} to outliers: {:#?}", id, pdu); + db.rooms.add_pdu_outlier(&pdu)?; + if let Some(state_key) = &pdu.state_key { + if pdu.kind == EventType::RoomMember { + let target_user_id = UserId::try_from(state_key.clone()).map_err(|_| { + Error::BadServerResponse("Invalid user id in send_join response.") + })?; - // We do not rebuild the PDU in this case only insert to DB - let count = db.globals.next_count()?; - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&count.to_be_bytes()); - - let hash = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?; - - db.rooms.append_pdu( - &pdu, - utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"), - count, - pdu_id.clone().into(), - &[pdu.event_id.clone()], - &db, - )?; - - db.rooms.set_room_state(room_id, &hash)?; + // Update our membership info, we do this here incase a user is invited + // and immediately leaves we need the DB to record the invite event for auth + db.rooms.update_membership( + &pdu.room_id, + &target_user_id, + serde_json::from_value::(pdu.content.clone()) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid member event content.", + ) + })?, + &pdu.sender, + &db.account_data, + &db.globals, + )?; + } + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(id.as_bytes()); + state.insert((pdu.kind.clone(), state_key.clone()), long_id); + } } + + state.insert( + ( + pdu.kind.clone(), + pdu.state_key.clone().expect("join event has state key"), + ), + pdu_id.clone(), + ); + + db.rooms.force_state(room_id, state, &db.globals)?; + + for pdu in send_join_response + .room_state + .auth_chain + .iter() + .map(add_event_id) + .map(|r| { + let (event_id, value) = r?; + PduEvent::from_id_val(&event_id, value.clone()) + .map(|ev| (event_id, Arc::new(ev))) + .map_err(|e| { + warn!("{:?}: {}", value, e); + Error::BadServerResponse("Invalid PDU in send_join response.") + }) + }) + { + let (id, pdu) = pdu?; + info!("adding {} to outliers: {:#?}", id, pdu); + db.rooms.add_pdu_outlier(&pdu)?; + } + + db.rooms.append_pdu( + &pdu, + utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), + db.globals.next_count()?, + pdu_id.into(), + &[pdu.event_id.clone()], + db, + )?; } else { let event = member::MemberEventContent { membership: member::MembershipState::Join, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 
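The join path above references state events by a "long id": events we built ourselves keep their timeline pdu id (room id, 0xff, counter), while outliers received in the send_join response are referenced as room id, 0xff, event id. A simplified sketch of that state map, with plain Strings standing in for the ruma identifier types.

use std::collections::HashMap;

fn long_id(room_id: &str, event_id: &str) -> Vec<u8> {
    let mut id = room_id.as_bytes().to_vec();
    id.push(0xff);
    id.extend_from_slice(event_id.as_bytes());
    id
}

fn main() {
    let room_id = "!room:server.name";
    let mut state: HashMap<(String, String), Vec<u8>> = HashMap::new();

    // A state event from the send_join response: stored as an outlier and
    // referenced from the state map by its long id.
    state.insert(
        ("m.room.create".to_owned(), "".to_owned()),
        long_id(room_id, "$create-event-id"),
    );

    // The join event we created ourselves already has a timeline pdu id.
    let count: u64 = 7;
    let mut pdu_id = room_id.as_bytes().to_vec();
    pdu_id.push(0xff);
    pdu_id.extend_from_slice(&count.to_be_bytes());
    state.insert(
        ("m.room.member".to_owned(), "@alice:server.name".to_owned()),
        pdu_id,
    );

    assert_eq!(state.len(), 2);
}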
fac6b15..f01eb39 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -102,9 +102,14 @@ pub async fn sync_events_route( // since and the current room state, meaning there should be no updates. // The inner Option is None when there is an event, but there is no state hash associated // with it. This can happen for the RoomCreate event, so all updates should arrive. - let first_pdu_after_since = db.rooms.pdus_after(sender_user, &room_id, since).next(); + let first_pdu_before_since = db.rooms.pdus_until(sender_user, &room_id, since).next(); + let pdus_after_since = db + .rooms + .pdus_after(sender_user, &room_id, since) + .next() + .is_some(); - let since_state_hash = first_pdu_after_since + let since_state_hash = first_pdu_before_since .as_ref() .map(|pdu| db.rooms.pdu_state_hash(&pdu.as_ref().ok()?.0).ok()?); @@ -114,7 +119,7 @@ pub async fn sync_events_route( invited_member_count, joined_since_last_sync, state_events, - ) = if since_state_hash != None && Some(¤t_state_hash) != since_state_hash.as_ref() { + ) = if pdus_after_since && Some(¤t_state_hash) != since_state_hash.as_ref() { let current_state = db.rooms.room_state_full(&room_id)?; let current_members = current_state .iter() @@ -138,9 +143,9 @@ pub async fn sync_events_route( // Calculations: let new_encrypted_room = - encrypted_room && since_encryption.map_or(false, |encryption| encryption.is_none()); + encrypted_room && since_encryption.map_or(true, |encryption| encryption.is_none()); - let send_member_count = since_state.as_ref().map_or(false, |since_state| { + let send_member_count = since_state.as_ref().map_or(true, |since_state| { since_state.as_ref().map_or(true, |since_state| { current_members.len() != since_state @@ -179,7 +184,7 @@ pub async fn sync_events_route( let since_membership = since_state .as_ref() - .map_or(MembershipState::Join, |since_state| { + .map_or(MembershipState::Leave, |since_state| { since_state .as_ref() .and_then(|since_state| { @@ -221,7 +226,7 @@ pub async fn sync_events_route( } } - let joined_since_last_sync = since_sender_member.map_or(false, |member| { + let joined_since_last_sync = since_sender_member.map_or(true, |member| { member.map_or(true, |member| member.membership != MembershipState::Join) }); @@ -310,7 +315,7 @@ pub async fn sync_events_route( (None, None, Vec::new()) }; - let state_events = if joined_since_last_sync { + let state_events = if dbg!(joined_since_last_sync) { current_state .into_iter() .map(|(_, pdu)| pdu.to_sync_state_event()) diff --git a/src/database.rs b/src/database.rs index bf3e0f0..0f5e4b4 100644 --- a/src/database.rs +++ b/src/database.rs @@ -165,9 +165,8 @@ impl Database { stateid_pduid: db.open_tree("stateid_pduid")?, pduid_statehash: db.open_tree("pduid_statehash")?, roomid_statehash: db.open_tree("roomid_statehash")?, - roomeventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, + eventid_outlierpdu: db.open_tree("roomeventid_outlierpdu")?, prevevent_parent: db.open_tree("prevevent_parent")?, - roomeventid_outlierpducount: db.open_tree("roomeventid_outlierpducount")?, }, account_data: account_data::AccountData { roomuserdataid_accountdata: db.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/globals.rs b/src/database/globals.rs index 8c0463d..dd594c5 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -24,7 +24,7 @@ pub struct Globals { reqwest_client: reqwest::Client, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, - pub(super) servertimeout_signingkey: sled::Tree, // ServerName -> 
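The sync changes above flip several defaults from map_or(false, ..) to map_or(true, ..): when no state from before `since` is known, the safer assumption is that the client is missing everything. A small sketch of that reasoning for joined_since_last_sync, with a simplified MembershipState enum standing in for the ruma type.

#[derive(PartialEq)]
enum MembershipState {
    Join,
    Leave,
}

fn joined_since_last_sync(since_member: Option<Option<MembershipState>>) -> bool {
    // Outer None: no event before `since` at all -> assume a fresh join.
    // Inner None: an event exists but carries no membership -> also assume a fresh join.
    since_member.map_or(true, |member| {
        member.map_or(true, |membership| membership != MembershipState::Join)
    })
}

fn main() {
    assert!(joined_since_last_sync(None));
    assert!(joined_since_last_sync(Some(None)));
    assert!(joined_since_last_sync(Some(Some(MembershipState::Leave))));
    assert!(!joined_since_last_sync(Some(Some(MembershipState::Join))));
}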
algorithm:key + pubkey + pub(super) servertimeout_signingkey: sled::Tree, // ServerName + Timeout Timestamp -> algorithm:key + pubkey } impl Globals { @@ -157,37 +157,31 @@ impl Globals { /// /// This doesn't actually check that the keys provided are newer than the old set. pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> { - // Remove outdated keys - let now = crate::utils::millis_since_unix_epoch(); - for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { - let (k, _) = item?; - let valid_until = k - .splitn(2, |&b| b == 0xff) - .nth(1) - .map(crate::utils::u64_from_bytes) - .ok_or_else(|| Error::bad_database("Invalid signing keys."))? - .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; + let mut key1 = origin.as_bytes().to_vec(); + key1.push(0xff); - if now > valid_until { - self.servertimeout_signingkey.remove(k)?; - } - } + let mut key2 = key1.clone(); - let mut key = origin.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice( - &(keys - .valid_until_ts - .duration_since(std::time::UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64) - .to_be_bytes(), - ); + let ts = keys + .valid_until_ts + .duration_since(std::time::UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64; + + key1.extend_from_slice(&ts.to_be_bytes()); + key2.extend_from_slice(&(ts + 1).to_be_bytes()); self.servertimeout_signingkey.insert( - key, + key1, serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"), )?; + + self.servertimeout_signingkey.insert( + key2, + serde_json::to_vec(&keys.old_verify_keys) + .expect("ServerSigningKeys are a valid string"), + )?; + Ok(()) } @@ -196,7 +190,10 @@ impl Globals { &self, origin: &ServerName, ) -> Result> { + let mut response = BTreeMap::new(); + let now = crate::utils::millis_since_unix_epoch(); + for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) { let (k, bytes) = item?; let valid_until = k @@ -207,10 +204,11 @@ impl Globals { .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?; // If these keys are still valid use em! if valid_until > now { - return serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys")); + let btree: BTreeMap<_, _> = serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys"))?; + response.extend(btree); } } - Ok(BTreeMap::default()) + Ok(response) } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index d48494b..2a88628 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,7 @@ mod edus; pub use edus::RoomEdus; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; -use log::error; +use log::{error, info, warn}; use regex::Regex; use ring::digest; use ruma::{ @@ -71,10 +71,7 @@ pub struct Rooms { /// RoomId + EventId -> outlier PDU. /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) roomeventid_outlierpdu: sled::Tree, - /// RoomId + EventId -> count of the last known pdu when the outlier was inserted. - /// This allows us to skip any state snapshots that would for sure not have the outlier. - pub(super) roomeventid_outlierpducount: sled::Tree, + pub(super) eventid_outlierpdu: sled::Tree, /// RoomId + EventId -> Parent PDU EventId. 
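add_signing_key above now writes two entries per response, origin ++ 0xff ++ valid_until_ts for the current verify_keys and the same key with ts + 1 for old_verify_keys, and signing_keys_for merges every still-valid entry found by a prefix scan instead of returning the first hit. A standalone sketch of that key layout, using a BTreeMap in place of the sled tree and a String in place of the serialized key maps.

use std::collections::BTreeMap;

fn signing_key_key(origin: &str, valid_until_ts: u64) -> Vec<u8> {
    let mut key = origin.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&valid_until_ts.to_be_bytes());
    key
}

fn main() {
    let mut tree: BTreeMap<Vec<u8>, String> = BTreeMap::new();

    // Current keys at valid_until_ts and old keys at valid_until_ts + 1,
    // matching the key1/key2 pair written by add_signing_key above.
    tree.insert(signing_key_key("other.server", 2_000), "current verify_keys".to_owned());
    tree.insert(signing_key_key("other.server", 2_001), "old_verify_keys".to_owned());

    // signing_keys_for: prefix scan over the origin, keep everything still valid.
    let now: u64 = 1_500;
    let mut merged = Vec::new();
    for (key, value) in tree.range(signing_key_key("other.server", 0)..) {
        if !key.starts_with(b"other.server") {
            break;
        }
        let mut ts = [0u8; 8];
        ts.copy_from_slice(&key[key.len() - 8..]);
        if u64::from_be_bytes(ts) > now {
            merged.push(value.clone());
        }
    }
    assert_eq!(merged.len(), 2);
}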
pub(super) prevevent_parent: sled::Tree, @@ -89,19 +86,21 @@ impl Rooms { room_id: &RoomId, state_hash: &StateHashId, ) -> Result> { - self.stateid_pduid + let r = self + .stateid_pduid .scan_prefix(&state_hash) .values() - .map(|pduid_short| { - let mut pduid = room_id.as_bytes().to_vec(); - pduid.push(0xff); - pduid.extend_from_slice(&pduid_short?); - match self.pduid_pdu.get(&pduid)? { + .map(|short_id| { + let short_id = short_id?; + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(&short_id); + match self.pduid_pdu.get(&long_id)? { Some(b) => serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")), None => self - .roomeventid_outlierpdu - .get(pduid)? + .eventid_outlierpdu + .get(short_id)? .map(|b| { serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db.")) @@ -124,7 +123,9 @@ impl Rooms { pdu, )) }) - .collect() + .collect(); + + r } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -140,6 +141,8 @@ impl Rooms { key.push(0xff); key.extend_from_slice(&state_key.as_bytes()); + info!("Looking for {} {:?}", event_type, state_key); + let short = self.statekey_short.get(&key)?; if let Some(short) = short { @@ -147,32 +150,40 @@ impl Rooms { stateid.push(0xff); stateid.extend_from_slice(&short); + info!("trying to find pduid/eventid. short: {:?}", stateid); self.stateid_pduid .get(&stateid)? - .map_or(Ok(None), |pdu_id_short| { - let mut pdu_id = room_id.as_bytes().to_vec(); - pdu_id.push(0xff); - pdu_id.extend_from_slice(&pdu_id_short); + .map_or(Ok(None), |short_id| { + info!("found in stateid_pduid"); + let mut long_id = room_id.as_bytes().to_vec(); + long_id.push(0xff); + long_id.extend_from_slice(&short_id); - Ok::<_, Error>(Some(( - pdu_id.clone().into(), - match self.pduid_pdu.get(&pdu_id)? { - Some(b) => serde_json::from_slice::(&b) + Ok::<_, Error>(Some(match self.pduid_pdu.get(&long_id)? { + Some(b) => ( + long_id.clone().into(), + serde_json::from_slice::(&b) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - None => self - .roomeventid_outlierpdu - .get(pdu_id)? - .map(|b| { - serde_json::from_slice::(&b) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })??, - }, - ))) + ), + None => { + info!("looking in outliers"); + ( + short_id.clone().into(), + self.eventid_outlierpdu + .get(&short_id)? + .map(|b| { + serde_json::from_slice::(&b) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .ok_or_else(|| { + Error::bad_database("Event is not in pdu tree or outliers.") + })??, + ) + } + })) }) } else { + info!("short id not found"); Ok(None) } } @@ -215,6 +226,8 @@ impl Rooms { .ok_or_else(|| Error::bad_database("Saved auth event with no state key."))?, )? 
{ events.insert((event_type, state_key), pdu); + } else { + warn!("Could not find {} {:?} in state", event_type, state_key); } } Ok(events) @@ -253,11 +266,11 @@ impl Rooms { globals: &super::globals::Globals, ) -> Result<()> { let state_hash = - self.calculate_hash(&state.values().map(|pdu_id| &**pdu_id).collect::>())?; + self.calculate_hash(&state.values().map(|long_id| &**long_id).collect::>())?; let mut prefix = state_hash.to_vec(); prefix.push(0xff); - for ((event_type, state_key), id_long) in state { + for ((event_type, state_key), long_id) in state { let mut statekey = event_type.as_ref().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(&state_key.as_bytes()); @@ -273,16 +286,13 @@ impl Rooms { } }; - // Because of outliers this could also be an eventID but that - // is handled by `state_full` - let pdu_id_short = id_long - .splitn(2, |&b| b == 0xff) - .nth(1) - .ok_or_else(|| Error::bad_database("Invalid pduid in state."))?; + // If it's a pdu id we remove the room id, if it's an event id we leave it the same + let short_id = long_id.splitn(2, |&b| b == 0xff).nth(1).unwrap_or(&long_id); let mut state_id = prefix.clone(); state_id.extend_from_slice(&short.to_be_bytes()); - self.stateid_pduid.insert(state_id, pdu_id_short)?; + info!("inserting {:?} into {:?}", short_id, state_id); + self.stateid_pduid.insert(state_id, short_id)?; } self.roomid_statehash @@ -348,20 +358,19 @@ impl Rooms { pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| { - Ok(Some( - serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { - Some(b) => b, - None => self - .roomeventid_outlierpdu - .get(event_id.as_bytes())? - .ok_or_else(|| { - Error::bad_database("Event is not in pdu tree or outliers.") - })?, - }) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + .map_or_else::, _, _>( + || Ok(self.eventid_outlierpdu.get(event_id.as_bytes())?), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + Ok(serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?) }) + .transpose() } /// Returns the pdu's id. @@ -371,24 +380,31 @@ impl Rooms { .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id))) } + pub fn get_long_id(&self, event_id: &EventId) -> Result> { + Ok(self + .get_pdu_id(event_id)? + .map_or_else(|| event_id.as_bytes().to_vec(), |pduid| pduid.to_vec())) + } + /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu_id| { - Ok(Some( - serde_json::from_slice(&match self.pduid_pdu.get(&pdu_id)? { - Some(b) => b, - None => match self.roomeventid_outlierpdu.get(event_id.as_bytes())? { - Some(b) => b, - None => return Ok(None), - }, - }) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + .map_or_else::, _, _>( + || Ok(self.eventid_outlierpdu.get(event_id.as_bytes())?), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + Ok(serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?) }) + .transpose() } /// Returns the pdu. @@ -484,7 +500,7 @@ impl Rooms { /// Returns the pdu from the outlier tree. 
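get_pdu and get_pdu_json above share one lookup order: resolve the event id to a timeline pdu id if we have one, otherwise fall back to the outlier tree. A minimal sketch of that fallback, with HashMaps standing in for the sled trees and a String for the serialized PDU; the Store type is illustrative.

use std::collections::HashMap;

struct Store {
    eventid_pduid: HashMap<String, Vec<u8>>,
    pduid_pdu: HashMap<Vec<u8>, String>,
    eventid_outlierpdu: HashMap<String, String>,
}

impl Store {
    fn get_pdu(&self, event_id: &str) -> Option<&String> {
        match self.eventid_pduid.get(event_id) {
            // Known timeline event: the pdu id must resolve, or the db is inconsistent.
            Some(pdu_id) => self.pduid_pdu.get(pdu_id),
            // Unknown to the timeline: it may still be stored as an outlier.
            None => self.eventid_outlierpdu.get(event_id),
        }
    }
}

fn main() {
    let mut store = Store {
        eventid_pduid: HashMap::new(),
        pduid_pdu: HashMap::new(),
        eventid_outlierpdu: HashMap::new(),
    };
    store.eventid_outlierpdu.insert("$outlier".to_owned(), "{...}".to_owned());
    assert!(store.get_pdu("$outlier").is_some());
    assert!(store.get_pdu("$unknown").is_none());
}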
pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.roomeventid_outlierpdu + self.eventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) @@ -494,25 +510,12 @@ impl Rooms { /// Append the PDU as an outlier. /// /// Any event given to this will be processed (state-res) on another thread. - pub fn append_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { - log::info!( - "Number of outlier pdu's {}", - self.roomeventid_outlierpdu.len() - ); - - let mut key = pdu.room_id().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu.event_id().as_bytes()); - - self.eventid_pduid - .insert(pdu.event_id().as_bytes(), key.as_slice())?; - - self.roomeventid_outlierpdu.insert( - &key, + pub fn add_pdu_outlier(&self, pdu: &PduEvent) -> Result<()> { + self.eventid_outlierpdu.insert( + &pdu.event_id.as_bytes(), &*serde_json::to_string(&pdu).expect("PduEvent is always a valid String"), )?; - self.roomeventid_outlierpducount - .insert(&key, &self.latest_pdu_count(pdu.room_id())?.to_be_bytes())?; + Ok(()) } @@ -557,50 +560,6 @@ impl Rooms { } } - // We no longer keep this pdu as an outlier - let mut key = pdu.room_id().as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu.event_id().as_bytes()); - if self.roomeventid_outlierpdu.remove(&key)?.is_some() { - if let Some(state_key) = pdu.state_key.as_deref() { - let mut statekey = pdu.kind().as_ref().as_bytes().to_vec(); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_short.get(&statekey)? { - Some(short) => utils::u64_from_bytes(&short).map_err(|_| { - Error::bad_database("Invalid short bytes in statekey_short.") - })?, - None => { - error!( - "This event has been inserted into the state snapshot tree previously." - ); - let short = db.globals.next_count()?; - self.statekey_short - .insert(&statekey, &short.to_be_bytes())?; - short - } - }; - - let mut start = pdu.room_id().as_bytes().to_vec(); - start.extend_from_slice( - &self - .roomeventid_outlierpducount - .get(&key)? - .unwrap_or_default(), - ); - for hash in self.pduid_statehash.range(start..).values() { - let mut hash = hash?.to_vec(); - hash.extend_from_slice(&short.to_be_bytes()); - - let _ = dbg!(self.stateid_pduid.compare_and_swap( - hash, - Some(pdu.event_id().as_bytes()), - Some(pdu_id.as_ref()), - )?); - } - } - } - // We must keep track of all events that have been referenced. for leaf in leaves { let mut key = pdu.room_id().as_bytes().to_vec(); @@ -1275,7 +1234,7 @@ impl Rooms { } /// Update current membership data. 
- fn update_membership( + pub fn update_membership( &self, room_id: &RoomId, user_id: &UserId, diff --git a/src/database/sending.rs b/src/database/sending.rs index 00073af..f96e489 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -346,6 +346,8 @@ impl Sending { .collect::>(); let permit = maximum_requests.acquire().await; + + info!("sending pdus to {}: {:#?}", server, pdu_jsons); let response = server_server::send_request( &globals, &*server, @@ -361,7 +363,10 @@ impl Sending { }, ) .await - .map(|_response| (server.clone(), is_appservice)) + .map(|response| { + info!("server response: {:?}", response); + (server.clone(), is_appservice) + }) .map_err(|e| (server, is_appservice, e)); drop(permit); diff --git a/src/main.rs b/src/main.rs index a2c020f..eb89fea 100644 --- a/src/main.rs +++ b/src/main.rs @@ -204,6 +204,8 @@ async fn main() { rocket.launch().await.unwrap(); } else { + pretty_env_logger::init(); + let root = span!(tracing::Level::INFO, "app_start", work_units = 2); let _enter = root.enter(); diff --git a/src/server_server.rs b/src/server_server.rs index a665fe9..02610e8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -509,7 +509,7 @@ pub async fn send_transaction_message_route<'a>( return Err(Error::bad_config("Federation is disabled.")); } - // dbg!(&*body); + info!("Incoming PDUs: {:?}", &body.pdus); for edu in &body.edus { match serde_json::from_str::(edu.json().get()) { @@ -600,37 +600,11 @@ pub async fn send_transaction_message_route<'a>( // events over federation. For example, the Federation API's /send endpoint would // discard the event whereas the Client Server API's /send/{eventType} endpoint // would return a M_BAD_JSON error. - 'main_pdu_loop: for (event_id, room_id, value) in pdus_to_resolve { + 'main_pdu_loop: for (event_id, _room_id, value) in pdus_to_resolve { + info!("Working on incoming pdu: {:?}", value); let server_name = &body.body.origin; let mut pub_key_map = BTreeMap::new(); - if let Some(CanonicalJsonValue::String(sender)) = value.get("sender") { - let sender = - UserId::try_from(sender.as_str()).expect("All PDUs have a valid sender field"); - let origin = sender.server_name(); - - let keys = match fetch_signing_keys(&db, &room_id, origin).await { - Ok(keys) => keys, - Err(_) => { - resolved_map.insert( - event_id, - Err("Could not find signing keys for this server".to_string()), - ); - continue; - } - }; - - pub_key_map.insert( - origin.to_string(), - keys.into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(), - ); - } else { - resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string())); - continue; - } - // TODO: make this persist but not a DB Tree... // This is all the auth_events that have been recursively fetched so they don't have to be // deserialized over and over again. This could potentially also be some sort of trie (suffix tree) @@ -645,11 +619,11 @@ pub async fn send_transaction_message_route<'a>( // 7. if not timeline event: stop // TODO; 8. fetch any missing prev events doing all checks listed here starting at 1. 
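For the non-Rocket code path, main.rs above now initializes pretty_env_logger, which this patch also adds to Cargo.toml. A minimal sketch of that setup, assuming the pretty_env_logger 0.4 and log crates from the dependency changes; the log lines themselves are illustrative.

fn main() {
    // Respects RUST_LOG, e.g. `RUST_LOG=info ./conduit` or `RUST_LOG=conduit=debug`.
    pretty_env_logger::init();

    log::info!("Conduit starting up");
    log::warn!("federation is still experimental");
}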
These are timeline events // the events found in step 8 can be authed/resolved and appended to the DB - let (pdu, previous): (Arc, Vec>) = match validate_event( + let (pdu, previous_create): (Arc, Option>) = match validate_event( &db, value, event_id.clone(), - &pub_key_map, + &mut pub_key_map, server_name, // All the auth events gathered will be here &mut auth_cache, @@ -662,15 +636,11 @@ pub async fn send_transaction_message_route<'a>( continue; } }; - - let single_prev = if previous.len() == 1 { - previous.first().cloned() - } else { - None - }; + info!("Validated event."); // 6. persist the event as an outlier. - db.rooms.append_pdu_outlier(&pdu)?; + db.rooms.add_pdu_outlier(&pdu)?; + info!("Added pdu as outlier."); // Step 9. fetch missing state by calling /state_ids at backwards extremities doing all // the checks in this list starting at 1. These are not timeline events. @@ -679,6 +649,7 @@ pub async fn send_transaction_message_route<'a>( // // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event + info!("Requesting state at event."); let (state_at_event, incoming_auth_events): (StateMap>, Vec>) = match db .sending @@ -693,14 +664,20 @@ pub async fn send_transaction_message_route<'a>( .await { Ok(res) => { - let state = fetch_events( + info!("Fetching state events at event."); + let state = match fetch_events( &db, server_name, - &pub_key_map, + &mut pub_key_map, &res.pdu_ids, &mut auth_cache, ) - .await?; + .await + { + Ok(state) => state, + Err(_) => continue, + }; + // Sanity check: there are no conflicting events in the state we received let mut seen = BTreeSet::new(); for ev in &state { @@ -716,17 +693,21 @@ pub async fn send_transaction_message_route<'a>( .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu)) .collect(); - ( - state, - fetch_events( - &db, - server_name, - &pub_key_map, - &res.auth_chain_ids, - &mut auth_cache, - ) - .await?, + let incoming_auth_events = match fetch_events( + &db, + server_name, + &mut pub_key_map, + &res.auth_chain_ids, + &mut auth_cache, ) + .await + { + Ok(state) => state, + Err(_) => continue, + }; + + info!("Fetching auth events of state events at event."); + (state, incoming_auth_events) } Err(_) => { resolved_map.insert( @@ -741,7 +722,7 @@ pub async fn send_transaction_message_route<'a>( if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - single_prev.clone(), + previous_create.clone(), &state_at_event, None, // TODO: third party invite ) @@ -754,6 +735,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; } + info!("Auth check succeeded."); // End of step 10. // 12. check if the event passes auth based on the "current state" of the room, if not "soft fail" it @@ -764,10 +746,12 @@ pub async fn send_transaction_message_route<'a>( .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v))) .collect(); + info!("current state: {:#?}", current_state); + if !state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - single_prev.clone(), + previous_create, ¤t_state, None, ) @@ -780,6 +764,7 @@ pub async fn send_transaction_message_route<'a>( ); continue; }; + info!("Auth check with current state succeeded."); // Step 11. Ensure that the state is derived from the previous current state (i.e. 
we calculated by doing state res // where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote) @@ -787,7 +772,10 @@ pub async fn send_transaction_message_route<'a>( // calculate_forward_extremities takes care of adding the current state if not already in the state sets // it also calculates the new pdu leaves for the `roomid_pduleaves` DB Tree. let extremities = match calculate_forward_extremities(&db, &pdu).await { - Ok(fork_ids) => fork_ids, + Ok(fork_ids) => { + info!("Calculated new forward extremities: {:?}", fork_ids); + fork_ids + } Err(_) => { resolved_map.insert(event_id, Err("Failed to gather forward extremities".into())); continue; @@ -836,7 +824,6 @@ pub async fn send_transaction_message_route<'a>( // We do need to force an update to this rooms state update_state = true; - // TODO: remove this is for current debugging Jan, 15 2021 let mut auth_events = vec![]; for map in &fork_states { let mut state_auth = vec![]; @@ -877,6 +864,8 @@ pub async fn send_transaction_message_route<'a>( .map(|(_, pdu)| (pdu.event_id().clone(), pdu)), ); + info!("auth events: {:?}", auth_cache); + let res = match state_res::StateResolution::resolve( pdu.room_id(), &RoomVersionId::Version6, @@ -927,6 +916,7 @@ pub async fn send_transaction_message_route<'a>( // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. append_incoming_pdu(&db, &pdu, &extremities, &state_at_event)?; + info!("Appended incoming pdu."); // Set the new room state to the resolved state update_resolved_state( @@ -938,6 +928,7 @@ pub async fn send_transaction_message_route<'a>( None }, )?; + info!("Updated resolved state"); // Event has passed all auth/stateres checks } @@ -962,17 +953,52 @@ type AsyncRecursiveResult<'a, T> = Pin( db: &'a Database, value: CanonicalJsonObject, event_id: EventId, - pub_key_map: &'a PublicKeyMap, + pub_key_map: &'a mut PublicKeyMap, origin: &'a ServerName, auth_cache: &'a mut EventMap>, -) -> AsyncRecursiveResult<'a, (Arc, Vec>)> { +) -> AsyncRecursiveResult<'a, (Arc, Option>)> { Box::pin(async move { + for signature_server in match value + .get("signatures") + .ok_or_else(|| "No signatures in server response pdu.".to_string())? 
+ { + CanonicalJsonValue::Object(map) => map, + _ => return Err("Invalid signatures object in server response pdu.".to_string()), + } + .keys() + { + info!("Fetching signing keys for {}", signature_server); + let keys = match fetch_signing_keys( + &db, + &Box::::try_from(&**signature_server).map_err(|_| { + "Invalid servername in signatures of server response pdu.".to_string() + })?, + ) + .await + { + Ok(keys) => { + info!("Keys: {:?}", keys); + keys + } + Err(_) => { + return Err( + "Signature verification failed: Could not fetch signing key.".to_string(), + ); + } + }; + + pub_key_map.insert(signature_server.clone(), keys); + + info!("Fetched signing keys"); + } + let mut val = - match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) { + match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version5) { Ok(ver) => { if let ruma::signatures::Verified::Signatures = ver { match ruma::signatures::redact(&value, &RoomVersionId::Version6) { @@ -1000,26 +1026,34 @@ fn validate_event<'a>( ) .map_err(|_| "Event is not a valid PDU".to_string())?; + info!("Fetching auth events."); fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache) .await .map_err(|e| e.to_string())?; let pdu = Arc::new(pdu.clone()); + /* // 8. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let previous = fetch_events(&db, origin, &pub_key_map, &pdu.prev_events, auth_cache) + info!("Fetching prev events."); + let previous = fetch_events(&db, origin, pub_key_map, &pdu.prev_events, auth_cache) .await .map_err(|e| e.to_string())?; + */ + + // if the previous event was the create event special rules apply + let previous_create = if pdu.auth_events.len() == 1 && pdu.prev_events == pdu.auth_events { + auth_cache.get(&pdu.auth_events[0]).cloned() + } else { + None + }; // Check that the event passes auth based on the auth_events + info!("Checking auth."); let is_authed = state_res::event_auth::auth_check( &RoomVersionId::Version6, &pdu, - if previous.len() == 1 { - previous.first().cloned() - } else { - None - }, + previous_create.clone(), &pdu.auth_events .iter() .map(|id| { @@ -1039,39 +1073,20 @@ fn validate_event<'a>( return Err("Event has failed auth check with auth events".to_string()); } - Ok((pdu, previous)) + info!("Validation successful."); + Ok((pdu, previous_create)) }) } -/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events -/// The check in `fetch_check_auth_events` is that a complete chain is found for the -/// events `auth_events`. If the chain is found to have any missing events it fails. 
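validate_event above walks the `signatures` object of the incoming PDU and fetches the signing keys of every server named there, so the verifier receives one key set per signing server. A plain-Rust sketch of the resulting map shape; fetch_signing_keys_stub and the key/server names are stand-ins for the real federation lookup.

use std::collections::BTreeMap;

// One entry per signing server, mapping key ids to public keys.
type PublicKeyMap = BTreeMap<String, BTreeMap<String, String>>;

fn fetch_signing_keys_stub(server: &str) -> BTreeMap<String, String> {
    // In Conduit this is answered from the local cache or over federation.
    let mut keys = BTreeMap::new();
    keys.insert("ed25519:a_key_id".to_owned(), format!("base64-public-key-of-{}", server));
    keys
}

fn main() {
    // The `signatures` object of an incoming PDU names every server that signed it.
    let signature_servers = vec!["their.server", "our.server"];

    let mut pub_key_map = PublicKeyMap::new();
    for server in signature_servers {
        pub_key_map.insert(server.to_owned(), fetch_signing_keys_stub(server));
    }

    // The verifier is then handed the full map so every signature can be checked.
    assert_eq!(pub_key_map.len(), 2);
}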
+#[tracing::instrument(skip(db))] async fn fetch_check_auth_events( db: &Database, origin: &ServerName, - key_map: &PublicKeyMap, + key_map: &mut PublicKeyMap, event_ids: &[EventId], auth_cache: &mut EventMap>, ) -> Result<()> { - let mut stack = event_ids.to_vec(); - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if auth_cache.contains_key(&ev_id) { - continue; - } - - // TODO: Batch these async calls so we can wait on multiple at once - let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache) - .await - .map(|mut vec| { - vec.pop() - .ok_or_else(|| Error::Conflict("Event was not found in fetch_events")) - })??; - - stack.extend(ev.auth_events()); - } + fetch_events(db, origin, key_map, event_ids, auth_cache).await?; Ok(()) } @@ -1086,44 +1101,58 @@ async fn fetch_check_auth_events( /// /// If the event is unknown to the `auth_cache` it is added. This guarantees that any /// event we need to know of will be present. +#[tracing::instrument(skip(db))] pub(crate) async fn fetch_events( db: &Database, origin: &ServerName, - key_map: &PublicKeyMap, + key_map: &mut PublicKeyMap, events: &[EventId], auth_cache: &mut EventMap>, ) -> Result>> { let mut pdus = vec![]; for id in events { + info!("Fetching event: {}", id); let pdu = match auth_cache.get(id) { - Some(pdu) => pdu.clone(), + Some(pdu) => { + info!("Event found in cache"); + pdu.clone() + } // `get_pdu` checks the outliers tree for us None => match db.rooms.get_pdu(&id)? { - Some(pdu) => Arc::new(pdu), - None => match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &id }, - ) - .await - { - Ok(res) => { - let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu); - let (pdu, _) = - validate_event(db, value, event_id, key_map, origin, auth_cache) - .await - .map_err(|e| { - error!("{:?}", e); - Error::Conflict("Authentication of event failed") - })?; + Some(pdu) => { + info!("Event found in outliers"); + Arc::new(pdu) + } + None => { + info!("Fetching event over federation"); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &id }, + ) + .await + { + Ok(res) => { + info!("Got event over federation: {:?}", res); + let (event_id, value) = + crate::pdu::gen_event_id_canonical_json(&res.pdu); + let (pdu, _) = + validate_event(db, value, event_id, key_map, origin, auth_cache) + .await + .map_err(|e| { + error!("ERROR: {:?}", e); + Error::Conflict("Authentication of event failed") + })?; - db.rooms.append_pdu_outlier(&pdu)?; - pdu + info!("Added fetched pdu as outlier."); + db.rooms.add_pdu_outlier(&pdu)?; + pdu + } + Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), } - Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")), - }, + } }, }; auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone()); @@ -1134,14 +1163,23 @@ pub(crate) async fn fetch_events( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. +#[tracing::instrument(skip(db))] pub(crate) async fn fetch_signing_keys( db: &Database, - room_id: &RoomId, origin: &ServerName, -) -> Result> { +) -> Result> { + let mut result = BTreeMap::new(); + match db.globals.signing_keys_for(origin)? 
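fetch_events above keeps a per-request cache (the auth_cache EventMap) so each event is looked up and deserialized at most once and then shared as an Arc. A simplified sketch of that memoization; Pdu and load_event are illustrative stand-ins for the real event type and the database/federation lookup.

use std::collections::HashMap;
use std::sync::Arc;

struct Pdu {
    event_id: String,
}

fn load_event(event_id: &str) -> Arc<Pdu> {
    // In Conduit this consults the timeline, the outlier tree and finally the
    // remote server; here it just constructs a dummy event.
    Arc::new(Pdu { event_id: event_id.to_owned() })
}

fn fetch_events(ids: &[&str], cache: &mut HashMap<String, Arc<Pdu>>) -> Vec<Arc<Pdu>> {
    let mut pdus = Vec::new();
    for id in ids {
        let pdu = match cache.get(*id) {
            Some(pdu) => Arc::clone(pdu),
            None => load_event(id),
        };
        cache.entry((*id).to_owned()).or_insert_with(|| Arc::clone(&pdu));
        pdus.push(pdu);
    }
    pdus
}

fn main() {
    let mut cache = HashMap::new();
    let first = fetch_events(&["$a", "$b"], &mut cache);
    let second = fetch_events(&["$a"], &mut cache);
    // The second call reuses the cached Arc; no new copy of the event is made.
    assert!(Arc::ptr_eq(&first[0], &second[0]));
    assert_eq!(cache.len(), 2);
    assert_eq!(second[0].event_id, "$a");
}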
-        keys if !keys.is_empty() => Ok(keys),
+        keys if !keys.is_empty() => {
+            info!("we knew the signing keys already: {:?}", keys);
+            Ok(keys
+                .into_iter()
+                .map(|(k, v)| (k.to_string(), v.key))
+                .collect())
+        }
         _ => {
+            info!("Asking {} for its signing key", origin);
             match db
                 .sending
                 .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new())
@@ -1149,13 +1187,24 @@ pub(crate) async fn fetch_signing_keys(
             {
                 Ok(keys) => {
                     db.globals.add_signing_key(origin, &keys.server_key)?;
-                    Ok(keys.server_key.verify_keys)
+
+                    result.extend(
+                        keys.server_key
+                            .verify_keys
+                            .into_iter()
+                            .map(|(k, v)| (k.to_string(), v.key)),
+                    );
+                    result.extend(
+                        keys.server_key
+                            .old_verify_keys
+                            .into_iter()
+                            .map(|(k, v)| (k.to_string(), v.key)),
+                    );
+                    return Ok(result);
                 }
                 _ => {
-                    for server in db.rooms.room_servers(room_id).filter(
-                        |ser| matches!(ser, Ok(s) if db.globals.trusted_servers().contains(s)),
-                    ) {
-                        let server = server?;
+                    for server in db.globals.trusted_servers() {
+                        info!("Asking {} for {}'s signing key", server, origin);
                         if let Ok(keys) = db
                             .sending
                             .send_federation_request(
@@ -1170,30 +1219,21 @@
                             )
                             .await
                         {
-                            let mut trust = 0;
-                            let keys: Vec<ServerSigningKeys> = keys.server_keys;
-                            let key = keys.iter().fold(None, |mut key, next| {
-                                if let Some(verified) = &key {
-                                    // rustc cannot elide this type for some reason
-                                    let v: &ServerSigningKeys = verified;
-                                    if v.verify_keys
-                                        .iter()
-                                        .zip(next.verify_keys.iter())
-                                        .all(|(a, b)| a.1.key == b.1.key)
-                                    {
-                                        trust += 1;
-                                    }
-                                } else {
-                                    key = Some(next.clone())
-                                }
-                                key
-                            });
-
-                            if trust == (keys.len() - 1) && key.is_some() {
-                                let k = key.unwrap();
+                            info!("Got signing keys: {:?}", keys);
+                            for k in keys.server_keys.into_iter() {
                                 db.globals.add_signing_key(origin, &k)?;
-                                return Ok(k.verify_keys);
+                                result.extend(
+                                    k.verify_keys
+                                        .into_iter()
+                                        .map(|(k, v)| (k.to_string(), v.key)),
+                                );
+                                result.extend(
+                                    k.old_verify_keys
+                                        .into_iter()
+                                        .map(|(k, v)| (k.to_string(), v.key)),
+                                );
                             }
+                            return Ok(result);
                         }
                     }
                     Err(Error::BadServerResponse(
@@ -1211,6 +1251,7 @@
 /// where one of the inputs was a previously trusted set of state, don't just trust a set of state we got from a remote).
 ///
 /// The state snapshot of the incoming event __needs__ to be added to the resulting list.
+#[tracing::instrument(skip(db))]
 pub(crate) async fn calculate_forward_extremities(
     db: &Database,
     pdu: &PduEvent,
@@ -1261,6 +1302,7 @@
 ///
 /// This guarantees that the incoming event will be in the state sets (at least our servers
 /// and the sending server).
+#[tracing::instrument(skip(db))]
 pub(crate) async fn build_forward_extremity_snapshots(
     db: &Database,
     pdu: Arc<PduEvent>,
@@ -1275,12 +1317,14 @@
     let mut includes_current_state = false;
 
     let mut fork_states = BTreeSet::new();
     for id in current_leaves {
+        if id == &pdu.event_id {
+            continue;
+        }
         match db.rooms.get_pdu_id(id)? {
             // We can skip this because it is handled outside of this function
             // The current server state and incoming event state are built to be
             // the state after.
             // This would be the incoming state from the server.
-            Some(_) if id == pdu.event_id() => {}
             Some(pduid) if db.rooms.get_pdu_from_id(&pduid)?.is_some() => {
                 let state_hash = db
                     .rooms
@@ -1308,40 +1352,7 @@ pub(crate) async fn build_forward_extremity_snapshots(
             }
             _ => {
                 error!("Missing state snapshot for {:?} - {:?}", id, pdu.kind());
-
-                let res = db
-                    .sending
-                    .send_federation_request(
-                        &db.globals,
-                        origin,
-                        get_room_state_ids::v1::Request {
-                            room_id: pdu.room_id(),
-                            event_id: id,
-                        },
-                    )
-                    .await?;
-
-                // TODO: This only adds events to the auth_cache, there is for sure a better way to
-                // do this...
-                fetch_events(&db, origin, pub_key_map, &res.auth_chain_ids, auth_cache).await?;
-
-                let mut state_before =
-                    fetch_events(&db, origin, pub_key_map, &res.pdu_ids, auth_cache)
-                        .await?
-                        .into_iter()
-                        .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu))
-                        .collect::<BTreeMap<_, _>>();
-
-                if let Some(pdu) = fetch_events(db, origin, pub_key_map, &[id.clone()], auth_cache)
-                    .await?
-                    .pop()
-                {
-                    let key = (pdu.kind.clone(), pdu.state_key());
-                    state_before.insert(key, pdu);
-                }
-
-                // Now it's the state after
-                fork_states.insert(state_before);
+                return Err(Error::BadDatabase("Missing state snapshot."));
             }
         }
     }
@@ -1353,9 +1364,11 @@
         fork_states.insert(current_state);
     }
 
+    info!("Fork states: {:?}", fork_states);
     Ok(fork_states)
 }
 
+#[tracing::instrument(skip(db))]
 pub(crate) fn update_resolved_state(
     db: &Database,
     room_id: &RoomId,
@@ -1366,22 +1379,14 @@ pub(crate) fn update_resolved_state(
     if let Some(state) = state {
         let mut new_state = HashMap::new();
         for ((ev_type, state_k), pdu) in state {
-            match db.rooms.get_pdu_id(pdu.event_id())? {
-                Some(pduid) => {
-                    new_state.insert(
-                        (
-                            ev_type,
-                            state_k.ok_or_else(|| {
-                                Error::Conflict("State contained non state event")
-                            })?,
-                        ),
-                        pduid.to_vec(),
-                    );
-                }
-                None => {
-                    error!("We are missing a state event for the current room state.");
-                }
-            }
+            let long_id = db.rooms.get_long_id(&pdu.event_id)?;
+            new_state.insert(
+                (
+                    ev_type,
+                    state_k.ok_or_else(|| Error::Conflict("State contained non state event"))?,
+                ),
+                long_id,
+            );
         }
 
         db.rooms.force_state(room_id, new_state, &db.globals)?;
@@ -1392,6 +1397,7 @@ pub(crate) fn update_resolved_state(
 
 /// Append the incoming event setting the state snapshot to the state from the
 /// server that sent the event.
+#[tracing::instrument(skip(db))]
 pub(crate) fn append_incoming_pdu(
     db: &Database,
     pdu: &PduEvent,
@@ -1402,20 +1408,16 @@ pub(crate) fn append_incoming_pdu(
     // We can tell if we need to do this based on whether state resolution took place or not
     let mut new_state = HashMap::new();
     for ((ev_type, state_k), state_pdu) in state {
-        match db.rooms.get_pdu_id(state_pdu.event_id())? {
-            Some(state_pduid) => {
-                new_state.insert(
-                    (
-                        ev_type.clone(),
-                        state_k
-                            .clone()
-                            .ok_or_else(|| Error::Conflict("State contained non state event"))?,
-                    ),
-                    state_pduid.to_vec(),
-                );
-            }
-            None => error!("We are missing a state event for the incoming event snapshot"),
-        }
+        let long_id = db.rooms.get_long_id(state_pdu.event_id())?;
+        new_state.insert(
+            (
+                ev_type.clone(),
+                state_k
+                    .clone()
+                    .ok_or_else(|| Error::Conflict("State contained non state event"))?,
+            ),
+            long_id.to_vec(),
+        );
     }
 
     db.rooms