Merge branch 'statediffs' into 'master'
Statediffs

See merge request famedly/conduit!145
Commit 33481ec062

17 changed files with 1850 additions and 922 deletions
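The substance of this merge request (visible in the src/database.rs migration below) is storing each room state snapshot as a diff against a parent state: only the entries added and removed relative to the parent are persisted. A minimal sketch of that diff computation, assuming a hypothetical CompressedStateEvent byte type in place of Conduit's internal compressed state representation:

    use std::collections::HashSet;

    // Hypothetical stand-in for Conduit's compressed (shortstatekey + shorteventid) entries.
    type CompressedStateEvent = Vec<u8>;

    struct StateDiff {
        added: HashSet<CompressedStateEvent>,
        removed: HashSet<CompressedStateEvent>,
    }

    // Compute what changed relative to the parent state; with no parent, the full
    // state becomes the "diff" (mirrors the handle_state closure in the migration).
    fn diff_against_parent(
        current_state: &HashSet<CompressedStateEvent>,
        parent_state: Option<&HashSet<CompressedStateEvent>>,
    ) -> StateDiff {
        match parent_state {
            Some(parent) => StateDiff {
                added: current_state.difference(parent).cloned().collect(),
                removed: parent.difference(current_state).cloned().collect(),
            },
            None => StateDiff {
                added: current_state.clone(),
                removed: HashSet::new(),
            },
        }
    }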
							
								
								
									
Cargo.lock (generated, 114 changes)
							|  | @ -248,7 +248,7 @@ dependencies = [ | |||
|  "jsonwebtoken", | ||||
|  "lru-cache", | ||||
|  "num_cpus", | ||||
|  "opentelemetry", | ||||
|  "opentelemetry 0.16.0", | ||||
|  "opentelemetry-jaeger", | ||||
|  "parking_lot", | ||||
|  "pretty_env_logger", | ||||
|  | @ -1466,16 +1466,46 @@ dependencies = [ | |||
| ] | ||||
| 
 | ||||
| [[package]] | ||||
| name = "opentelemetry-jaeger" | ||||
| version = "0.14.0" | ||||
| name = "opentelemetry" | ||||
| version = "0.16.0" | ||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | ||||
| checksum = "09a9fc8192722e7daa0c56e59e2336b797122fb8598383dcb11c8852733b435c" | ||||
| checksum = "e1cf9b1c4e9a6c4de793c632496fa490bdc0e1eea73f0c91394f7b6990935d22" | ||||
| dependencies = [ | ||||
|  "async-trait", | ||||
|  "crossbeam-channel", | ||||
|  "futures", | ||||
|  "js-sys", | ||||
|  "lazy_static", | ||||
|  "percent-encoding", | ||||
|  "pin-project", | ||||
|  "rand 0.8.4", | ||||
|  "thiserror", | ||||
|  "tokio", | ||||
|  "tokio-stream", | ||||
| ] | ||||
| 
 | ||||
| [[package]] | ||||
| name = "opentelemetry-jaeger" | ||||
| version = "0.15.0" | ||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | ||||
| checksum = "db22f492873ea037bc267b35a0e8e4fb846340058cb7c864efe3d0bf23684593" | ||||
| dependencies = [ | ||||
|  "async-trait", | ||||
|  "lazy_static", | ||||
|  "opentelemetry", | ||||
|  "opentelemetry 0.16.0", | ||||
|  "opentelemetry-semantic-conventions", | ||||
|  "thiserror", | ||||
|  "thrift", | ||||
|  "tokio", | ||||
| ] | ||||
| 
 | ||||
| [[package]] | ||||
| name = "opentelemetry-semantic-conventions" | ||||
| version = "0.8.0" | ||||
| source = "registry+https://github.com/rust-lang/crates.io-index" | ||||
| checksum = "ffeac823339e8b0f27b961f4385057bf9f97f2863bc745bd015fd6091f2270e9" | ||||
| dependencies = [ | ||||
|  "opentelemetry 0.16.0", | ||||
| ] | ||||
| 
 | ||||
| [[package]] | ||||
|  | @ -2014,8 +2044,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma" | ||||
| version = "0.2.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.3.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "assign", | ||||
|  "js_int", | ||||
|  | @ -2035,8 +2065,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-api" | ||||
| version = "0.17.1" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.18.3" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "bytes", | ||||
|  "http", | ||||
|  | @ -2051,8 +2081,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-api-macros" | ||||
| version = "0.17.1" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.18.3" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "proc-macro-crate", | ||||
|  "proc-macro2", | ||||
|  | @ -2062,8 +2092,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-appservice-api" | ||||
| version = "0.3.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.4.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "ruma-api", | ||||
|  "ruma-common", | ||||
|  | @ -2076,8 +2106,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-client-api" | ||||
| version = "0.11.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.12.2" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "assign", | ||||
|  "bytes", | ||||
|  | @ -2096,8 +2126,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-common" | ||||
| version = "0.5.4" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.6.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "indexmap", | ||||
|  "js_int", | ||||
|  | @ -2111,8 +2141,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-events" | ||||
| version = "0.23.2" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.24.4" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "indoc", | ||||
|  "js_int", | ||||
|  | @ -2127,8 +2157,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-events-macros" | ||||
| version = "0.23.2" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.24.4" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "proc-macro-crate", | ||||
|  "proc-macro2", | ||||
|  | @ -2138,8 +2168,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-federation-api" | ||||
| version = "0.2.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.3.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "js_int", | ||||
|  "ruma-api", | ||||
|  | @ -2153,8 +2183,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-identifiers" | ||||
| version = "0.19.4" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.20.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "paste", | ||||
|  "rand 0.8.4", | ||||
|  | @ -2167,8 +2197,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-identifiers-macros" | ||||
| version = "0.19.4" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.20.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "quote", | ||||
|  "ruma-identifiers-validation", | ||||
|  | @ -2177,13 +2207,13 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-identifiers-validation" | ||||
| version = "0.4.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.5.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-identity-service-api" | ||||
| version = "0.2.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.3.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "js_int", | ||||
|  "ruma-api", | ||||
|  | @ -2195,8 +2225,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-push-gateway-api" | ||||
| version = "0.2.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.3.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "js_int", | ||||
|  "ruma-api", | ||||
|  | @ -2210,8 +2240,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-serde" | ||||
| version = "0.4.1" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.5.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "bytes", | ||||
|  "form_urlencoded", | ||||
|  | @ -2224,8 +2254,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-serde-macros" | ||||
| version = "0.4.1" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.5.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "proc-macro-crate", | ||||
|  "proc-macro2", | ||||
|  | @ -2235,8 +2265,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-signatures" | ||||
| version = "0.8.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.9.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "base64 0.13.0", | ||||
|  "ed25519-dalek", | ||||
|  | @ -2252,8 +2282,8 @@ dependencies = [ | |||
| 
 | ||||
| [[package]] | ||||
| name = "ruma-state-res" | ||||
| version = "0.2.0" | ||||
| source = "git+https://github.com/timokoesters/ruma?rev=a2d93500e1dbc87e7032a3c74f3b2479a7f84e93#a2d93500e1dbc87e7032a3c74f3b2479a7f84e93" | ||||
| version = "0.3.0" | ||||
| source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" | ||||
| dependencies = [ | ||||
|  "itertools 0.10.1", | ||||
|  "js_int", | ||||
|  | @ -3022,7 +3052,7 @@ version = "0.14.0" | |||
| source = "registry+https://github.com/rust-lang/crates.io-index" | ||||
| checksum = "c47440f2979c4cd3138922840eec122e3c0ba2148bc290f756bd7fd60fc97fff" | ||||
| dependencies = [ | ||||
|  "opentelemetry", | ||||
|  "opentelemetry 0.15.0", | ||||
|  "tracing", | ||||
|  "tracing-core", | ||||
|  "tracing-log", | ||||
|  |  | |||
|  | @ -18,8 +18,8 @@ edition = "2018" | |||
| rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests | ||||
| 
 | ||||
| # Used for matrix spec type definitions and helpers | ||||
| #ruma = { git = "https://github.com/ruma/ruma", rev = "eb19b0e08a901b87d11b3be0890ec788cc760492", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } | ||||
| ruma = { git = "https://github.com/timokoesters/ruma", rev = "a2d93500e1dbc87e7032a3c74f3b2479a7f84e93", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } | ||||
| ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } | ||||
| #ruma = { git = "https://github.com/timokoesters/ruma", rev = "995ccea20f5f6d4a8fb22041749ed4de22fa1b6a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } | ||||
| #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } | ||||
| 
 | ||||
| # Used for long polling and federation sender, should be the same as rocket::tokio | ||||
|  | @ -66,11 +66,11 @@ regex = "1.5.4" | |||
| jsonwebtoken = "7.2.0" | ||||
| # Performance measurements | ||||
| tracing = { version = "0.1.26", features = ["release_max_level_warn"] } | ||||
| opentelemetry = "0.15.0" | ||||
| tracing-subscriber = "0.2.19" | ||||
| tracing-opentelemetry = "0.14.0" | ||||
| tracing-flame = "0.1.0" | ||||
| opentelemetry-jaeger = "0.14.0" | ||||
| opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } | ||||
| opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } | ||||
| pretty_env_logger = "0.4.0" | ||||
| lru-cache = "0.1.2" | ||||
| rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } | ||||
|  |  | |||
|  | @ -249,6 +249,8 @@ pub async fn register_route( | |||
| 
 | ||||
|         let room_id = RoomId::new(db.globals.server_name()); | ||||
| 
 | ||||
|         db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; | ||||
| 
 | ||||
|         let mutex_state = Arc::clone( | ||||
|             db.globals | ||||
|                 .roomid_mutex_state | ||||
|  | @ -290,6 +292,7 @@ pub async fn register_route( | |||
|                     is_direct: None, | ||||
|                     third_party_invite: None, | ||||
|                     blurhash: None, | ||||
|                     reason: None, | ||||
|                 }) | ||||
|                 .expect("event is valid, we just created it"), | ||||
|                 unsigned: None, | ||||
|  | @ -455,6 +458,7 @@ pub async fn register_route( | |||
|                     is_direct: None, | ||||
|                     third_party_invite: None, | ||||
|                     blurhash: None, | ||||
|                     reason: None, | ||||
|                 }) | ||||
|                 .expect("event is valid, we just created it"), | ||||
|                 unsigned: None, | ||||
|  | @ -476,6 +480,7 @@ pub async fn register_route( | |||
|                     is_direct: None, | ||||
|                     third_party_invite: None, | ||||
|                     blurhash: None, | ||||
|                     reason: None, | ||||
|                 }) | ||||
|                 .expect("event is valid, we just created it"), | ||||
|                 unsigned: None, | ||||
|  | @ -681,6 +686,7 @@ pub async fn deactivate_route( | |||
|             is_direct: None, | ||||
|             third_party_invite: None, | ||||
|             blurhash: None, | ||||
|             reason: None, | ||||
|         }; | ||||
| 
 | ||||
|         let mutex_state = Arc::clone( | ||||
|  | @ -731,7 +737,7 @@ pub async fn deactivate_route( | |||
| pub async fn third_party_route( | ||||
|     body: Ruma<get_contacts::Request>, | ||||
| ) -> ConduitResult<get_contacts::Response> { | ||||
|     let sender_user = body.sender_user.as_ref().expect("user is authenticated"); | ||||
|     let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); | ||||
| 
 | ||||
|     Ok(get_contacts::Response::new(Vec::new()).into()) | ||||
| } | ||||
|  |  | |||
|  | @ -44,7 +44,7 @@ pub async fn get_context_route( | |||
| 
 | ||||
|     let events_before = db | ||||
|         .rooms | ||||
|         .pdus_until(&sender_user, &body.room_id, base_token) | ||||
|         .pdus_until(&sender_user, &body.room_id, base_token)? | ||||
|         .take( | ||||
|             u32::try_from(body.limit).map_err(|_| { | ||||
|                 Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") | ||||
|  | @ -66,7 +66,7 @@ pub async fn get_context_route( | |||
| 
 | ||||
|     let events_after = db | ||||
|         .rooms | ||||
|         .pdus_after(&sender_user, &body.room_id, base_token) | ||||
|         .pdus_after(&sender_user, &body.room_id, base_token)? | ||||
|         .take( | ||||
|             u32::try_from(body.limit).map_err(|_| { | ||||
|                 Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") | ||||
|  |  | |||
|  | @ -262,6 +262,7 @@ pub async fn ban_user_route( | |||
|                 is_direct: None, | ||||
|                 third_party_invite: None, | ||||
|                 blurhash: db.users.blurhash(&body.user_id)?, | ||||
|                 reason: None, | ||||
|             }), | ||||
|             |event| { | ||||
|                 let mut event = serde_json::from_value::<Raw<member::MemberEventContent>>( | ||||
|  | @ -563,6 +564,7 @@ async fn join_room_by_id_helper( | |||
|                 is_direct: None, | ||||
|                 third_party_invite: None, | ||||
|                 blurhash: db.users.blurhash(&sender_user)?, | ||||
|                 reason: None, | ||||
|             }) | ||||
|             .expect("event is valid, we just created it"), | ||||
|         ); | ||||
|  | @ -609,6 +611,8 @@ async fn join_room_by_id_helper( | |||
|             ) | ||||
|             .await?; | ||||
| 
 | ||||
|         db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; | ||||
| 
 | ||||
|         let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) | ||||
|             .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; | ||||
| 
 | ||||
|  | @ -693,6 +697,7 @@ async fn join_room_by_id_helper( | |||
|             is_direct: None, | ||||
|             third_party_invite: None, | ||||
|             blurhash: db.users.blurhash(&sender_user)?, | ||||
|             reason: None, | ||||
|         }; | ||||
| 
 | ||||
|         db.rooms.build_and_append_pdu( | ||||
|  | @ -844,6 +849,7 @@ pub async fn invite_helper<'a>( | |||
|                 membership: MembershipState::Invite, | ||||
|                 third_party_invite: None, | ||||
|                 blurhash: None, | ||||
|                 reason: None, | ||||
|             }) | ||||
|             .expect("member event is valid value"); | ||||
| 
 | ||||
|  | @ -1038,6 +1044,7 @@ pub async fn invite_helper<'a>( | |||
|                 is_direct: Some(is_direct), | ||||
|                 third_party_invite: None, | ||||
|                 blurhash: db.users.blurhash(&user_id)?, | ||||
|                 reason: None, | ||||
|             }) | ||||
|             .expect("event is valid, we just created it"), | ||||
|             unsigned: None, | ||||
|  |  | |||
|  | @ -128,7 +128,7 @@ pub async fn get_message_events_route( | |||
|         get_message_events::Direction::Forward => { | ||||
|             let events_after = db | ||||
|                 .rooms | ||||
|                 .pdus_after(&sender_user, &body.room_id, from) | ||||
|                 .pdus_after(&sender_user, &body.room_id, from)? | ||||
|                 .take(limit) | ||||
|                 .filter_map(|r| r.ok()) // Filter out buggy events
 | ||||
|                 .filter_map(|(pdu_id, pdu)| { | ||||
|  | @ -158,7 +158,7 @@ pub async fn get_message_events_route( | |||
|         get_message_events::Direction::Backward => { | ||||
|             let events_before = db | ||||
|                 .rooms | ||||
|                 .pdus_until(&sender_user, &body.room_id, from) | ||||
|                 .pdus_until(&sender_user, &body.room_id, from)? | ||||
|                 .take(limit) | ||||
|                 .filter_map(|r| r.ok()) // Filter out buggy events
 | ||||
|                 .filter_map(|(pdu_id, pdu)| { | ||||
|  |  | |||
|  | @ -33,6 +33,8 @@ pub async fn create_room_route( | |||
| 
 | ||||
|     let room_id = RoomId::new(db.globals.server_name()); | ||||
| 
 | ||||
|     db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; | ||||
| 
 | ||||
|     let mutex_state = Arc::clone( | ||||
|         db.globals | ||||
|             .roomid_mutex_state | ||||
|  | @ -105,6 +107,7 @@ pub async fn create_room_route( | |||
|                 is_direct: Some(body.is_direct), | ||||
|                 third_party_invite: None, | ||||
|                 blurhash: db.users.blurhash(&sender_user)?, | ||||
|                 reason: None, | ||||
|             }) | ||||
|             .expect("event is valid, we just created it"), | ||||
|             unsigned: None, | ||||
|  | @ -173,7 +176,6 @@ pub async fn create_room_route( | |||
|     )?; | ||||
| 
 | ||||
|     // 4. Canonical room alias
 | ||||
| 
 | ||||
|     if let Some(room_alias_id) = &alias { | ||||
|         db.rooms.build_and_append_pdu( | ||||
|             PduBuilder { | ||||
|  | @ -193,7 +195,7 @@ pub async fn create_room_route( | |||
|             &room_id, | ||||
|             &db, | ||||
|             &state_lock, | ||||
|         ); | ||||
|         )?; | ||||
|     } | ||||
| 
 | ||||
|     // 5. Events set by preset
 | ||||
|  | @ -516,6 +518,7 @@ pub async fn upgrade_room_route( | |||
|                 is_direct: None, | ||||
|                 third_party_invite: None, | ||||
|                 blurhash: db.users.blurhash(&sender_user)?, | ||||
|                 reason: None, | ||||
|             }) | ||||
|             .expect("event is valid, we just created it"), | ||||
|             unsigned: None, | ||||
|  |  | |||
|  | @ -3,7 +3,10 @@ use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; | |||
| use ruma::{ | ||||
|     api::client::{ | ||||
|         error::ErrorKind, | ||||
|         r0::session::{get_login_types, login, logout, logout_all}, | ||||
|         r0::{ | ||||
|             session::{get_login_types, login, logout, logout_all}, | ||||
|             uiaa::IncomingUserIdentifier, | ||||
|         }, | ||||
|     }, | ||||
|     UserId, | ||||
| }; | ||||
|  | @ -60,7 +63,7 @@ pub async fn login_route( | |||
|             identifier, | ||||
|             password, | ||||
|         } => { | ||||
|             let username = if let login::IncomingUserIdentifier::MatrixId(matrix_id) = identifier { | ||||
|             let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { | ||||
|                 matrix_id | ||||
|             } else { | ||||
|                 return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); | ||||
|  |  | |||
|  | @ -205,7 +205,7 @@ async fn sync_helper( | |||
| 
 | ||||
|         let mut non_timeline_pdus = db | ||||
|             .rooms | ||||
|             .pdus_until(&sender_user, &room_id, u64::MAX) | ||||
|             .pdus_until(&sender_user, &room_id, u64::MAX)? | ||||
|             .filter_map(|r| { | ||||
|                 // Filter out buggy events
 | ||||
|                 if r.is_err() { | ||||
|  | @ -248,13 +248,13 @@ async fn sync_helper( | |||
| 
 | ||||
|         let first_pdu_before_since = db | ||||
|             .rooms | ||||
|             .pdus_until(&sender_user, &room_id, since) | ||||
|             .pdus_until(&sender_user, &room_id, since)? | ||||
|             .next() | ||||
|             .transpose()?; | ||||
| 
 | ||||
|         let pdus_after_since = db | ||||
|             .rooms | ||||
|             .pdus_after(&sender_user, &room_id, since) | ||||
|             .pdus_after(&sender_user, &room_id, since)? | ||||
|             .next() | ||||
|             .is_some(); | ||||
| 
 | ||||
|  | @ -286,7 +286,7 @@ async fn sync_helper( | |||
| 
 | ||||
|                 for hero in db | ||||
|                     .rooms | ||||
|                     .all_pdus(&sender_user, &room_id) | ||||
|                     .all_pdus(&sender_user, &room_id)? | ||||
|                     .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
 | ||||
|                     .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) | ||||
|                     .map(|(_, pdu)| { | ||||
|  | @ -328,11 +328,11 @@ async fn sync_helper( | |||
|                 } | ||||
|             } | ||||
| 
 | ||||
|             ( | ||||
|             Ok::<_, Error>(( | ||||
|                 Some(joined_member_count), | ||||
|                 Some(invited_member_count), | ||||
|                 heroes, | ||||
|             ) | ||||
|             )) | ||||
|         }; | ||||
| 
 | ||||
|         let ( | ||||
|  | @ -343,7 +343,7 @@ async fn sync_helper( | |||
|             state_events, | ||||
|         ) = if since_shortstatehash.is_none() { | ||||
|             // Probably since = 0, we will do an initial sync
 | ||||
|             let (joined_member_count, invited_member_count, heroes) = calculate_counts(); | ||||
|             let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; | ||||
| 
 | ||||
|             let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; | ||||
|             let state_events = current_state_ids | ||||
|  | @ -510,7 +510,7 @@ async fn sync_helper( | |||
|             } | ||||
| 
 | ||||
|             let (joined_member_count, invited_member_count, heroes) = if send_member_count { | ||||
|                 calculate_counts() | ||||
|                 calculate_counts()? | ||||
|             } else { | ||||
|                 (None, None, Vec::new()) | ||||
|             }; | ||||
|  |  | |||
							
								
								
									
src/database.rs (279 changes)
							|  | @ -24,13 +24,14 @@ use rocket::{ | |||
|     request::{FromRequest, Request}, | ||||
|     Shutdown, State, | ||||
| }; | ||||
| use ruma::{DeviceId, RoomId, ServerName, UserId}; | ||||
| use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; | ||||
| use serde::{de::IgnoredAny, Deserialize}; | ||||
| use std::{ | ||||
|     collections::{BTreeMap, HashMap}, | ||||
|     convert::TryFrom, | ||||
|     collections::{BTreeMap, HashMap, HashSet}, | ||||
|     convert::{TryFrom, TryInto}, | ||||
|     fs::{self, remove_dir_all}, | ||||
|     io::Write, | ||||
|     mem::size_of, | ||||
|     ops::Deref, | ||||
|     path::Path, | ||||
|     sync::{Arc, Mutex, RwLock}, | ||||
|  | @ -107,7 +108,7 @@ fn default_db_cache_capacity_mb() -> f64 { | |||
| } | ||||
| 
 | ||||
| fn default_sqlite_wal_clean_second_interval() -> u32 { | ||||
|     15 * 60 // every 15 minutes
 | ||||
|     1 * 60 // every minute
 | ||||
| } | ||||
| 
 | ||||
| fn default_max_request_size() -> u32 { | ||||
|  | @ -261,7 +262,11 @@ impl Database { | |||
|                 userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, | ||||
| 
 | ||||
|                 statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, | ||||
|                 stateid_shorteventid: builder.open_tree("stateid_shorteventid")?, | ||||
| 
 | ||||
|                 shortroomid_roomid: builder.open_tree("shortroomid_roomid")?, | ||||
|                 roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, | ||||
| 
 | ||||
|                 shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, | ||||
|                 eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, | ||||
|                 shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, | ||||
|                 shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, | ||||
|  | @ -270,8 +275,12 @@ impl Database { | |||
| 
 | ||||
|                 eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, | ||||
|                 referencedevents: builder.open_tree("referencedevents")?, | ||||
|                 pdu_cache: Mutex::new(LruCache::new(1_000_000)), | ||||
|                 auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), | ||||
|                 pdu_cache: Mutex::new(LruCache::new(100_000)), | ||||
|                 auth_chain_cache: Mutex::new(LruCache::new(100_000)), | ||||
|                 shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), | ||||
|                 eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), | ||||
|                 statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), | ||||
|                 stateinfo_cache: Mutex::new(LruCache::new(50)), | ||||
|             }, | ||||
|             account_data: account_data::AccountData { | ||||
|                 roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, | ||||
|  | @ -424,7 +433,6 @@ impl Database { | |||
|             } | ||||
| 
 | ||||
|             if db.globals.database_version()? < 6 { | ||||
|                 // TODO update to 6
 | ||||
|                 // Set room member count
 | ||||
|                 for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { | ||||
|                     let room_id = | ||||
|  | @ -437,6 +445,261 @@ impl Database { | |||
| 
 | ||||
|                 println!("Migration: 5 -> 6 finished"); | ||||
|             } | ||||
| 
 | ||||
|             if db.globals.database_version()? < 7 { | ||||
|                 // Upgrade state store
 | ||||
|                 let mut last_roomstates: HashMap<RoomId, u64> = HashMap::new(); | ||||
|                 let mut current_sstatehash: Option<u64> = None; | ||||
|                 let mut current_room = None; | ||||
|                 let mut current_state = HashSet::new(); | ||||
|                 let mut counter = 0; | ||||
| 
 | ||||
|                 let mut handle_state = | ||||
|                     |current_sstatehash: u64, | ||||
|                      current_room: &RoomId, | ||||
|                      current_state: HashSet<_>, | ||||
|                      last_roomstates: &mut HashMap<_, _>| { | ||||
|                         counter += 1; | ||||
|                         println!("counter: {}", counter); | ||||
|                         let last_roomsstatehash = last_roomstates.get(current_room); | ||||
| 
 | ||||
|                         let states_parents = last_roomsstatehash.map_or_else( | ||||
|                             || Ok(Vec::new()), | ||||
|                             |&last_roomsstatehash| { | ||||
|                                 db.rooms.load_shortstatehash_info(dbg!(last_roomsstatehash)) | ||||
|                             }, | ||||
|                         )?; | ||||
| 
 | ||||
|                         let (statediffnew, statediffremoved) = | ||||
|                             if let Some(parent_stateinfo) = states_parents.last() { | ||||
|                                 let statediffnew = current_state | ||||
|                                     .difference(&parent_stateinfo.1) | ||||
|                                     .cloned() | ||||
|                                     .collect::<HashSet<_>>(); | ||||
| 
 | ||||
|                                 let statediffremoved = parent_stateinfo | ||||
|                                     .1 | ||||
|                                     .difference(&current_state) | ||||
|                                     .cloned() | ||||
|                                     .collect::<HashSet<_>>(); | ||||
| 
 | ||||
|                                 (statediffnew, statediffremoved) | ||||
|                             } else { | ||||
|                                 (current_state, HashSet::new()) | ||||
|                             }; | ||||
| 
 | ||||
|                         db.rooms.save_state_from_diff( | ||||
|                             dbg!(current_sstatehash), | ||||
|                             statediffnew, | ||||
|                             statediffremoved, | ||||
|                             2, // every state change is 2 event changes on average
 | ||||
|                             states_parents, | ||||
|                         )?; | ||||
| 
 | ||||
|                         /* | ||||
|                         let mut tmp = db.rooms.load_shortstatehash_info(&current_sstatehash, &db)?; | ||||
|                         let state = tmp.pop().unwrap(); | ||||
|                         println!( | ||||
|                             "{}\t{}{:?}: {:?} + {:?} - {:?}", | ||||
|                             current_room, | ||||
|                             "  ".repeat(tmp.len()), | ||||
|                             utils::u64_from_bytes(&current_sstatehash).unwrap(), | ||||
|                             tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()), | ||||
|                             state | ||||
|                                 .2 | ||||
|                                 .iter() | ||||
|                                 .map(|b| utils::u64_from_bytes(&b[size_of::<u64>()..]).unwrap()) | ||||
|                                 .collect::<Vec<_>>(), | ||||
|                             state | ||||
|                                 .3 | ||||
|                                 .iter() | ||||
|                                 .map(|b| utils::u64_from_bytes(&b[size_of::<u64>()..]).unwrap()) | ||||
|                                 .collect::<Vec<_>>() | ||||
|                         ); | ||||
|                         */ | ||||
| 
 | ||||
|                         Ok::<_, Error>(()) | ||||
|                     }; | ||||
| 
 | ||||
|                 for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() { | ||||
|                     let sstatehash = utils::u64_from_bytes(&k[0..size_of::<u64>()]) | ||||
|                         .expect("number of bytes is correct"); | ||||
|                     let sstatekey = k[size_of::<u64>()..].to_vec(); | ||||
|                     if Some(sstatehash) != current_sstatehash { | ||||
|                         if let Some(current_sstatehash) = current_sstatehash { | ||||
|                             handle_state( | ||||
|                                 current_sstatehash, | ||||
|                                 current_room.as_ref().unwrap(), | ||||
|                                 current_state, | ||||
|                                 &mut last_roomstates, | ||||
|                             )?; | ||||
|                             last_roomstates | ||||
|                                 .insert(current_room.clone().unwrap(), current_sstatehash); | ||||
|                         } | ||||
|                         current_state = HashSet::new(); | ||||
|                         current_sstatehash = Some(sstatehash); | ||||
| 
 | ||||
|                         let event_id = db | ||||
|                             .rooms | ||||
|                             .shorteventid_eventid | ||||
|                             .get(&seventid) | ||||
|                             .unwrap() | ||||
|                             .unwrap(); | ||||
|                         let event_id = | ||||
|                             EventId::try_from(utils::string_from_bytes(&event_id).unwrap()) | ||||
|                                 .unwrap(); | ||||
|                         let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap(); | ||||
| 
 | ||||
|                         if Some(&pdu.room_id) != current_room.as_ref() { | ||||
|                             current_room = Some(pdu.room_id.clone()); | ||||
|                         } | ||||
|                     } | ||||
| 
 | ||||
|                     let mut val = sstatekey; | ||||
|                     val.extend_from_slice(&seventid); | ||||
|                     current_state.insert(val.try_into().expect("size is correct")); | ||||
|                 } | ||||
| 
 | ||||
|                 if let Some(current_sstatehash) = current_sstatehash { | ||||
|                     handle_state( | ||||
|                         current_sstatehash, | ||||
|                         current_room.as_ref().unwrap(), | ||||
|                         current_state, | ||||
|                         &mut last_roomstates, | ||||
|                     )?; | ||||
|                 } | ||||
| 
 | ||||
|                 db.globals.bump_database_version(7)?; | ||||
| 
 | ||||
|                 println!("Migration: 6 -> 7 finished"); | ||||
|             } | ||||
| 
 | ||||
|             if db.globals.database_version()? < 8 { | ||||
|                 // Generate short room ids for all rooms
 | ||||
|                 for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { | ||||
|                     let shortroomid = db.globals.next_count()?.to_be_bytes(); | ||||
|                     db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; | ||||
|                     db.rooms.shortroomid_roomid.insert(&shortroomid, &room_id)?; | ||||
|                     println!("Migration: 8"); | ||||
|                 } | ||||
|                 // Update pduids db layout
 | ||||
|                 let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| { | ||||
|                     if !key.starts_with(b"!") { | ||||
|                         return None; | ||||
|                     } | ||||
|                     let mut parts = key.splitn(2, |&b| b == 0xff); | ||||
|                     let room_id = parts.next().unwrap(); | ||||
|                     let count = parts.next().unwrap(); | ||||
| 
 | ||||
|                     let short_room_id = db | ||||
|                         .rooms | ||||
|                         .roomid_shortroomid | ||||
|                         .get(&room_id) | ||||
|                         .unwrap() | ||||
|                         .expect("shortroomid should exist"); | ||||
| 
 | ||||
|                     let mut new_key = short_room_id; | ||||
|                     new_key.extend_from_slice(count); | ||||
| 
 | ||||
|                     Some((new_key, v)) | ||||
|                 }); | ||||
| 
 | ||||
|                 db.rooms.pduid_pdu.insert_batch(&mut batch)?; | ||||
| 
 | ||||
|                 let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| { | ||||
|                     if !value.starts_with(b"!") { | ||||
|                         return None; | ||||
|                     } | ||||
|                     let mut parts = value.splitn(2, |&b| b == 0xff); | ||||
|                     let room_id = parts.next().unwrap(); | ||||
|                     let count = parts.next().unwrap(); | ||||
| 
 | ||||
|                     let short_room_id = db | ||||
|                         .rooms | ||||
|                         .roomid_shortroomid | ||||
|                         .get(&room_id) | ||||
|                         .unwrap() | ||||
|                         .expect("shortroomid should exist"); | ||||
| 
 | ||||
|                     let mut new_value = short_room_id; | ||||
|                     new_value.extend_from_slice(count); | ||||
| 
 | ||||
|                     Some((k, new_value)) | ||||
|                 }); | ||||
| 
 | ||||
|                 db.rooms.eventid_pduid.insert_batch(&mut batch2)?; | ||||
| 
 | ||||
|                 db.globals.bump_database_version(8)?; | ||||
| 
 | ||||
|                 println!("Migration: 7 -> 8 finished"); | ||||
|             } | ||||
| 
 | ||||
|             if db.globals.database_version()? < 9 { | ||||
|                 // Update tokenids db layout
 | ||||
|                 let batch = db | ||||
|                     .rooms | ||||
|                     .tokenids | ||||
|                     .iter() | ||||
|                     .filter_map(|(key, _)| { | ||||
|                         if !key.starts_with(b"!") { | ||||
|                             return None; | ||||
|                         } | ||||
|                         let mut parts = key.splitn(4, |&b| b == 0xff); | ||||
|                         let room_id = parts.next().unwrap(); | ||||
|                         let word = parts.next().unwrap(); | ||||
|                         let _pdu_id_room = parts.next().unwrap(); | ||||
|                         let pdu_id_count = parts.next().unwrap(); | ||||
| 
 | ||||
|                         let short_room_id = db | ||||
|                             .rooms | ||||
|                             .roomid_shortroomid | ||||
|                             .get(&room_id) | ||||
|                             .unwrap() | ||||
|                             .expect("shortroomid should exist"); | ||||
|                         let mut new_key = short_room_id; | ||||
|                         new_key.extend_from_slice(word); | ||||
|                         new_key.push(0xff); | ||||
|                         new_key.extend_from_slice(pdu_id_count); | ||||
|                         println!("old {:?}", key); | ||||
|                         println!("new {:?}", new_key); | ||||
|                         Some((new_key, Vec::new())) | ||||
|                     }) | ||||
|                     .collect::<Vec<_>>(); | ||||
| 
 | ||||
|                 let mut iter = batch.into_iter().peekable(); | ||||
| 
 | ||||
|                 while iter.peek().is_some() { | ||||
|                     db.rooms | ||||
|                         .tokenids | ||||
|                         .insert_batch(&mut iter.by_ref().take(1000))?; | ||||
|                     println!("smaller batch done"); | ||||
|                 } | ||||
| 
 | ||||
|                 println!("Deleting starts"); | ||||
| 
 | ||||
|                 let batch2 = db | ||||
|                     .rooms | ||||
|                     .tokenids | ||||
|                     .iter() | ||||
|                     .filter_map(|(key, _)| { | ||||
|                         if key.starts_with(b"!") { | ||||
|                             println!("del {:?}", key); | ||||
|                             Some(key) | ||||
|                         } else { | ||||
|                             None | ||||
|                         } | ||||
|                     }) | ||||
|                     .collect::<Vec<_>>(); | ||||
| 
 | ||||
|                 for key in batch2 { | ||||
|                     println!("del"); | ||||
|                     db.rooms.tokenids.remove(&key)?; | ||||
|                 } | ||||
| 
 | ||||
|                 db.globals.bump_database_version(9)?; | ||||
| 
 | ||||
|                 println!("Migration: 8 -> 9 finished"); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         let guard = db.read().await; | ||||
|  |  | |||
|  | @ -35,6 +35,7 @@ pub trait Tree: Send + Sync { | |||
|     ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a>; | ||||
| 
 | ||||
|     fn increment(&self, key: &[u8]) -> Result<Vec<u8>>; | ||||
|     fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()>; | ||||
| 
 | ||||
|     fn scan_prefix<'a>( | ||||
|         &'a self, | ||||
|  |  | |||
|  | @ -9,15 +9,13 @@ use std::{ | |||
|     path::{Path, PathBuf}, | ||||
|     pin::Pin, | ||||
|     sync::Arc, | ||||
|     time::{Duration, Instant}, | ||||
| }; | ||||
| use tokio::sync::oneshot::Sender; | ||||
| use tracing::{debug, warn}; | ||||
| 
 | ||||
| pub const MILLI: Duration = Duration::from_millis(1); | ||||
| use tracing::debug; | ||||
| 
 | ||||
| thread_local! { | ||||
|     static READ_CONNECTION: RefCell<Option<&'static Connection>> = RefCell::new(None); | ||||
|     static READ_CONNECTION_ITERATOR: RefCell<Option<&'static Connection>> = RefCell::new(None); | ||||
| } | ||||
| 
 | ||||
| struct PreparedStatementIterator<'a> { | ||||
|  | @ -51,11 +49,11 @@ impl Engine { | |||
|     fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result<Connection> { | ||||
|         let conn = Connection::open(&path)?; | ||||
| 
 | ||||
|         conn.pragma_update(Some(Main), "page_size", &32768)?; | ||||
|         conn.pragma_update(Some(Main), "page_size", &2048)?; | ||||
|         conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; | ||||
|         conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; | ||||
|         conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; | ||||
|         conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; | ||||
|         conn.pragma_update(Some(Main), "wal_autocheckpoint", &2000)?; | ||||
| 
 | ||||
|         Ok(conn) | ||||
|     } | ||||
|  | @ -79,9 +77,25 @@ impl Engine { | |||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     fn read_lock_iterator(&self) -> &'static Connection { | ||||
|         READ_CONNECTION_ITERATOR.with(|cell| { | ||||
|             let connection = &mut cell.borrow_mut(); | ||||
| 
 | ||||
|             if (*connection).is_none() { | ||||
|                 let c = Box::leak(Box::new( | ||||
|                     Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap(), | ||||
|                 )); | ||||
|                 **connection = Some(c); | ||||
|             } | ||||
| 
 | ||||
|             connection.unwrap() | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     pub fn flush_wal(self: &Arc<Self>) -> Result<()> { | ||||
|         self.write_lock() | ||||
|             .pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?; | ||||
|         // We use autocheckpoints
 | ||||
|         //self.write_lock()
 | ||||
|         //.pragma_update(Some(Main), "wal_checkpoint", &"TRUNCATE")?;
 | ||||
|         Ok(()) | ||||
|     } | ||||
| } | ||||
|  | @ -153,6 +167,34 @@ impl SqliteTable { | |||
|         )?; | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     pub fn iter_with_guard<'a>( | ||||
|         &'a self, | ||||
|         guard: &'a Connection, | ||||
|     ) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> { | ||||
|         let statement = Box::leak(Box::new( | ||||
|             guard | ||||
|                 .prepare(&format!( | ||||
|                     "SELECT key, value FROM {} ORDER BY key ASC", | ||||
|                     &self.name | ||||
|                 )) | ||||
|                 .unwrap(), | ||||
|         )); | ||||
| 
 | ||||
|         let statement_ref = NonAliasingBox(statement); | ||||
| 
 | ||||
|         let iterator = Box::new( | ||||
|             statement | ||||
|                 .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) | ||||
|                 .unwrap() | ||||
|                 .map(|r| r.unwrap()), | ||||
|         ); | ||||
| 
 | ||||
|         Box::new(PreparedStatementIterator { | ||||
|             iterator, | ||||
|             statement_ref, | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl Tree for SqliteTable { | ||||
|  | @ -164,16 +206,7 @@ impl Tree for SqliteTable { | |||
|     #[tracing::instrument(skip(self, key, value))] | ||||
|     fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { | ||||
|         let guard = self.engine.write_lock(); | ||||
| 
 | ||||
|         let start = Instant::now(); | ||||
| 
 | ||||
|         self.insert_with_guard(&guard, key, value)?; | ||||
| 
 | ||||
|         let elapsed = start.elapsed(); | ||||
|         if elapsed > MILLI { | ||||
|             warn!("insert took {:?} : {}", elapsed, &self.name); | ||||
|         } | ||||
| 
 | ||||
|         drop(guard); | ||||
| 
 | ||||
|         let watchers = self.watchers.read(); | ||||
|  | @ -216,53 +249,41 @@ impl Tree for SqliteTable { | |||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     #[tracing::instrument(skip(self, iter))] | ||||
|     fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> { | ||||
|         let guard = self.engine.write_lock(); | ||||
| 
 | ||||
|         guard.execute("BEGIN", [])?; | ||||
|         for key in iter { | ||||
|             let old = self.get_with_guard(&guard, &key)?; | ||||
|             let new = crate::utils::increment(old.as_deref()) | ||||
|                 .expect("utils::increment always returns Some"); | ||||
|             self.insert_with_guard(&guard, &key, &new)?; | ||||
|         } | ||||
|         guard.execute("COMMIT", [])?; | ||||
| 
 | ||||
|         drop(guard); | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     #[tracing::instrument(skip(self, key))] | ||||
|     fn remove(&self, key: &[u8]) -> Result<()> { | ||||
|         let guard = self.engine.write_lock(); | ||||
| 
 | ||||
|         let start = Instant::now(); | ||||
| 
 | ||||
|         guard.execute( | ||||
|             format!("DELETE FROM {} WHERE key = ?", self.name).as_str(), | ||||
|             [key], | ||||
|         )?; | ||||
| 
 | ||||
|         let elapsed = start.elapsed(); | ||||
| 
 | ||||
|         if elapsed > MILLI { | ||||
|             debug!("remove:    took {:012?} : {}", elapsed, &self.name); | ||||
|         } | ||||
|         // debug!("remove key: {:?}", &key);
 | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     #[tracing::instrument(skip(self))] | ||||
|     fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> { | ||||
|         let guard = self.engine.read_lock(); | ||||
|         let guard = self.engine.read_lock_iterator(); | ||||
| 
 | ||||
|         let statement = Box::leak(Box::new( | ||||
|             guard | ||||
|                 .prepare(&format!( | ||||
|                     "SELECT key, value FROM {} ORDER BY key ASC", | ||||
|                     &self.name | ||||
|                 )) | ||||
|                 .unwrap(), | ||||
|         )); | ||||
| 
 | ||||
|         let statement_ref = NonAliasingBox(statement); | ||||
| 
 | ||||
|         let iterator = Box::new( | ||||
|             statement | ||||
|                 .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) | ||||
|                 .unwrap() | ||||
|                 .map(|r| r.unwrap()), | ||||
|         ); | ||||
| 
 | ||||
|         Box::new(PreparedStatementIterator { | ||||
|             iterator, | ||||
|             statement_ref, | ||||
|         }) | ||||
|         self.iter_with_guard(&guard) | ||||
|     } | ||||
| 
 | ||||
|     #[tracing::instrument(skip(self, from, backwards))] | ||||
|  | @ -271,7 +292,7 @@ impl Tree for SqliteTable { | |||
|         from: &[u8], | ||||
|         backwards: bool, | ||||
|     ) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> { | ||||
|         let guard = self.engine.read_lock(); | ||||
|         let guard = self.engine.read_lock_iterator(); | ||||
|         let from = from.to_vec(); // TODO change interface?
 | ||||
| 
 | ||||
|         if backwards { | ||||
|  | @ -326,8 +347,6 @@ impl Tree for SqliteTable { | |||
|     fn increment(&self, key: &[u8]) -> Result<Vec<u8>> { | ||||
|         let guard = self.engine.write_lock(); | ||||
| 
 | ||||
|         let start = Instant::now(); | ||||
| 
 | ||||
|         let old = self.get_with_guard(&guard, key)?; | ||||
| 
 | ||||
|         let new = | ||||
|  | @ -335,26 +354,11 @@ impl Tree for SqliteTable { | |||
| 
 | ||||
|         self.insert_with_guard(&guard, key, &new)?; | ||||
| 
 | ||||
|         let elapsed = start.elapsed(); | ||||
| 
 | ||||
|         if elapsed > MILLI { | ||||
|             debug!("increment: took {:012?} : {}", elapsed, &self.name); | ||||
|         } | ||||
|         // debug!("increment key: {:?}", &key);
 | ||||
| 
 | ||||
|         Ok(new) | ||||
|     } | ||||
| 
 | ||||
|     #[tracing::instrument(skip(self, prefix))] | ||||
|     fn scan_prefix<'a>(&'a self, prefix: Vec<u8>) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> { | ||||
|         // let name = self.name.clone();
 | ||||
|         // self.iter_from_thread(
 | ||||
|         //     format!(
 | ||||
|         //         "SELECT key, value FROM {} WHERE key BETWEEN ?1 AND ?1 || X'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' ORDER BY key ASC",
 | ||||
|         //         name
 | ||||
|         //     )
 | ||||
|         //     [prefix]
 | ||||
|         // )
 | ||||
|         Box::new( | ||||
|             self.iter_from(&prefix, false) | ||||
|                 .take_while(move |(key, _)| key.starts_with(&prefix)), | ||||
|  |  | |||
										
											
												File diff suppressed because it is too large
											
										
									
								
							|  | @ -4,11 +4,14 @@ use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; | |||
| use ruma::{ | ||||
|     api::client::{ | ||||
|         error::ErrorKind, | ||||
|         r0::uiaa::{IncomingAuthData, UiaaInfo}, | ||||
|         r0::uiaa::{ | ||||
|             IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, UiaaInfo, | ||||
|         }, | ||||
|     }, | ||||
|     signatures::CanonicalJsonValue, | ||||
|     DeviceId, UserId, | ||||
| }; | ||||
| use tracing::error; | ||||
| 
 | ||||
| use super::abstraction::Tree; | ||||
| 
 | ||||
|  | @ -49,14 +52,8 @@ impl Uiaa { | |||
|         users: &super::users::Users, | ||||
|         globals: &super::globals::Globals, | ||||
|     ) -> Result<(bool, UiaaInfo)> { | ||||
|         if let IncomingAuthData::DirectRequest { | ||||
|             kind, | ||||
|             session, | ||||
|             auth_parameters, | ||||
|         } = &auth | ||||
|         { | ||||
|             let mut uiaainfo = session | ||||
|                 .as_ref() | ||||
|         let mut uiaainfo = auth | ||||
|             .session() | ||||
|             .map(|session| self.get_uiaa_session(&user_id, &device_id, session)) | ||||
|             .unwrap_or_else(|| Ok(uiaainfo.clone()))?; | ||||
| 
 | ||||
|  | @ -64,55 +61,29 @@ impl Uiaa { | |||
|             uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); | ||||
|         } | ||||
| 
 | ||||
|         match auth { | ||||
|             // Find out what the user completed
 | ||||
|             match &**kind { | ||||
|                 "m.login.password" => { | ||||
|                     let identifier = auth_parameters.get("identifier").ok_or(Error::BadRequest( | ||||
|                         ErrorKind::MissingParam, | ||||
|                         "m.login.password needs identifier.", | ||||
|                     ))?; | ||||
| 
 | ||||
|                     let identifier_type = identifier.get("type").ok_or(Error::BadRequest( | ||||
|                         ErrorKind::MissingParam, | ||||
|                         "Identifier needs a type.", | ||||
|                     ))?; | ||||
| 
 | ||||
|                     if identifier_type != "m.id.user" { | ||||
|             IncomingAuthData::Password(IncomingPassword { | ||||
|                 identifier, | ||||
|                 password, | ||||
|                 .. | ||||
|             }) => { | ||||
|                 let username = match identifier { | ||||
|                     MatrixId(username) => username, | ||||
|                     _ => { | ||||
|                         return Err(Error::BadRequest( | ||||
|                             ErrorKind::Unrecognized, | ||||
|                             "Identifier type not recognized.", | ||||
|                         )); | ||||
|                         )) | ||||
|                     } | ||||
|                 }; | ||||
| 
 | ||||
|                     let username = identifier | ||||
|                         .get("user") | ||||
|                         .ok_or(Error::BadRequest( | ||||
|                             ErrorKind::MissingParam, | ||||
|                             "Identifier needs user field.", | ||||
|                         ))? | ||||
|                         .as_str() | ||||
|                         .ok_or(Error::BadRequest( | ||||
|                             ErrorKind::BadJson, | ||||
|                             "User is not a string.", | ||||
|                         ))?; | ||||
| 
 | ||||
|                     let user_id = UserId::parse_with_server_name(username, globals.server_name()) | ||||
|                 let user_id = | ||||
|                     UserId::parse_with_server_name(username.clone(), globals.server_name()) | ||||
|                         .map_err(|_| { | ||||
|                             Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") | ||||
|                         })?; | ||||
| 
 | ||||
|                     let password = auth_parameters | ||||
|                         .get("password") | ||||
|                         .ok_or(Error::BadRequest( | ||||
|                             ErrorKind::MissingParam, | ||||
|                             "Password is missing.", | ||||
|                         ))? | ||||
|                         .as_str() | ||||
|                         .ok_or(Error::BadRequest( | ||||
|                             ErrorKind::BadJson, | ||||
|                             "Password is not a string.", | ||||
|                         ))?; | ||||
| 
 | ||||
|                 // Check if password is correct
 | ||||
|                 if let Some(hash) = users.password_hash(&user_id)? { | ||||
|                     let hash_matches = | ||||
|  | @ -130,10 +101,10 @@ impl Uiaa { | |||
|                 // Password was correct! Let's add it to `completed`
 | ||||
|                 uiaainfo.completed.push("m.login.password".to_owned()); | ||||
|             } | ||||
|                 "m.login.dummy" => { | ||||
|             IncomingAuthData::Dummy(_) => { | ||||
|                 uiaainfo.completed.push("m.login.dummy".to_owned()); | ||||
|             } | ||||
|                 k => panic!("type not supported: {}", k), | ||||
|             k => error!("type not supported: {:?}", k), | ||||
|         } | ||||
| 
 | ||||
|         // Check if a flow now succeeds
 | ||||
|  | @ -166,9 +137,6 @@ impl Uiaa { | |||
|             None, | ||||
|         )?; | ||||
|         Ok((true, uiaainfo)) | ||||
|         } else { | ||||
|             panic!("FallbackAcknowledgement is not supported yet"); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn set_uiaa_request( | ||||
|  |  | |||
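The uiaa.rs change above swaps the hand-rolled JSON lookups over `auth_parameters` for a match on the typed `IncomingAuthData` variants from ruma 0.3, and logs unsupported auth types instead of panicking. A minimal sketch of that dispatch pattern, using simplified stand-in enums rather than ruma's real types:

use tracing::error;

// Simplified stand-ins for ruma's IncomingAuthData, IncomingPassword and
// IncomingUserIdentifier; the real types live in ruma::api::client::r0::uiaa.
enum UserIdentifier {
    MatrixId(String),
    Other,
}

enum AuthData {
    Password { identifier: UserIdentifier },
    Dummy,
    Unknown(String),
}

fn check_auth(auth: &AuthData, completed: &mut Vec<String>) -> Result<(), String> {
    match auth {
        AuthData::Password { identifier } => {
            // Mirrors the new match on MatrixId: only m.id.user identifiers are accepted.
            let _username = match identifier {
                UserIdentifier::MatrixId(name) => name,
                _ => return Err("Identifier type not recognized.".to_owned()),
            };
            // ...password hash check elided...
            completed.push("m.login.password".to_owned());
        }
        AuthData::Dummy => completed.push("m.login.dummy".to_owned()),
        // Unknown stages are logged instead of panicking, as in the diff above.
        AuthData::Unknown(kind) => error!("type not supported: {:?}", kind),
    }
    Ok(())
}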
							
								
								
									
11 src/main.rs
							|  | @ -17,7 +17,7 @@ use std::sync::Arc; | |||
| use database::Config; | ||||
| pub use database::Database; | ||||
| pub use error::{Error, Result}; | ||||
| use opentelemetry::trace::Tracer; | ||||
| use opentelemetry::trace::{FutureExt, Tracer}; | ||||
| pub use pdu::PduEvent; | ||||
| pub use rocket::State; | ||||
| use ruma::api::client::error::ErrorKind; | ||||
|  | @ -220,14 +220,17 @@ async fn main() { | |||
|     }; | ||||
| 
 | ||||
|     if config.allow_jaeger { | ||||
|         opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); | ||||
|         let tracer = opentelemetry_jaeger::new_pipeline() | ||||
|             .with_service_name("conduit") | ||||
|             .install_simple() | ||||
|             .install_batch(opentelemetry::runtime::Tokio) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let span = tracer.start("conduit"); | ||||
|         start.await; | ||||
|         start.with_current_context().await; | ||||
|         drop(span); | ||||
| 
 | ||||
|         println!("exporting"); | ||||
|         opentelemetry::global::shutdown_tracer_provider(); | ||||
|     } else { | ||||
|         std::env::set_var("RUST_LOG", &config.log); | ||||
| 
 | ||||
|  |  | |||
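In main.rs the Jaeger exporter moves from `install_simple` to `install_batch` on the Tokio runtime, and the server future is awaited with the current tracing context attached. A minimal sketch of how those pieces fit together, assuming opentelemetry 0.16 and opentelemetry-jaeger 0.15 with their `rt-tokio` features, and a placeholder `run_server` future standing in for the rocket launch:

use opentelemetry::trace::{FutureExt, Tracer};

async fn run_server() {
    // Placeholder for the actual rocket launch future.
}

#[tokio::main]
async fn main() {
    // The batch span processor needs an async runtime, hence install_batch plus
    // runtime::Tokio (this is why the simple installer in the old code was replaced).
    let tracer = opentelemetry_jaeger::new_pipeline()
        .with_service_name("conduit")
        .install_batch(opentelemetry::runtime::Tokio)
        .unwrap();

    let span = tracer.start("conduit");
    // Run the server with the current OpenTelemetry context attached, mirroring
    // the with_current_context() call in the diff.
    run_server().with_current_context().await;
    drop(span);

    // Flush any remaining spans before the process exits.
    opentelemetry::global::shutdown_tracer_provider();
}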
|  | @ -12,7 +12,7 @@ use ruma::{ | |||
| use serde::{Deserialize, Serialize}; | ||||
| use serde_json::json; | ||||
| use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; | ||||
| use tracing::error; | ||||
| use tracing::warn; | ||||
| 
 | ||||
| #[derive(Clone, Deserialize, Serialize, Debug)] | ||||
| pub struct PduEvent { | ||||
|  | @ -322,7 +322,7 @@ pub(crate) fn gen_event_id_canonical_json( | |||
|     pdu: &Raw<ruma::events::pdu::Pdu>, | ||||
| ) -> crate::Result<(EventId, CanonicalJsonObject)> { | ||||
|     let value = serde_json::from_str(pdu.json().get()).map_err(|e| { | ||||
|         error!("{:?}: {:?}", pdu, e); | ||||
|         warn!("Error parsing incoming event {:?}: {:?}", pdu, e); | ||||
|         Error::BadServerResponse("Invalid PDU in server response") | ||||
|     })?; | ||||
| 
 | ||||
|  |  | |||
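The helper touched above, `gen_event_id_canonical_json`, parses a federation PDU and derives its event ID from the canonical JSON. Ruma does the actual derivation for Conduit; as a rough standalone sketch of the spec rule for room versions 4 and later (the event ID is the URL-safe unpadded base64 of the event's SHA-256 reference hash), assuming the `sha2` and `base64` crates:

use serde_json::Value;
use sha2::{Digest, Sha256};

// Illustrative only: strip the fields excluded from the reference hash, encode
// the rest as (approximately) canonical JSON, SHA-256 it, and base64url it.
fn reference_event_id(pdu: &Value) -> Option<String> {
    let mut pdu = pdu.as_object()?.clone();
    pdu.remove("signatures");
    pdu.remove("unsigned");
    pdu.remove("age_ts"); // legacy field, also excluded by the spec

    // serde_json's default map keeps keys sorted, which is close enough to
    // canonical JSON for this sketch (the real rules also constrain numbers).
    let canonical = serde_json::to_string(&pdu).ok()?;
    let hash = Sha256::digest(canonical.as_bytes());
    Some(format!(
        "${}",
        base64::encode_config(hash, base64::URL_SAFE_NO_PAD)
    ))
}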
|  | @ -111,7 +111,7 @@ impl FedDest { | |||
|     } | ||||
| } | ||||
| 
 | ||||
| #[tracing::instrument(skip(globals))] | ||||
| #[tracing::instrument(skip(globals, request))] | ||||
| pub async fn send_request<T: OutgoingRequest>( | ||||
|     globals: &crate::database::globals::Globals, | ||||
|     destination: &ServerName, | ||||
|  | @ -254,7 +254,7 @@ where | |||
|             }); // TODO: handle timeout
 | ||||
| 
 | ||||
|             if status != 200 { | ||||
|                 info!( | ||||
|                 warn!( | ||||
|                     "{} {}: {}", | ||||
|                     url, | ||||
|                     status, | ||||
|  | @ -272,14 +272,20 @@ where | |||
|             if status == 200 { | ||||
|                 let response = T::IncomingResponse::try_from_http_response(http_response); | ||||
|                 response.map_err(|e| { | ||||
|                     warn!("Invalid 200 response from {}: {}", &destination, e); | ||||
|                     warn!( | ||||
|                         "Invalid 200 response from {} on: {} {}", | ||||
|                         &destination, url, e | ||||
|                     ); | ||||
|                     Error::BadServerResponse("Server returned bad 200 response.") | ||||
|                 }) | ||||
|             } else { | ||||
|                 Err(Error::FederationError( | ||||
|                     destination.to_owned(), | ||||
|                     RumaError::try_from_http_response(http_response).map_err(|e| { | ||||
|                         warn!("Server returned bad error response: {}", e); | ||||
|                         warn!( | ||||
|                             "Invalid {} response from {} on: {} {}", | ||||
|                             status, &destination, url, e | ||||
|                         ); | ||||
|                         Error::BadServerResponse("Server returned bad error response.") | ||||
|                     })?, | ||||
|                 )) | ||||
|  | @ -495,7 +501,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json<String> { | |||
|     ) | ||||
|     .unwrap(); | ||||
| 
 | ||||
|     Json(ruma::serde::to_canonical_json_string(&response).expect("JSON is canonical")) | ||||
|     Json(serde_json::to_string(&response).expect("JSON is canonical")) | ||||
| } | ||||
| 
 | ||||
| #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] | ||||
|  | @ -668,7 +674,7 @@ pub async fn send_transaction_message_route( | |||
| 
 | ||||
|         let elapsed = start_time.elapsed(); | ||||
|         warn!( | ||||
|             "Handling event {} took {}m{}s", | ||||
|             "Handling transaction of event {} took {}m{}s", | ||||
|             event_id, | ||||
|             elapsed.as_secs() / 60, | ||||
|             elapsed.as_secs() % 60 | ||||
|  | @ -721,7 +727,8 @@ pub async fn send_transaction_message_route( | |||
|                                 &db.globals, | ||||
|                             )?; | ||||
|                         } else { | ||||
|                             warn!("No known event ids in read receipt: {:?}", user_updates); | ||||
|                             // TODO fetch missing events
 | ||||
|                             debug!("No known event ids in read receipt: {:?}", user_updates); | ||||
|                         } | ||||
|                     } | ||||
|                 } | ||||
|  | @ -839,7 +846,7 @@ type AsyncRecursiveType<'a, T> = Pin<Box<dyn Future<Output = T> + 'a + Send>>; | |||
| /// 14. Use state resolution to find new room state
 | ||||
| // We use some AsyncRecursiveType hacks here so we can call this async function recursively
 | ||||
| #[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] | ||||
| pub fn handle_incoming_pdu<'a>( | ||||
| pub async fn handle_incoming_pdu<'a>( | ||||
|     origin: &'a ServerName, | ||||
|     event_id: &'a EventId, | ||||
|     room_id: &'a RoomId, | ||||
|  | @ -847,9 +854,7 @@ pub fn handle_incoming_pdu<'a>( | |||
|     is_timeline_event: bool, | ||||
|     db: &'a Database, | ||||
|     pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>, | ||||
| ) -> AsyncRecursiveType<'a, StdResult<Option<Vec<u8>>, String>> { | ||||
|     Box::pin(async move { | ||||
|         // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
 | ||||
| ) -> StdResult<Option<Vec<u8>>, String> { | ||||
|     match db.rooms.exists(&room_id) { | ||||
|         Ok(true) => {} | ||||
|         _ => { | ||||
|  | @ -862,6 +867,162 @@ pub fn handle_incoming_pdu<'a>( | |||
|         return Ok(Some(pdu_id.to_vec())); | ||||
|     } | ||||
| 
 | ||||
|     let create_event = db | ||||
|         .rooms | ||||
|         .room_state_get(&room_id, &EventType::RoomCreate, "") | ||||
|         .map_err(|_| "Failed to ask database for event.".to_owned())? | ||||
|         .ok_or_else(|| "Failed to find create event in db.".to_owned())?; | ||||
| 
 | ||||
|     let (incoming_pdu, val) = handle_outlier_pdu( | ||||
|         origin, | ||||
|         &create_event, | ||||
|         event_id, | ||||
|         room_id, | ||||
|         value, | ||||
|         db, | ||||
|         pub_key_map, | ||||
|     ) | ||||
|     .await?; | ||||
| 
 | ||||
|     // 8. if not timeline event: stop
 | ||||
|     if !is_timeline_event { | ||||
|         return Ok(None); | ||||
|     } | ||||
| 
 | ||||
|     // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
 | ||||
|     let mut graph = HashMap::new(); | ||||
|     let mut eventid_info = HashMap::new(); | ||||
|     let mut todo_outlier_stack = incoming_pdu.prev_events.clone(); | ||||
| 
 | ||||
|     let mut amount = 0; | ||||
| 
 | ||||
|     while let Some(prev_event_id) = todo_outlier_stack.pop() { | ||||
|         if let Some((pdu, json_opt)) = fetch_and_handle_outliers( | ||||
|             db, | ||||
|             origin, | ||||
|             &[prev_event_id.clone()], | ||||
|             &create_event, | ||||
|             &room_id, | ||||
|             pub_key_map, | ||||
|         ) | ||||
|         .await | ||||
|         .pop() | ||||
|         { | ||||
|             if amount > 100 { | ||||
|                 // Max limit reached
 | ||||
|                 warn!("Max prev event limit reached!"); | ||||
|                 graph.insert(prev_event_id.clone(), HashSet::new()); | ||||
|                 continue; | ||||
|             } | ||||
| 
 | ||||
|             if let Some(json) = | ||||
|                 json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) | ||||
|             { | ||||
|                 if pdu.origin_server_ts | ||||
|                     > db.rooms | ||||
|                         .first_pdu_in_room(&room_id) | ||||
|                         .map_err(|_| "Error loading first room event.".to_owned())? | ||||
|                         .expect("Room exists") | ||||
|                         .origin_server_ts | ||||
|                 { | ||||
|                     amount += 1; | ||||
|                     for prev_prev in &pdu.prev_events { | ||||
|                         if !graph.contains_key(prev_prev) { | ||||
|                             todo_outlier_stack.push(dbg!(prev_prev.clone())); | ||||
|                         } | ||||
|                     } | ||||
| 
 | ||||
|                     graph.insert( | ||||
|                         prev_event_id.clone(), | ||||
|                         pdu.prev_events.iter().cloned().collect(), | ||||
|                     ); | ||||
|                     eventid_info.insert(prev_event_id.clone(), (pdu, json)); | ||||
|                 } else { | ||||
|                     // Time based check failed
 | ||||
|                     graph.insert(prev_event_id.clone(), HashSet::new()); | ||||
|                     eventid_info.insert(prev_event_id.clone(), (pdu, json)); | ||||
|                 } | ||||
|             } else { | ||||
|                 // Get json failed
 | ||||
|                 graph.insert(prev_event_id.clone(), HashSet::new()); | ||||
|             } | ||||
|         } else { | ||||
|             // Fetch and handle failed
 | ||||
|             graph.insert(prev_event_id.clone(), HashSet::new()); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     let sorted = | ||||
|         state_res::StateResolution::lexicographical_topological_sort(dbg!(&graph), |event_id| { | ||||
|             // This return value is the key used for sorting events,
 | ||||
|             // events are then sorted by power level, time,
 | ||||
|             // and lexically by event_id.
 | ||||
|             println!("{}", event_id); | ||||
|             Ok(( | ||||
|                 0, | ||||
|                 MilliSecondsSinceUnixEpoch( | ||||
|                     eventid_info | ||||
|                         .get(event_id) | ||||
|                         .map_or_else(|| uint!(0), |info| info.0.origin_server_ts.clone()), | ||||
|                 ), | ||||
|                 ruma::event_id!("$notimportant"), | ||||
|             )) | ||||
|         }) | ||||
|         .map_err(|_| "Error sorting prev events".to_owned())?; | ||||
| 
 | ||||
|     for prev_id in dbg!(sorted) { | ||||
|         if let Some((pdu, json)) = eventid_info.remove(&prev_id) { | ||||
|             let start_time = Instant::now(); | ||||
|             let event_id = pdu.event_id.clone(); | ||||
|             if let Err(e) = upgrade_outlier_to_timeline_pdu( | ||||
|                 pdu, | ||||
|                 json, | ||||
|                 &create_event, | ||||
|                 origin, | ||||
|                 db, | ||||
|                 room_id, | ||||
|                 pub_key_map, | ||||
|             ) | ||||
|             .await | ||||
|             { | ||||
|                 warn!("Prev event {} failed: {}", event_id, e); | ||||
|             } | ||||
|             let elapsed = start_time.elapsed(); | ||||
|             warn!( | ||||
|                 "Handling prev event {} took {}m{}s", | ||||
|                 event_id, | ||||
|                 elapsed.as_secs() / 60, | ||||
|                 elapsed.as_secs() % 60 | ||||
|             ); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     upgrade_outlier_to_timeline_pdu( | ||||
|         incoming_pdu, | ||||
|         val, | ||||
|         &create_event, | ||||
|         origin, | ||||
|         db, | ||||
|         room_id, | ||||
|         pub_key_map, | ||||
|     ) | ||||
|     .await | ||||
| } | ||||
| 
 | ||||
| #[tracing::instrument(skip(origin, create_event, event_id, room_id, value, db, pub_key_map))] | ||||
| fn handle_outlier_pdu<'a>( | ||||
|     origin: &'a ServerName, | ||||
|     create_event: &'a PduEvent, | ||||
|     event_id: &'a EventId, | ||||
|     room_id: &'a RoomId, | ||||
|     value: BTreeMap<String, CanonicalJsonValue>, | ||||
|     db: &'a Database, | ||||
|     pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>, | ||||
| ) -> AsyncRecursiveType<'a, StdResult<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>), String>> | ||||
| { | ||||
|     Box::pin(async move { | ||||
|         // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
 | ||||
| 
 | ||||
|         // We go through all the signatures we see on the value and fetch the corresponding signing
 | ||||
|         // keys
 | ||||
|         fetch_required_signing_keys(&value, &pub_key_map, db) | ||||
|  | @ -870,11 +1031,6 @@ pub fn handle_incoming_pdu<'a>( | |||
| 
 | ||||
|         // 2. Check signatures, otherwise drop
 | ||||
|         // 3. check content hash, redact if doesn't match
 | ||||
|         let create_event = db | ||||
|             .rooms | ||||
|             .room_state_get(&room_id, &EventType::RoomCreate, "") | ||||
|             .map_err(|_| "Failed to ask database for event.".to_owned())? | ||||
|             .ok_or_else(|| "Failed to find create event in db.".to_owned())?; | ||||
| 
 | ||||
|         let create_event_content = | ||||
|             serde_json::from_value::<Raw<CreateEventContent>>(create_event.content.clone()) | ||||
|  | @ -921,13 +1077,13 @@ pub fn handle_incoming_pdu<'a>( | |||
|         // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
 | ||||
|         // EDIT: Step 5 is not applied anymore because it failed too often
 | ||||
|         debug!("Fetching auth events for {}", incoming_pdu.event_id); | ||||
|         fetch_and_handle_events( | ||||
|         fetch_and_handle_outliers( | ||||
|             db, | ||||
|             origin, | ||||
|             &incoming_pdu.auth_events, | ||||
|             &create_event, | ||||
|             &room_id, | ||||
|             pub_key_map, | ||||
|             false, | ||||
|         ) | ||||
|         .await; | ||||
| 
 | ||||
|  | @ -1010,31 +1166,23 @@ pub fn handle_incoming_pdu<'a>( | |||
|             .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; | ||||
|         debug!("Added pdu as outlier."); | ||||
| 
 | ||||
|         // 8. if not timeline event: stop
 | ||||
|         if !is_timeline_event | ||||
|             || incoming_pdu.origin_server_ts | ||||
|                 < db.rooms | ||||
|                     .first_pdu_in_room(&room_id) | ||||
|                     .map_err(|_| "Error loading first room event.".to_owned())? | ||||
|                     .expect("Room exists") | ||||
|                     .origin_server_ts | ||||
|         { | ||||
|             return Ok(None); | ||||
|         Ok((incoming_pdu, val)) | ||||
|     }) | ||||
| } | ||||
| 
 | ||||
| #[tracing::instrument(skip(incoming_pdu, val, create_event, origin, db, room_id, pub_key_map))] | ||||
| async fn upgrade_outlier_to_timeline_pdu( | ||||
|     incoming_pdu: Arc<PduEvent>, | ||||
|     val: BTreeMap<String, CanonicalJsonValue>, | ||||
|     create_event: &PduEvent, | ||||
|     origin: &ServerName, | ||||
|     db: &Database, | ||||
|     room_id: &RoomId, | ||||
|     pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>, | ||||
| ) -> StdResult<Option<Vec<u8>>, String> { | ||||
|     if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { | ||||
|         return Ok(Some(pduid)); | ||||
|     } | ||||
| 
 | ||||
|         // Load missing prev events first
 | ||||
|         fetch_and_handle_events( | ||||
|             db, | ||||
|             origin, | ||||
|             &incoming_pdu.prev_events, | ||||
|             &room_id, | ||||
|             pub_key_map, | ||||
|             true, | ||||
|         ) | ||||
|         .await; | ||||
| 
 | ||||
|         // TODO: 9. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
 | ||||
| 
 | ||||
|     // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
 | ||||
|     //     doing all the checks in this list starting at 1. These are not timeline events.
 | ||||
| 
 | ||||
|  | @ -1046,24 +1194,27 @@ pub fn handle_incoming_pdu<'a>( | |||
| 
 | ||||
|     if incoming_pdu.prev_events.len() == 1 { | ||||
|         let prev_event = &incoming_pdu.prev_events[0]; | ||||
|             let state = db | ||||
|         let prev_event_sstatehash = db | ||||
|             .rooms | ||||
|             .pdu_shortstatehash(prev_event) | ||||
|                 .map_err(|_| "Failed talking to db".to_owned())? | ||||
|                 .map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok()) | ||||
|                 .flatten(); | ||||
|             if let Some(state) = state { | ||||
|                 let mut state = fetch_and_handle_events( | ||||
|             .map_err(|_| "Failed talking to db".to_owned())?; | ||||
| 
 | ||||
|         let state = | ||||
|             prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); | ||||
| 
 | ||||
|         if let Some(Ok(state)) = state { | ||||
|             warn!("Using cached state"); | ||||
|             let mut state = fetch_and_handle_outliers( | ||||
|                 db, | ||||
|                 origin, | ||||
|                 &state.into_iter().collect::<Vec<_>>(), | ||||
|                 &create_event, | ||||
|                 &room_id, | ||||
|                 pub_key_map, | ||||
|                     false, | ||||
|             ) | ||||
|             .await | ||||
|             .into_iter() | ||||
|                 .map(|pdu| { | ||||
|             .map(|(pdu, _)| { | ||||
|                 ( | ||||
|                     ( | ||||
|                         pdu.kind.clone(), | ||||
|  | @ -1076,7 +1227,8 @@ pub fn handle_incoming_pdu<'a>( | |||
|             }) | ||||
|             .collect::<HashMap<_, _>>(); | ||||
| 
 | ||||
|                 let prev_pdu = db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { | ||||
|             let prev_pdu = | ||||
|                 db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { | ||||
|                     "Could not find prev event, but we know the state.".to_owned() | ||||
|                 })?; | ||||
| 
 | ||||
|  | @ -1090,6 +1242,7 @@ pub fn handle_incoming_pdu<'a>( | |||
|     } | ||||
| 
 | ||||
|     if state_at_incoming_event.is_none() { | ||||
|         warn!("Calling /state_ids"); | ||||
|         // Call /state_ids to find out what the state at this pdu is. We trust the server's
 | ||||
|         // response to some extent, but we still do a lot of checks on the events
 | ||||
|         match db | ||||
|  | @ -1106,27 +1259,31 @@ pub fn handle_incoming_pdu<'a>( | |||
|         { | ||||
|             Ok(res) => { | ||||
|                 debug!("Fetching state events at event."); | ||||
|                     let state_vec = fetch_and_handle_events( | ||||
|                 let state_vec = fetch_and_handle_outliers( | ||||
|                     &db, | ||||
|                     origin, | ||||
|                     &res.pdu_ids, | ||||
|                     &create_event, | ||||
|                     &room_id, | ||||
|                     pub_key_map, | ||||
|                         false, | ||||
|                 ) | ||||
|                 .await; | ||||
| 
 | ||||
|                 let mut state = HashMap::new(); | ||||
|                     for pdu in state_vec { | ||||
|                         match state.entry((pdu.kind.clone(), pdu.state_key.clone().ok_or_else(|| "Found non-state pdu in state events.".to_owned())?)) { | ||||
|                 for (pdu, _) in state_vec { | ||||
|                     match state.entry(( | ||||
|                         pdu.kind.clone(), | ||||
|                         pdu.state_key | ||||
|                             .clone() | ||||
|                             .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?, | ||||
|                     )) { | ||||
|                         Entry::Vacant(v) => { | ||||
|                             v.insert(pdu); | ||||
|                         } | ||||
|                             Entry::Occupied(_) => { | ||||
|                                 return Err( | ||||
|                                     "State event's type and state_key combination exists multiple times.".to_owned(), | ||||
|                                 ) | ||||
|                             } | ||||
|                         Entry::Occupied(_) => return Err( | ||||
|                             "State event's type and state_key combination exists multiple times." | ||||
|                                 .to_owned(), | ||||
|                         ), | ||||
|                     } | ||||
|                 } | ||||
| 
 | ||||
|  | @ -1140,13 +1297,13 @@ pub fn handle_incoming_pdu<'a>( | |||
|                 } | ||||
| 
 | ||||
|                 debug!("Fetching auth chain events at event."); | ||||
|                     fetch_and_handle_events( | ||||
|                 fetch_and_handle_outliers( | ||||
|                     &db, | ||||
|                     origin, | ||||
|                     &res.auth_chain_ids, | ||||
|                     &create_event, | ||||
|                     &room_id, | ||||
|                     pub_key_map, | ||||
|                         false, | ||||
|                 ) | ||||
|                 .await; | ||||
| 
 | ||||
|  | @ -1162,6 +1319,27 @@ pub fn handle_incoming_pdu<'a>( | |||
|         state_at_incoming_event.expect("we always set this to some above"); | ||||
| 
 | ||||
|     // 11. Check the auth of the event passes based on the state of the event
 | ||||
|     let create_event_content = | ||||
|         serde_json::from_value::<Raw<CreateEventContent>>(create_event.content.clone()) | ||||
|             .expect("Raw::from_value always works.") | ||||
|             .deserialize() | ||||
|             .map_err(|_| "Invalid PowerLevels event in db.".to_owned())?; | ||||
| 
 | ||||
|     let room_version_id = &create_event_content.room_version; | ||||
|     let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); | ||||
| 
 | ||||
|     // If the previous event was the create event special rules apply
 | ||||
|     let previous_create = if incoming_pdu.auth_events.len() == 1 | ||||
|         && incoming_pdu.prev_events == incoming_pdu.auth_events | ||||
|     { | ||||
|         db.rooms | ||||
|             .get_pdu(&incoming_pdu.auth_events[0]) | ||||
|             .map_err(|e| e.to_string())? | ||||
|             .filter(|maybe_create| **maybe_create == *create_event) | ||||
|     } else { | ||||
|         None | ||||
|     }; | ||||
| 
 | ||||
|     if !state_res::event_auth::auth_check( | ||||
|         &room_version, | ||||
|         &incoming_pdu, | ||||
|  | @ -1204,6 +1382,18 @@ pub fn handle_incoming_pdu<'a>( | |||
|     // Only keep those extremities that were not referenced yet
 | ||||
|     extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true))); | ||||
| 
 | ||||
|     let current_statehash = db | ||||
|         .rooms | ||||
|         .current_shortstatehash(&room_id) | ||||
|         .map_err(|_| "Failed to load current state hash.".to_owned())? | ||||
|         .expect("every room has state"); | ||||
| 
 | ||||
|     let current_state = db | ||||
|         .rooms | ||||
|         .state_full(current_statehash) | ||||
|         .map_err(|_| "Failed to load room state.")?; | ||||
| 
 | ||||
|     if incoming_pdu.state_key.is_some() { | ||||
|         let mut extremity_statehashes = Vec::new(); | ||||
| 
 | ||||
|         for id in &extremities { | ||||
|  | @ -1239,16 +1429,6 @@ pub fn handle_incoming_pdu<'a>( | |||
|         //     don't just trust a set of state we got from a remote).
 | ||||
| 
 | ||||
|         // We do this by adding the current state to the list of fork states
 | ||||
|         let current_statehash = db | ||||
|             .rooms | ||||
|             .current_shortstatehash(&room_id) | ||||
|             .map_err(|_| "Failed to load current state hash.".to_owned())? | ||||
|             .expect("every room has state"); | ||||
| 
 | ||||
|         let current_state = db | ||||
|             .rooms | ||||
|             .state_full(current_statehash) | ||||
|             .map_err(|_| "Failed to load room state.")?; | ||||
| 
 | ||||
|         extremity_statehashes.push((current_statehash.clone(), None)); | ||||
| 
 | ||||
|  | @ -1271,7 +1451,6 @@ pub fn handle_incoming_pdu<'a>( | |||
|         } | ||||
| 
 | ||||
|         // We also add state after incoming event to the fork states
 | ||||
|         extremities.insert(incoming_pdu.event_id.clone()); | ||||
|         let mut state_after = state_at_incoming_event.clone(); | ||||
|         if let Some(state_key) = &incoming_pdu.state_key { | ||||
|             state_after.insert( | ||||
|  | @ -1309,7 +1488,8 @@ pub fn handle_incoming_pdu<'a>( | |||
|             for state in fork_states { | ||||
|                 auth_chain_sets.push( | ||||
|                     get_auth_chain(state.iter().map(|(_, id)| id.clone()).collect(), db) | ||||
|                         .map_err(|_| "Failed to load auth chain.".to_owned())?, | ||||
|                         .map_err(|_| "Failed to load auth chain.".to_owned())? | ||||
|                         .collect(), | ||||
|                 ); | ||||
|             } | ||||
| 
 | ||||
|  | @ -1335,6 +1515,17 @@ pub fn handle_incoming_pdu<'a>( | |||
|             state | ||||
|         }; | ||||
| 
 | ||||
|         // Set the new room state to the resolved state
 | ||||
|         if update_state { | ||||
|             db.rooms | ||||
|                 .force_state(&room_id, new_room_state, &db) | ||||
|                 .map_err(|_| "Failed to set new room state.".to_owned())?; | ||||
|         } | ||||
|         debug!("Updated resolved state"); | ||||
|     } | ||||
| 
 | ||||
|     extremities.insert(incoming_pdu.event_id.clone()); | ||||
| 
 | ||||
|     debug!("starting soft fail auth check"); | ||||
|     // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
 | ||||
|     let soft_fail = !state_res::event_auth::auth_check( | ||||
|  | @ -1367,14 +1558,6 @@ pub fn handle_incoming_pdu<'a>( | |||
|         warn!("Event was soft failed: {:?}", incoming_pdu); | ||||
|     } | ||||
| 
 | ||||
|         // Set the new room state to the resolved state
 | ||||
|         if update_state { | ||||
|             db.rooms | ||||
|                 .force_state(&room_id, new_room_state, &db) | ||||
|                 .map_err(|_| "Failed to set new room state.".to_owned())?; | ||||
|         } | ||||
|         debug!("Updated resolved state"); | ||||
| 
 | ||||
|     if soft_fail { | ||||
|         // Soft fail, we leave the event as an outlier but don't add it to the timeline
 | ||||
|         return Err("Event has been soft failed".into()); | ||||
|  | @ -1383,25 +1566,26 @@ pub fn handle_incoming_pdu<'a>( | |||
|     // Event has passed all auth/stateres checks
 | ||||
|     drop(state_lock); | ||||
|     Ok(pdu_id) | ||||
|     }) | ||||
| } | ||||
| 
 | ||||
| /// Find the event and auth it. Once the event is validated (steps 1 - 8)
 | ||||
| /// it is appended to the outliers Tree.
 | ||||
| ///
 | ||||
| /// Returns pdu and if we fetched it over federation the raw json.
 | ||||
| ///
 | ||||
| /// a. Look in the main timeline (pduid_pdu tree)
 | ||||
| /// b. Look at outlier pdu tree
 | ||||
| /// c. Ask origin server over federation
 | ||||
| /// d. TODO: Ask other servers over federation?
 | ||||
| //#[tracing::instrument(skip(db, key_map, auth_cache))]
 | ||||
| pub(crate) fn fetch_and_handle_events<'a>( | ||||
| #[tracing::instrument(skip(db, origin, events, create_event, room_id, pub_key_map))] | ||||
| pub(crate) fn fetch_and_handle_outliers<'a>( | ||||
|     db: &'a Database, | ||||
|     origin: &'a ServerName, | ||||
|     events: &'a [EventId], | ||||
|     create_event: &'a PduEvent, | ||||
|     room_id: &'a RoomId, | ||||
|     pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>, | ||||
|     are_timeline_events: bool, | ||||
| ) -> AsyncRecursiveType<'a, Vec<Arc<PduEvent>>> { | ||||
| ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>> { | ||||
|     Box::pin(async move { | ||||
|         let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { | ||||
|             Entry::Vacant(e) => { | ||||
|  | @ -1412,35 +1596,32 @@ pub(crate) fn fetch_and_handle_events<'a>( | |||
| 
 | ||||
|         let mut pdus = vec![]; | ||||
|         for id in events { | ||||
|             info!("loading {}", id); | ||||
|             if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) { | ||||
|                 // Exponential backoff
 | ||||
|                 let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); | ||||
|                 let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); | ||||
|                 if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { | ||||
|                     min_elapsed_duration = Duration::from_secs(60 * 60 * 24); | ||||
|                 } | ||||
| 
 | ||||
|                 if time.elapsed() < min_elapsed_duration { | ||||
|                     debug!("Backing off from {}", id); | ||||
|                     info!("Backing off from {}", id); | ||||
|                     continue; | ||||
|                 } | ||||
|             } | ||||
| 
 | ||||
|             // a. Look in the main timeline (pduid_pdu tree)
 | ||||
|             // b. Look at outlier pdu tree
 | ||||
|             // (get_pdu checks both)
 | ||||
|             let local_pdu = if are_timeline_events { | ||||
|                 db.rooms.get_non_outlier_pdu(&id).map(|o| o.map(Arc::new)) | ||||
|             } else { | ||||
|                 db.rooms.get_pdu(&id) | ||||
|             }; | ||||
|             // (get_pdu_json checks both)
 | ||||
|             let local_pdu = db.rooms.get_pdu(&id); | ||||
|             let pdu = match local_pdu { | ||||
|                 Ok(Some(pdu)) => { | ||||
|                     trace!("Found {} in db", id); | ||||
|                     pdu | ||||
|                     (pdu, None) | ||||
|                 } | ||||
|                 Ok(None) => { | ||||
|                     // c. Ask origin server over federation
 | ||||
|                     debug!("Fetching {} over federation.", id); | ||||
|                     info!("Fetching {} over federation.", id); | ||||
|                     match db | ||||
|                         .sending | ||||
|                         .send_federation_request( | ||||
|  | @ -1451,41 +1632,29 @@ pub(crate) fn fetch_and_handle_events<'a>( | |||
|                         .await | ||||
|                     { | ||||
|                         Ok(res) => { | ||||
|                             debug!("Got {} over federation", id); | ||||
|                             let (event_id, mut value) = | ||||
|                             info!("Got {} over federation", id); | ||||
|                             let (event_id, value) = | ||||
|                                 match crate::pdu::gen_event_id_canonical_json(&res.pdu) { | ||||
|                                     Ok(t) => t, | ||||
|                                     Err(_) => continue, | ||||
|                                     Err(_) => { | ||||
|                                         back_off(id.clone()); | ||||
|                                         continue; | ||||
|                                     } | ||||
|                                 }; | ||||
| 
 | ||||
|                             // This will also fetch the auth chain
 | ||||
|                             match handle_incoming_pdu( | ||||
|                             match handle_outlier_pdu( | ||||
|                                 origin, | ||||
|                                 create_event, | ||||
|                                 &event_id, | ||||
|                                 &room_id, | ||||
|                                 value.clone(), | ||||
|                                 are_timeline_events, | ||||
|                                 db, | ||||
|                                 pub_key_map, | ||||
|                             ) | ||||
|                             .await | ||||
|                             { | ||||
|                                 Ok(_) => { | ||||
|                                     value.insert( | ||||
|                                         "event_id".to_owned(), | ||||
|                                         CanonicalJsonValue::String(event_id.into()), | ||||
|                                     ); | ||||
| 
 | ||||
|                                     Arc::new( | ||||
|                                         serde_json::from_value( | ||||
|                                             serde_json::to_value(value) | ||||
|                                                 .expect("canonicaljsonobject is valid value"), | ||||
|                                         ) | ||||
|                                         .expect( | ||||
|                                             "This is possible because handle_incoming_pdu worked", | ||||
|                                         ), | ||||
|                                     ) | ||||
|                                 } | ||||
|                                 Ok((pdu, json)) => (pdu, Some(json)), | ||||
|                                 Err(e) => { | ||||
|                                     warn!("Authentication of event {} failed: {:?}", id, e); | ||||
|                                     back_off(id.clone()); | ||||
|  | @ -1501,7 +1670,7 @@ pub(crate) fn fetch_and_handle_events<'a>( | |||
|                     } | ||||
|                 } | ||||
|                 Err(e) => { | ||||
|                     debug!("Error loading {}: {}", id, e); | ||||
|                     warn!("Error loading {}: {}", id, e); | ||||
|                     continue; | ||||
|                 } | ||||
|             }; | ||||
|  | @ -1513,7 +1682,7 @@ pub(crate) fn fetch_and_handle_events<'a>( | |||
| 
 | ||||
| /// Search the DB for the signing keys of the given server, if we don't have them
 | ||||
| /// fetch them from the server and save to our DB.
 | ||||
| #[tracing::instrument(skip(db))] | ||||
| #[tracing::instrument(skip(db, origin, signature_ids))] | ||||
| pub(crate) async fn fetch_signing_keys( | ||||
|     db: &Database, | ||||
|     origin: &ServerName, | ||||
|  | @ -1684,7 +1853,7 @@ fn append_incoming_pdu( | |||
|     // We append to state before appending the pdu, so we don't have a moment in time with the
 | ||||
|     // pdu without its state. This is okay because append_pdu can't fail.
 | ||||
|     db.rooms | ||||
|         .set_event_state(&pdu.event_id, state, &db.globals)?; | ||||
|         .set_event_state(&pdu.event_id, &pdu.room_id, state, &db.globals)?; | ||||
| 
 | ||||
|     let pdu_id = db.rooms.append_pdu( | ||||
|         pdu, | ||||
|  | @ -1754,51 +1923,72 @@ fn append_incoming_pdu( | |||
|     Ok(pdu_id) | ||||
| } | ||||
| 
 | ||||
| fn get_auth_chain(starting_events: Vec<EventId>, db: &Database) -> Result<HashSet<EventId>> { | ||||
| #[tracing::instrument(skip(starting_events, db))] | ||||
| fn get_auth_chain( | ||||
|     starting_events: Vec<EventId>, | ||||
|     db: &Database, | ||||
| ) -> Result<impl Iterator<Item = EventId> + '_> { | ||||
|     let mut full_auth_chain = HashSet::new(); | ||||
| 
 | ||||
|     let starting_events = starting_events | ||||
|         .iter() | ||||
|         .map(|id| { | ||||
|             db.rooms | ||||
|                 .get_or_create_shorteventid(id, &db.globals) | ||||
|                 .map(|s| (s, id)) | ||||
|         }) | ||||
|         .collect::<Result<Vec<_>>>()?; | ||||
| 
 | ||||
|     let mut cache = db.rooms.auth_chain_cache(); | ||||
| 
 | ||||
|     for event_id in &starting_events { | ||||
|         if let Some(cached) = cache.get_mut(&[event_id.clone()][..]) { | ||||
|     for (sevent_id, event_id) in starting_events { | ||||
|         if let Some(cached) = cache.get_mut(&sevent_id) { | ||||
|             full_auth_chain.extend(cached.iter().cloned()); | ||||
|         } else { | ||||
|             drop(cache); | ||||
|             let mut auth_chain = HashSet::new(); | ||||
|             get_auth_chain_recursive(&event_id, &mut auth_chain, db)?; | ||||
|             let auth_chain = get_auth_chain_inner(&event_id, db)?; | ||||
|             cache = db.rooms.auth_chain_cache(); | ||||
|             cache.insert(vec![event_id.clone()], auth_chain.clone()); | ||||
|             cache.insert(sevent_id, auth_chain.clone()); | ||||
|             full_auth_chain.extend(auth_chain); | ||||
|         }; | ||||
|     } | ||||
| 
 | ||||
|     Ok(full_auth_chain) | ||||
|     drop(cache); | ||||
| 
 | ||||
|     Ok(full_auth_chain | ||||
|         .into_iter() | ||||
|         .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) | ||||
| } | ||||
| 
 | ||||
| fn get_auth_chain_recursive( | ||||
|     event_id: &EventId, | ||||
|     found: &mut HashSet<EventId>, | ||||
|     db: &Database, | ||||
| ) -> Result<()> { | ||||
|     let r = db.rooms.get_pdu(&event_id); | ||||
|     match r { | ||||
| #[tracing::instrument(skip(event_id, db))] | ||||
| fn get_auth_chain_inner(event_id: &EventId, db: &Database) -> Result<HashSet<u64>> { | ||||
|     let mut todo = vec![event_id.clone()]; | ||||
|     let mut found = HashSet::new(); | ||||
| 
 | ||||
|     while let Some(event_id) = todo.pop() { | ||||
|         match db.rooms.get_pdu(&event_id) { | ||||
|             Ok(Some(pdu)) => { | ||||
|                 for auth_event in &pdu.auth_events { | ||||
|                 if !found.contains(auth_event) { | ||||
|                     found.insert(auth_event.clone()); | ||||
|                     get_auth_chain_recursive(&auth_event, found, db)?; | ||||
|                     let sauthevent = db | ||||
|                         .rooms | ||||
|                         .get_or_create_shorteventid(auth_event, &db.globals)?; | ||||
| 
 | ||||
|                     if !found.contains(&sauthevent) { | ||||
|                         found.insert(sauthevent); | ||||
|                         todo.push(auth_event.clone()); | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|             Ok(None) => { | ||||
|             warn!("Could not find pdu mentioned in auth events."); | ||||
|                 warn!("Could not find pdu mentioned in auth events: {}", event_id); | ||||
|             } | ||||
|             Err(e) => { | ||||
|             warn!("Could not load event in auth chain: {}", e); | ||||
|                 warn!("Could not load event in auth chain: {} {}", event_id, e); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     Ok(()) | ||||
|     Ok(found) | ||||
| } | ||||
| 
 | ||||
| #[cfg_attr(
 | ||||
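`get_auth_chain` above now keys its work on numeric short event IDs, caches the chain computed for each starting event, and `get_auth_chain_inner` replaces the old recursion with an explicit stack. Stripped of the short-ID mapping and cache handling, the traversal is a plain iterative reachability walk over auth events; a condensed sketch with an in-memory map standing in for the database:

use std::collections::{HashMap, HashSet};

// Sketch of the iterative walk in get_auth_chain_inner: starting from one
// event, collect everything reachable through auth_events. `auth_events`
// maps an event ID to the IDs it cites; the real code loads PDUs from the
// database and works on short IDs.
fn auth_chain(start: &str, auth_events: &HashMap<String, Vec<String>>) -> HashSet<String> {
    let mut todo = vec![start.to_owned()];
    let mut found = HashSet::new();

    while let Some(event_id) = todo.pop() {
        for auth_event in auth_events.get(&event_id).into_iter().flatten() {
            // Only queue events we have not seen yet, so shared ancestors
            // are walked once.
            if found.insert(auth_event.clone()) {
                todo.push(auth_event.clone());
            }
        }
    }

    found
}

In the diff the per-event result is additionally stored in `auth_chain_cache` under the event's short ID, so overlapping chains are reused across calls, and the combined set is only mapped back to real event IDs at the end via `get_eventid_from_short`.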
|  | @ -1892,7 +2082,6 @@ pub fn get_event_authorization_route( | |||
| 
 | ||||
|     Ok(get_event_authorization::v1::Response { | ||||
|         auth_chain: auth_chain_ids | ||||
|             .into_iter() | ||||
|             .filter_map(|id| Some(db.rooms.get_pdu_json(&id).ok()??)) | ||||
|             .map(|event| PduEvent::convert_to_outgoing_federation_event(event)) | ||||
|             .collect(), | ||||
|  | @ -1936,7 +2125,6 @@ pub fn get_room_state_route( | |||
| 
 | ||||
|     Ok(get_room_state::v1::Response { | ||||
|         auth_chain: auth_chain_ids | ||||
|             .into_iter() | ||||
|             .map(|id| { | ||||
|                 Ok::<_, Error>(PduEvent::convert_to_outgoing_federation_event( | ||||
|                     db.rooms.get_pdu_json(&id)?.unwrap(), | ||||
|  | @ -1979,7 +2167,7 @@ pub fn get_room_state_ids_route( | |||
|     let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?; | ||||
| 
 | ||||
|     Ok(get_room_state_ids::v1::Response { | ||||
|         auth_chain_ids: auth_chain_ids.into_iter().collect(), | ||||
|         auth_chain_ids: auth_chain_ids.collect(), | ||||
|         pdu_ids, | ||||
|     } | ||||
|     .into()) | ||||
|  | @ -2056,6 +2244,7 @@ pub fn create_join_event_template_route( | |||
|         is_direct: None, | ||||
|         membership: MembershipState::Join, | ||||
|         third_party_invite: None, | ||||
|         reason: None, | ||||
|     }) | ||||
|     .expect("member event is valid value"); | ||||
| 
 | ||||
|  | @ -2248,7 +2437,6 @@ pub async fn create_join_event_route( | |||
|     Ok(create_join_event::v2::Response { | ||||
|         room_state: RoomState { | ||||
|             auth_chain: auth_chain_ids | ||||
|                 .iter() | ||||
|                 .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) | ||||
|                 .map(PduEvent::convert_to_outgoing_federation_event) | ||||
|                 .collect(), | ||||
|  | @ -2359,6 +2547,7 @@ pub async fn create_invite_route( | |||
|             &sender, | ||||
|             Some(invite_state), | ||||
|             &db, | ||||
|             true, | ||||
|         )?; | ||||
|     } | ||||
| 
 | ||||
|  | @ -2532,6 +2721,7 @@ pub async fn claim_keys_route( | |||
|     .into()) | ||||
| } | ||||
| 
 | ||||
| #[tracing::instrument(skip(event, pub_key_map, db))] | ||||
| pub async fn fetch_required_signing_keys( | ||||
|     event: &BTreeMap<String, CanonicalJsonValue>, | ||||
|     pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>, | ||||
|  |  | |||
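One more behavioral tweak in `fetch_and_handle_outliers` above: events that previously failed to fetch now back off for 5 minutes times the square of the attempt count, capped at 24 hours, instead of 30 seconds times the square. A small standalone sketch of that calculation (not Conduit's ratelimiter type, just the arithmetic):

use std::time::Duration;

// Backoff applied before refetching an event that previously failed:
// 5 minutes times the square of the number of attempts, capped at one day.
fn backoff_for(tries: u32) -> Duration {
    let mut min_elapsed = Duration::from_secs(5 * 60) * tries * tries;
    let cap = Duration::from_secs(60 * 60 * 24);
    if min_elapsed > cap {
        min_elapsed = cap;
    }
    min_elapsed
}

fn main() {
    // 1 try -> 5 min, 2 -> 20 min, 3 -> 45 min, and from 17 tries on the 24 h cap applies.
    for tries in [1u32, 2, 3, 17] {
        println!("{} tries -> {:?}", tries, backoff_for(tries));
    }
}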