diff --git a/Cargo.lock b/Cargo.lock
index 258af13..5d8f46f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -299,9 +299,9 @@ dependencies = [
 
 [[package]]
 name = "dtoa"
-version = "0.4.5"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3"
+checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b"
 
 [[package]]
 name = "encoding_rs"
@@ -632,9 +632,9 @@ dependencies = [
 
 [[package]]
 name = "itoa"
-version = "0.4.5"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"
+checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
 
 [[package]]
 name = "jpeg-decoder"
@@ -1261,7 +1261,7 @@ dependencies = [
 [[package]]
 name = "ruma"
 version = "0.1.0"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "ruma-api",
 "ruma-client-api",
@@ -1275,7 +1275,7 @@ dependencies = [
 [[package]]
 name = "ruma-api"
 version = "0.16.1"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "http",
 "percent-encoding 2.1.0",
@@ -1290,7 +1290,7 @@ dependencies = [
 [[package]]
 name = "ruma-api-macros"
 version = "0.16.1"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "proc-macro2 1.0.18",
 "quote 1.0.7",
@@ -1300,7 +1300,7 @@ dependencies = [
 [[package]]
 name = "ruma-client-api"
 version = "0.9.0"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "http",
 "js_int",
@@ -1317,7 +1317,7 @@ dependencies = [
 [[package]]
 name = "ruma-common"
 version = "0.1.3"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "matches",
 "ruma-serde",
@@ -1354,7 +1354,7 @@ dependencies = [
 [[package]]
 name = "ruma-federation-api"
 version = "0.0.2"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "js_int",
 "matches",
@@ -1369,7 +1369,7 @@ dependencies = [
 [[package]]
 name = "ruma-identifiers"
 version = "0.16.2"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "rand",
 "serde",
@@ -1379,7 +1379,7 @@ dependencies = [
 [[package]]
 name = "ruma-serde"
 version = "0.2.2"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "dtoa",
 "itoa",
@@ -1392,7 +1392,7 @@ dependencies = [
 [[package]]
 name = "ruma-signatures"
 version = "0.6.0-dev.1"
-source = "git+https://github.com/ruma/ruma?rev=baa87104569b45dc07a9a7a16d3c7592ab8f4d6b#baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+source = "git+https://github.com/timokoesters/ruma#2e75b221b27698dea528d92b87e29f0e2968c495"
 dependencies = [
 "base64 0.12.2",
 "ring",
@@ -1684,6 +1684,12 @@ dependencies = [
 "winapi 0.3.8",
 ]
 
+[[package]]
+name = "tinyvec"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed"
+
 [[package]]
 name = "tokio"
 version = "0.2.21"
@@ -1795,11 +1801,11 @@ dependencies = [
 
 [[package]]
 name = "unicode-normalization"
-version = "0.1.12"
+version = "0.1.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4"
+checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977"
 dependencies = [
- "smallvec",
+ "tinyvec",
 ]
 
 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 62d7b7a..4bdb929 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,15 +29,16 @@ thiserror = "1.0.19"
 image = { version = "0.23.4", default-features = false, features = ["jpeg", "png", "gif"] }
 
 [dependencies.ruma]
-git = "https://github.com/ruma/ruma"
-rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
+git = "https://github.com/timokoesters/ruma"
+#rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b"
 #path = "../ruma/ruma"
 features = ["rand", "client-api", "federation-api"]
 
 # These are required only until ruma-events and ruma-federation-api are merged into ruma/ruma
 [patch.crates-io]
-ruma-common = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" }
-ruma-serde = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" }
-ruma-identifiers = { git = "https://github.com/ruma/ruma", rev = "baa87104569b45dc07a9a7a16d3c7592ab8f4d6b" }
+ruma-common = { git = "https://github.com/timokoesters/ruma" }
+ruma-serde = { git = "https://github.com/timokoesters/ruma" }
+ruma-identifiers = { git = "https://github.com/timokoesters/ruma" }
 #ruma-common = { path = "../ruma/ruma-common" }
 #ruma-serde = { path = "../ruma/ruma-serde" }
+#ruma-identifiers = { path = "../ruma/ruma-identifiers" }
diff --git a/src/client_server.rs b/src/client_server.rs
index c1bb693..fac0286 100644
--- a/src/client_server.rs
+++ b/src/client_server.rs
@@ -5,6 +5,7 @@ use std::{
 };
 
 use crate::{utils, ConduitResult, Database, Error, Ruma};
+use keys::{upload_signatures, upload_signing_keys};
 use log::warn;
 use rocket::{delete, get, options, post, put, State};
 use ruma::{
@@ -13,6 +14,10 @@ use ruma::{
     r0::{
         account::{get_username_availability, register},
         alias::{create_alias, delete_alias, get_alias},
+        backup::{
+            add_backup_keys, create_backup, get_backup, get_backup_keys, get_latest_backup,
+            update_backup,
+        },
         capabilities::get_capabilities,
         config::{get_global_account_data, set_global_account_data},
         context::get_context,
@@ -33,7 +38,7 @@ use ruma::{
         profile::{
             get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
         },
-        push::{get_pushrules_all, set_pushrule, set_pushrule_enabled},
+        push::{get_pushers, get_pushrules_all, set_pushrule, set_pushrule_enabled},
         read_marker::set_read_marker,
         redact::redact_event,
         room::{self, create_room},
@@ -71,9 +76,13 @@ const SESSION_ID_LENGTH: usize = 256;
 
 #[get("/_matrix/client/versions")]
 pub fn get_supported_versions_route() -> ConduitResult<get_supported_versions::Response> {
+    let mut unstable_features = BTreeMap::new();
+
+    unstable_features.insert("org.matrix.e2e_cross_signing".to_owned(), true);
+
     Ok(get_supported_versions::Response {
         versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()],
-        unstable_features: BTreeMap::new(),
+        unstable_features,
     }
     .into())
 }
@@ -349,11 +358,11 @@ pub fn get_pushrules_all_route(
 
 #[put(
     "/_matrix/client/r0/pushrules/<_scope>/<_kind>/<_rule_id>",
-    data = "<body>"
+    //data = "<body>"
 )]
 pub fn set_pushrule_route(
-    db: State<'_, Database>,
-    body: Ruma<set_pushrule::Request>,
+    //db: State<'_, Database>,
+    //body: Ruma<set_pushrule::Request>,
     _scope: String,
     _kind: String,
     _rule_id: String,
@@ -694,8 +703,13 @@ pub fn upload_keys_route(
     }
 
     if let Some(device_keys) = &body.device_keys {
-        db.users
-            .add_device_keys(user_id, device_id, device_keys, &db.globals)?;
+        // This check is needed to assure that signatures are kept
+        if db.users.get_device_keys(user_id, device_id)?.is_none() {
+            db.users
+                .add_device_keys(user_id, device_id, device_keys, &db.globals)?;
+        } else {
+            println!("Key from {} was skipped: {:?}", user_id, device_keys);
+        }
     }
 
     Ok(upload_keys::Response {
@@ -709,33 +723,38 @@ pub fn get_keys_route(
     db: State<'_, Database>,
     body: Ruma<get_keys::Request>,
 ) -> ConduitResult<get_keys::Response> {
+    let sender_id = body.user_id.as_ref().expect("user is authenticated");
+
+    let mut master_keys = BTreeMap::new();
+    let mut self_signing_keys = BTreeMap::new();
+    let mut user_signing_keys = BTreeMap::new();
     let mut device_keys = BTreeMap::new();
 
     for (user_id, device_ids) in &body.device_keys {
         if device_ids.is_empty() {
             let mut container = BTreeMap::new();
-            for result in db.users.all_device_keys(&user_id.clone()) {
-                let (device_id, mut keys) = result?;
+            for device_id in db.users.all_device_ids(user_id) {
+                let device_id = device_id?;
+                if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? {
+                    let metadata = db
+                        .users
+                        .get_device_metadata(user_id, &device_id)?
+                        .ok_or_else(|| {
+                            Error::bad_database("all_device_keys contained nonexistent device.")
+                        })?;
 
-                let metadata = db
-                    .users
-                    .get_device_metadata(user_id, &device_id)?
-                    .ok_or_else(|| {
-                        Error::bad_database("all_device_keys contained nonexistent device.")
-                    })?;
+                    keys.unsigned = Some(keys::UnsignedDeviceInfo {
+                        device_display_name: metadata.display_name,
+                    });
 
-                keys.unsigned = Some(keys::UnsignedDeviceInfo {
-                    device_display_name: metadata.display_name,
-                });
-
-                container.insert(device_id, keys);
+                    container.insert(device_id.to_owned(), keys);
+                }
             }
             device_keys.insert(user_id.clone(), container);
         } else {
             for device_id in device_ids {
                 let mut container = BTreeMap::new();
-                for keys in db.users.get_device_keys(&user_id.clone(), &device_id) {
-                    let mut keys = keys?;
+                if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? {
                     let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or(
                         Error::BadRequest(
                             ErrorKind::InvalidParam,
                             "Tried to get keys for nonexistent device.",
                         ),
                     )?;

                     keys.unsigned = Some(keys::UnsignedDeviceInfo {
                         device_display_name: metadata.display_name,
                     });

                     container.insert(device_id.clone(), keys);
                 }
                 device_keys.insert(user_id.clone(), container);
             }
         }
+
+        if let Some(master_key) = db.users.get_master_key(user_id, sender_id)? {
+            master_keys.insert(user_id.clone(), master_key);
+        }
+        if let Some(self_signing_key) = db.users.get_self_signing_key(user_id, sender_id)? {
+            self_signing_keys.insert(user_id.clone(), self_signing_key);
+        }
+        if user_id == sender_id {
+            if let Some(user_signing_key) = db.users.get_user_signing_key(sender_id)? {
+                user_signing_keys.insert(user_id.clone(), user_signing_key);
+            }
+        }
     }
 
     Ok(get_keys::Response {
-        failures: BTreeMap::new(),
+        master_keys,
+        self_signing_keys,
+        user_signing_keys,
         device_keys,
+        failures: BTreeMap::new(),
     }
     .into())
 }
@@ -789,6 +823,125 @@ pub fn claim_keys_route(
     .into())
 }
 
+#[post("/_matrix/client/unstable/room_keys/version", data = "<body>")]
+pub fn create_backup_route(
+    db: State<'_, Database>,
+    body: Ruma<create_backup::Request>,
+) -> ConduitResult<create_backup::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+    let version = db
+        .key_backups
+        .create_backup(&user_id, &body.algorithm, &db.globals)?;
+
+    Ok(create_backup::Response { version }.into())
+}
+
+#[put(
+    "/_matrix/client/unstable/room_keys/version/<_version>",
+    data = "<body>"
+)]
+pub fn update_backup_route(
+    db: State<'_, Database>,
+    body: Ruma<update_backup::Request>,
+    _version: String,
+) -> ConduitResult<update_backup::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+    db.key_backups
+        .update_backup(&user_id, &body.version, &body.algorithm, &db.globals)?;
+
+    Ok(update_backup::Response.into())
+}
+
+#[get("/_matrix/client/unstable/room_keys/version", data = "<body>")]
+pub fn get_latest_backup_route(
+    db: State<'_, Database>,
+    body: Ruma<get_latest_backup::Request>,
+) -> ConduitResult<get_latest_backup::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+
+    let (version, algorithm) =
+        db.key_backups
+            .get_latest_backup(&user_id)?
+            .ok_or(Error::BadRequest(
+                ErrorKind::NotFound,
+                "Key backup does not exist.",
+            ))?;
+
+    Ok(get_latest_backup::Response {
+        algorithm,
+        count: (db.key_backups.count_keys(user_id, &version)? as u32).into(),
+        etag: db.key_backups.get_etag(user_id, &version)?,
+        version,
+    }
+    .into())
+}
+
+#[get(
+    "/_matrix/client/unstable/room_keys/version/<_version>",
+    data = "<body>"
+)]
+pub fn get_backup_route(
+    db: State<'_, Database>,
+    body: Ruma<get_backup::Request>,
+    _version: String,
+) -> ConduitResult<get_backup::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+    let algorithm =
+        db.key_backups
+            .get_backup(&user_id, &body.version)?
+            .ok_or(Error::BadRequest(
+                ErrorKind::NotFound,
+                "Key backup does not exist.",
+            ))?;
+
+    Ok(get_backup::Response {
+        algorithm,
+        count: (db.key_backups.count_keys(user_id, &body.version)? as u32).into(),
+        etag: db.key_backups.get_etag(user_id, &body.version)?,
+        version: body.version.clone(),
+    }
+    .into())
+}
+
+#[put("/_matrix/client/unstable/room_keys/keys", data = "<body>")]
+pub fn add_backup_keys_route(
+    db: State<'_, Database>,
+    body: Ruma<add_backup_keys::Request>,
+) -> ConduitResult<add_backup_keys::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+
+    for (room_id, room) in &body.rooms {
+        for (session_id, key_data) in &room.sessions {
+            db.key_backups.add_key(
+                &user_id,
+                &body.version,
+                &room_id,
+                &session_id,
+                &key_data,
+                &db.globals,
+            )?
+        }
+    }
+
+    Ok(add_backup_keys::Response {
+        count: (db.key_backups.count_keys(user_id, &body.version)? as u32).into(),
+        etag: db.key_backups.get_etag(user_id, &body.version)?,
+    }
+    .into())
+}
+
+#[get("/_matrix/client/unstable/room_keys/keys", data = "<body>")]
+pub fn get_backup_keys_route(
+    db: State<'_, Database>,
+    body: Ruma<get_backup_keys::Request>,
+) -> ConduitResult<get_backup_keys::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+
+    let rooms = db.key_backups.get_all(&user_id, &body.version)?;
+
+    Ok(get_backup_keys::Response { rooms }.into())
+}
+
 #[post("/_matrix/client/r0/rooms/<_room_id>/read_markers", data = "<body>")]
 pub fn set_read_marker_route(
     db: State<'_, Database>,
@@ -2040,7 +2193,7 @@ pub fn sync_route(
 
             let mut pdus = db
                 .rooms
-                .pdus_since(&room_id, since)?
+                .pdus_since(&user_id, &room_id, since)?
                 .filter_map(|r| r.ok()) // Filter out buggy events
                 .collect::<Vec<_>>();
 
@@ -2083,7 +2236,7 @@ pub fn sync_route(
 
             for hero in db
                 .rooms
-                .all_pdus(&room_id)?
+                .all_pdus(&user_id, &room_id)?
                 .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
                 .filter(|pdu| pdu.kind == EventType::RoomMember)
                 .map(|pdu| {
@@ -2157,7 +2310,7 @@ pub fn sync_route(
             if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &user_id)? {
                 Some(
                     (db.rooms
-                        .pdus_since(&room_id, last_read)?
+                        .pdus_since(&user_id, &room_id, last_read)?
                         .filter_map(|pdu| pdu.ok()) // Filter out buggy events
                         .filter(|pdu| {
                             matches!(
@@ -2271,7 +2424,7 @@ pub fn sync_route(
     let mut left_rooms = BTreeMap::new();
     for room_id in db.rooms.rooms_left(&user_id) {
         let room_id = room_id?;
-        let pdus = db.rooms.pdus_since(&room_id, since)?;
+        let pdus = db.rooms.pdus_since(&user_id, &room_id, since)?;
         let room_events = pdus
             .filter_map(|pdu| pdu.ok()) // Filter out buggy events
             .map(|pdu| pdu.to_room_event())
@@ -2375,7 +2528,7 @@ pub fn sync_route(
         device_lists: sync_events::DeviceLists {
             changed: if since != 0 {
                 db.users
-                    .device_keys_changed(since)
+                    .keys_changed(since)
                    .filter_map(|u| u.ok())
                    .collect() // Filter out buggy events
             } else {
@@ -2426,7 +2579,7 @@ pub fn get_context_route(
     let events_before = db
         .rooms
-        .pdus_until(&body.room_id, base_token)
+        .pdus_until(&user_id, &body.room_id, base_token)
         .take(
             u32::try_from(body.limit).map_err(|_| {
                 Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
            })
@@ -2452,7 +2605,7 @@ pub fn get_context_route(
     let events_after = db
         .rooms
-        .pdus_after(&body.room_id, base_token)
+        .pdus_after(&user_id, &body.room_id, base_token)
         .take(
             u32::try_from(body.limit).map_err(|_| {
                 Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
            })
@@ -2516,7 +2669,7 @@ pub fn get_message_events_route(
         get_message_events::Direction::Forward => {
             let events_after = db
                 .rooms
-                .pdus_after(&body.room_id, from)
+                .pdus_after(&user_id, &body.room_id, from)
                 // Use limit or else 10
                 .take(body.limit.map_or(Ok::<_, Error>(10_usize), |l| {
                     Ok(u32::try_from(l).map_err(|_| {
@@ -2551,7 +2704,7 @@ pub fn get_message_events_route(
         get_message_events::Direction::Backward => {
             let events_before = db
                 .rooms
-                .pdus_until(&body.room_id, from)
+                .pdus_until(&user_id, &body.room_id, from)
                 // Use limit or else 10
                 .take(body.limit.map_or(Ok::<_, Error>(10_usize), |l| {
                     Ok(u32::try_from(l).map_err(|_| {
@@ -2871,9 +3024,126 @@ pub fn delete_devices_route(
     Ok(delete_devices::Response.into())
 }
 
+#[post("/_matrix/client/unstable/keys/device_signing/upload", data = "<body>")]
+pub fn upload_signing_keys_route(
+    db: State<'_, Database>,
+    body: Ruma<upload_signing_keys::Request>,
+) -> ConduitResult<upload_signing_keys::Response> {
+    let user_id = body.user_id.as_ref().expect("user is authenticated");
+    let device_id = body.device_id.as_ref().expect("user is authenticated");
+
+    // UIAA
+    let mut uiaainfo = UiaaInfo {
+        flows: vec![AuthFlow {
+            stages: vec!["m.login.password".to_owned()],
+        }],
+        completed: Vec::new(),
+        params: Default::default(),
+        session: None,
+        auth_error: None,
+    };
+
+    if let Some(auth) = &body.auth {
+        let (worked, uiaainfo) = db.uiaa.try_auth(
+            &user_id,
+            &device_id,
+            auth,
+            &uiaainfo,
+            &db.users,
+            &db.globals,
+        )?;
+        if !worked {
+            return Err(Error::Uiaa(uiaainfo));
+        }
+    // Success!
+    } else {
+        uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
+        db.uiaa.create(&user_id, &device_id, &uiaainfo)?;
+        return Err(Error::Uiaa(uiaainfo));
+    }
+
+    if let Some(master_key) = &body.master_key {
+        db.users.add_cross_signing_keys(
+            user_id,
+            &master_key,
+            &body.self_signing_key,
+            &body.user_signing_key,
+            &db.globals,
+        )?;
+    }
+
+    Ok(upload_signing_keys::Response.into())
+}
+
+#[post("/_matrix/client/unstable/keys/signatures/upload", data = "<body>")]
+pub fn upload_signatures_route(
+    db: State<'_, Database>,
+    body: Ruma<upload_signatures::Request>,
+) -> ConduitResult<upload_signatures::Response> {
+    let sender_id = body.user_id.as_ref().expect("user is authenticated");
+
+    for (user_id, signed_keys) in &body.signed_keys {
+        for (key_id, signed_key) in signed_keys {
+            for signature in signed_key
+                .get("signatures")
+                .ok_or(Error::BadRequest(
+                    ErrorKind::InvalidParam,
+                    "Missing signatures field.",
+                ))?
+                .get(sender_id.to_string())
+                .ok_or(Error::BadRequest(
+                    ErrorKind::InvalidParam,
+                    "Invalid user in signatures field.",
+                ))?
+                .as_object()
+                .ok_or(Error::BadRequest(
+                    ErrorKind::InvalidParam,
+                    "Invalid signature.",
+                ))?
+                .clone()
+                .into_iter()
+            {
+                // Signature validation?
+                let signature = (
+                    signature.0,
+                    signature
+                        .1
+                        .as_str()
+                        .ok_or(Error::BadRequest(
+                            ErrorKind::InvalidParam,
+                            "Invalid signature value.",
+                        ))?
+                        .to_owned(),
+                );
+                db.users
+                    .sign_key(&user_id, &key_id, signature, &sender_id, &db.globals)?;
+            }
+        }
+    }
+
+    Ok(upload_signatures::Response.into())
+}
+
+#[get("/_matrix/client/r0/pushers")]
+pub fn pushers_route() -> ConduitResult<get_pushers::Response> {
+    Ok(get_pushers::Response {
+        pushers: Vec::new(),
+    }
+    .into())
+}
+
+#[post("/_matrix/client/r0/pushers/set")]
+pub fn set_pushers_route() -> ConduitResult<get_pushers::Response> {
+    Ok(get_pushers::Response {
+        pushers: Vec::new(),
+    }
+    .into())
+}
+
 #[options("/<_segments..>")]
 pub fn options_route(
     _segments: rocket::http::uri::Segments<'_>,
 ) -> ConduitResult<send_event_to_device::Response> {
     Ok(send_event_to_device::Response.into())
 }
+
diff --git a/src/database.rs b/src/database.rs
index 2cc01ea..370fde7 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -1,6 +1,7 @@
 pub(self) mod account_data;
 pub(self) mod global_edus;
 pub(self) mod globals;
+pub(self) mod key_backups;
 pub(self) mod media;
 pub(self) mod rooms;
 pub(self) mod uiaa;
@@ -21,6 +22,7 @@ pub struct Database {
     pub account_data: account_data::AccountData,
     pub global_edus: global_edus::GlobalEdus,
     pub media: media::Media,
+    pub key_backups: key_backups::KeyBackups,
     pub _db: sled::Db,
 }
 
@@ -73,8 +75,11 @@ impl Database {
                 userdeviceid_metadata: db.open_tree("userdeviceid_metadata")?,
                 token_userdeviceid: db.open_tree("token_userdeviceid")?,
                 onetimekeyid_onetimekeys: db.open_tree("onetimekeyid_onetimekeys")?,
-                userdeviceid_devicekeys: db.open_tree("userdeviceid_devicekeys")?,
-                devicekeychangeid_userid: db.open_tree("devicekeychangeid_userid")?,
+                keychangeid_userid: db.open_tree("devicekeychangeid_userid")?,
+                keyid_key: db.open_tree("keyid_key")?,
+                userid_masterkeyid: db.open_tree("userid_masterkeyid")?,
+                userid_selfsigningkeyid: db.open_tree("userid_selfsigningkeyid")?,
+                userid_usersigningkeyid: db.open_tree("userid_usersigningkeyid")?,
                 todeviceid_events: db.open_tree("todeviceid_events")?,
             },
             uiaa: uiaa::Uiaa {
@@ -111,6 +116,11 @@ impl Database {
             media: media::Media {
                 mediaid_file: db.open_tree("mediaid_file")?,
             },
+            key_backups: key_backups::KeyBackups {
+                backupid_algorithm: db.open_tree("backupid_algorithm")?,
+                backupid_etag: db.open_tree("backupid_etag")?,
+                backupkeyid_backup: db.open_tree("backupkeyid_backupmetadata")?,
+            },
             _db: db,
         })
     }
diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs
new file mode 100644
index 0000000..991931b
--- /dev/null
+++ b/src/database/key_backups.rs
@@ -0,0 +1,207 @@
+use crate::{utils, Error, Result};
+use ruma::{
+    api::client::{
+        error::ErrorKind,
+        r0::backup::{get_backup_keys::Sessions, BackupAlgorithm, KeyData},
+    },
+    identifiers::{RoomId, UserId},
+};
+use std::{collections::BTreeMap, convert::TryFrom};
+
+pub struct KeyBackups {
+    pub(super) backupid_algorithm: sled::Tree, // BackupId = UserId + Version(Count)
+    pub(super) backupid_etag: sled::Tree, // BackupId = UserId + Version(Count)
+    pub(super) backupkeyid_backup: sled::Tree, // BackupKeyId = UserId + Version + RoomId + SessionId
+}
+
+impl KeyBackups {
+    pub fn create_backup(
+        &self,
+        user_id: &UserId,
+        backup_metadata: &BackupAlgorithm,
+        globals: &super::globals::Globals,
+    ) -> Result<String> {
+        let version = globals.next_count()?.to_string();
+
+        let mut key = user_id.to_string().as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(&version.as_bytes());
+
+        self.backupid_algorithm.insert(
+            &key,
+            &*serde_json::to_string(backup_metadata)
+                .expect("BackupAlgorithm::to_string always works"),
+        )?;
+        self.backupid_etag
+            .insert(&key, &globals.next_count()?.to_be_bytes())?;
+        Ok(version)
+    }
+
+    pub fn update_backup(
+        &self,
+        user_id: &UserId,
+        version: &str,
+        backup_metadata: &BackupAlgorithm,
+        globals: &super::globals::Globals,
+    ) -> Result<String> {
+        let mut key = user_id.to_string().as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(&version.as_bytes());
+
+        if self.backupid_algorithm.get(&key)?.is_none() {
+            return Err(Error::BadRequest(
+                ErrorKind::NotFound,
+                "Tried to update nonexistent backup.",
+            ));
+        }
+
+        self.backupid_algorithm.insert(
+            &key,
+            &*serde_json::to_string(backup_metadata)
+                .expect("BackupAlgorithm::to_string always works"),
+        )?;
+        self.backupid_etag
+            .insert(&key, &globals.next_count()?.to_be_bytes())?;
+        Ok(version.to_string())
+    }
+
+    pub fn get_latest_backup(&self, user_id: &UserId) -> Result<Option<(String, BackupAlgorithm)>> {
+        let mut prefix = user_id.to_string().as_bytes().to_vec();
+        prefix.push(0xff);
+        self.backupid_algorithm
+            .scan_prefix(&prefix)
+            .last()
+            .map_or(Ok(None), |r| {
+                let (key, value) = r?;
+                let version = utils::string_from_bytes(
+                    key.rsplit(|&b| b == 0xff)
+                        .next()
+                        .expect("rsplit always returns an element"),
+                )
+                .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?;
+
+                Ok(Some((
+                    version,
+                    serde_json::from_slice(&value).map_err(|_| {
+                        Error::bad_database("Algorithm in backupid_algorithm is invalid.")
+                    })?,
+                )))
+            })
+    }
+
+    pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<BackupAlgorithm>> {
+        let mut key = user_id.to_string().as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(version.as_bytes());
+
+        self.backupid_algorithm.get(key)?.map_or(Ok(None), |bytes| {
+            Ok(serde_json::from_slice(&bytes)
+                .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid."))?)
+        })
+    }
+
+    pub fn add_key(
+        &self,
+        user_id: &UserId,
+        version: &str,
+        room_id: &RoomId,
+        session_id: &str,
+        key_data: &KeyData,
+        globals: &super::globals::Globals,
+    ) -> Result<()> {
+        let mut key = user_id.to_string().as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(version.as_bytes());
+
+        if self.backupid_algorithm.get(&key)?.is_none() {
+            return Err(Error::BadRequest(
+                ErrorKind::NotFound,
+                "Tried to update nonexistent backup.",
+            ));
+        }
+
+        self.backupid_etag
+            .insert(&key, &globals.next_count()?.to_be_bytes())?;
+
+        key.push(0xff);
+        key.extend_from_slice(room_id.to_string().as_bytes());
+        key.push(0xff);
+        key.extend_from_slice(session_id.as_bytes());
+
+        self.backupkeyid_backup.insert(
+            &key,
+            &*serde_json::to_string(&key_data).expect("KeyData::to_string always works"),
+        )?;
+
+        Ok(())
+    }
+
+    pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result<usize> {
+        let mut prefix = user_id.to_string().as_bytes().to_vec();
+        prefix.push(0xff);
+        prefix.extend_from_slice(version.as_bytes());
+
+        Ok(self.backupkeyid_backup.scan_prefix(&prefix).count())
+    }
+
+    pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result<String> {
+        let mut key = user_id.to_string().as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(&version.as_bytes());
+
+        Ok(utils::u64_from_bytes(
+            &self
+                .backupid_etag
+                .get(&key)?
+                .ok_or_else(|| Error::bad_database("Backup has no etag."))?,
+        )
+        .map_err(|_| Error::bad_database("etag in backupid_etag invalid."))?
+        .to_string())
+    }
+
+    pub fn get_all(&self, user_id: &UserId, version: &str) -> Result<BTreeMap<RoomId, Sessions>> {
+        let mut prefix = user_id.to_string().as_bytes().to_vec();
+        prefix.push(0xff);
+        prefix.extend_from_slice(version.as_bytes());
+
+        let mut rooms = BTreeMap::<RoomId, Sessions>::new();
+
+        for result in self.backupkeyid_backup.scan_prefix(&prefix).map(|r| {
+            let (key, value) = r?;
+            let mut parts = key.rsplit(|&b| b == 0xff);
+
+            let session_id = utils::string_from_bytes(
+                &parts
+                    .next()
+                    .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?,
+            )
+            .map_err(|_| Error::bad_database("backupkeyid_backup session_id is invalid."))?;
+
+            let room_id = RoomId::try_from(
+                utils::string_from_bytes(
+                    &parts
+                        .next()
+                        .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?,
+                )
+                .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?,
+            )
+            .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid room id."))?;
+
+            let key_data = serde_json::from_slice(&value)
+                .map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid."))?;
+
+            Ok::<_, Error>((room_id, session_id, key_data))
+        }) {
+            let (room_id, session_id, key_data) = result?;
+            rooms
+                .entry(room_id)
+                .or_insert_with(|| Sessions {
+                    sessions: BTreeMap::new(),
+                })
+                .sessions
+                .insert(session_id, key_data);
+        }
+
+        Ok(rooms)
+    }
+}
diff --git a/src/database/rooms.rs b/src/database/rooms.rs
index 604b5aa..bd1a7ba 100644
--- a/src/database/rooms.rs
+++ b/src/database/rooms.rs
@@ -602,13 +602,18 @@ impl Rooms {
     }
 
     /// Returns an iterator over all PDUs in a room.
-    pub fn all_pdus(&self, room_id: &RoomId) -> Result<impl Iterator<Item = Result<PduEvent>>> {
-        self.pdus_since(room_id, 0)
+    pub fn all_pdus(
+        &self,
+        user_id: &UserId,
+        room_id: &RoomId,
+    ) -> Result<impl Iterator<Item = Result<PduEvent>>> {
+        self.pdus_since(user_id, room_id, 0)
     }
 
     /// Returns an iterator over all events in a room that happened after the event with id `since`.
     pub fn pdus_since(
         &self,
+        user_id: &UserId,
         room_id: &RoomId,
         since: u64,
     ) -> Result<impl Iterator<Item = Result<PduEvent>>> {
@@ -617,12 +622,13 @@ impl Rooms {
         pdu_id.push(0xff);
         pdu_id.extend_from_slice(&(since).to_be_bytes());
 
-        self.pdus_since_pduid(room_id, &pdu_id)
+        self.pdus_since_pduid(user_id, room_id, &pdu_id)
     }
 
     /// Returns an iterator over all events in a room that happened after the event with id `since`.
     pub fn pdus_since_pduid(
         &self,
+        user_id: &UserId,
         room_id: &RoomId,
         pdu_id: &[u8],
     ) -> Result<impl Iterator<Item = Result<PduEvent>>> {
@@ -630,6 +636,7 @@ impl Rooms {
         let mut prefix = room_id.to_string().as_bytes().to_vec();
         prefix.push(0xff);
 
+        let user_id = user_id.clone();
         Ok(self
             .pduid_pdu
             .range(pdu_id..)
@@ -641,9 +648,13 @@ impl Rooms {
             })
             .filter_map(|r| r.ok())
             .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(|(_, v)| {
-                Ok(serde_json::from_slice(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?)
+            .map(move |(_, v)| {
+                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
+                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
+                if pdu.sender != user_id {
+                    pdu.unsigned.remove("transaction_id");
+                }
+                Ok(pdu)
             }))
     }
 
@@ -651,6 +662,7 @@ impl Rooms {
     /// `until` in reverse-chronological order.
     pub fn pdus_until(
         &self,
+        user_id: &UserId,
         room_id: &RoomId,
         until: u64,
     ) -> impl Iterator<Item = Result<PduEvent>> {
@@ -663,14 +675,19 @@ impl Rooms {
 
         let current: &[u8] = &current;
 
+        let user_id = user_id.clone();
         self.pduid_pdu
             .range(..current)
             .rev()
             .filter_map(|r| r.ok())
             .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(|(_, v)| {
-                Ok(serde_json::from_slice(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?)
+            .map(move |(_, v)| {
+                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
+                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
+                if pdu.sender != user_id {
+                    pdu.unsigned.remove("transaction_id");
+                }
+                Ok(pdu)
             })
     }
 
@@ -678,6 +695,7 @@ impl Rooms {
     /// `from` in chronological order.
     pub fn pdus_after(
         &self,
+        user_id: &UserId,
         room_id: &RoomId,
         from: u64,
     ) -> impl Iterator<Item = Result<PduEvent>> {
@@ -690,13 +708,18 @@ impl Rooms {
 
         let current: &[u8] = &current;
 
+        let user_id = user_id.clone();
         self.pduid_pdu
             .range(current..)
             .filter_map(|r| r.ok())
             .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(|(_, v)| {
-                Ok(serde_json::from_slice(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?)
+            .map(move |(_, v)| {
+                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
+                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
+                if pdu.sender != user_id {
+                    pdu.unsigned.remove("transaction_id");
+                }
+                Ok(pdu)
             })
     }
 
diff --git a/src/database/users.rs b/src/database/users.rs
index 07c6912..6f90537 100644
--- a/src/database/users.rs
+++ b/src/database/users.rs
@@ -1,9 +1,12 @@
 use crate::{utils, Error, Result};
 use js_int::UInt;
 use ruma::{
-    api::client::r0::{
-        device::Device,
-        keys::{AlgorithmAndDeviceId, DeviceKeys, KeyAlgorithm, OneTimeKey},
+    api::client::{
+        error::ErrorKind,
+        r0::{
+            device::Device,
+            keys::{AlgorithmAndDeviceId, CrossSigningKey, DeviceKeys, KeyAlgorithm, OneTimeKey},
+        },
     },
     events::{to_device::AnyToDeviceEvent, EventJson, EventType},
     identifiers::UserId,
@@ -19,8 +22,11 @@ pub struct Users {
     pub(super) token_userdeviceid: sled::Tree,
 
     pub(super) onetimekeyid_onetimekeys: sled::Tree, // OneTimeKeyId = UserId + AlgorithmAndDeviceId
-    pub(super) userdeviceid_devicekeys: sled::Tree,
-    pub(super) devicekeychangeid_userid: sled::Tree, // DeviceKeyChangeId = Count
+    pub(super) keychangeid_userid: sled::Tree, // KeyChangeId = Count
+    pub(super) keyid_key: sled::Tree, // KeyId = UserId + KeyId (depends on key type)
+    pub(super) userid_masterkeyid: sled::Tree,
+    pub(super) userid_selfsigningkeyid: sled::Tree,
+    pub(super) userid_usersigningkeyid: sled::Tree,
 
     pub(super) todeviceid_events: sled::Tree, // ToDeviceId = UserId + DeviceId + Count
 }
@@ -171,9 +177,6 @@ impl Users {
         userdeviceid.push(0xff);
         userdeviceid.extend_from_slice(device_id.as_bytes());
 
-        // Remove device keys
-        self.userdeviceid_devicekeys.remove(&userdeviceid)?;
-
         // Remove tokens
         if let Some(old_token) = self.userdeviceid_token.remove(&userdeviceid)? {
             self.token_userdeviceid.remove(&old_token)?;
@@ -350,38 +353,168 @@ impl Users {
         userdeviceid.push(0xff);
         userdeviceid.extend_from_slice(device_id.as_bytes());
 
-        self.userdeviceid_devicekeys.insert(
+        self.keyid_key.insert(
             &userdeviceid,
             &*serde_json::to_string(&device_keys).expect("DeviceKeys::to_string always works"),
         )?;
 
-        self.devicekeychangeid_userid
+        self.keychangeid_userid
             .insert(globals.next_count()?.to_be_bytes(), &*user_id.to_string())?;
 
         Ok(())
     }
 
-    pub fn get_device_keys(
+    pub fn add_cross_signing_keys(
         &self,
         user_id: &UserId,
-        device_id: &str,
-    ) -> impl Iterator<Item = Result<DeviceKeys>> {
-        let mut key = user_id.to_string().as_bytes().to_vec();
-        key.push(0xff);
-        key.extend_from_slice(device_id.as_bytes());
+        master_key: &CrossSigningKey,
+        self_signing_key: &Option<CrossSigningKey>,
+        user_signing_key: &Option<CrossSigningKey>,
+        globals: &super::globals::Globals,
+    ) -> Result<()> {
+        // TODO: Check signatures
 
-        self.userdeviceid_devicekeys
-            .scan_prefix(key)
-            .values()
-            .map(|bytes| {
-                Ok(serde_json::from_slice(&bytes?)
-                    .map_err(|_| Error::bad_database("DeviceKeys in db are invalid."))?)
-            })
+        let mut prefix = user_id.to_string().as_bytes().to_vec();
+        prefix.push(0xff);
+
+        // Master key
+        let mut master_key_ids = master_key.keys.values();
+        let master_key_id = master_key_ids.next().ok_or(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Master key contained no key.",
+        ))?;
+
+        if master_key_ids.next().is_some() {
+            return Err(Error::BadRequest(
+                ErrorKind::InvalidParam,
+                "Master key contained more than one key.",
+            ));
+        }
+
+        let mut master_key_key = prefix.clone();
+        master_key_key.extend_from_slice(master_key_id.as_bytes());
+
+        self.keyid_key.insert(
+            &master_key_key,
+            &*serde_json::to_string(&master_key).expect("CrossSigningKey::to_string always works"),
+        )?;
+
+        self.userid_masterkeyid
+            .insert(&*user_id.to_string(), master_key_key)?;
+
+        // Self-signing key
+        if let Some(self_signing_key) = self_signing_key {
+            let mut self_signing_key_ids = self_signing_key.keys.values();
+            let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest(
+                ErrorKind::InvalidParam,
+                "Self signing key contained no key.",
+            ))?;
+
+            if self_signing_key_ids.next().is_some() {
+                return Err(Error::BadRequest(
+                    ErrorKind::InvalidParam,
+                    "Self signing key contained more than one key.",
+                ));
+            }
+
+            let mut self_signing_key_key = prefix.clone();
+            self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes());
+
+            self.keyid_key.insert(
+                &self_signing_key_key,
+                &*serde_json::to_string(&self_signing_key)
+                    .expect("CrossSigningKey::to_string always works"),
+            )?;
+
+            self.userid_selfsigningkeyid
+                .insert(&*user_id.to_string(), self_signing_key_key)?;
+        }
+
+        // User-signing key
+        if let Some(user_signing_key) = user_signing_key {
+            let mut user_signing_key_ids = user_signing_key.keys.values();
+            let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest(
+                ErrorKind::InvalidParam,
+                "User signing key contained no key.",
+            ))?;
+
+            if user_signing_key_ids.next().is_some() {
+                return Err(Error::BadRequest(
+                    ErrorKind::InvalidParam,
+                    "User signing key contained more than one key.",
+                ));
+            }
+
+            let mut user_signing_key_key = prefix.clone();
+            user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes());
+
+            self.keyid_key.insert(
+                &user_signing_key_key,
+                &*serde_json::to_string(&user_signing_key)
+                    .expect("CrossSigningKey::to_string always works"),
+            )?;
+
+            self.userid_usersigningkeyid
+                .insert(&*user_id.to_string(), user_signing_key_key)?;
+        }
+
+        self.keychangeid_userid
+            .insert(globals.next_count()?.to_be_bytes(), &*user_id.to_string())?;
+
+        Ok(())
     }
 
-    pub fn device_keys_changed(&self, since: u64) -> impl Iterator<Item = Result<UserId>> {
-        self.devicekeychangeid_userid
-            .range(since.to_be_bytes()..)
+    pub fn sign_key(
+        &self,
+        target_id: &UserId,
+        key_id: &str,
+        signature: (String, String),
+        sender_id: &UserId,
+        globals: &super::globals::Globals,
+    ) -> Result<()> {
+        println!(
+            "Adding signatures on {}'s {} by {}: {}->{}",
+            target_id, key_id, sender_id, signature.0, signature.1
+        );
+
+        let mut key = target_id.to_string().as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(key_id.to_string().as_bytes());
+
+        let mut cross_signing_key =
+            serde_json::from_slice::<serde_json::Value>(&self.keyid_key.get(&key)?.ok_or(
+                Error::BadRequest(ErrorKind::InvalidParam, "Tried to sign nonexistent key."),
+            )?)
+            .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?;
+
+        let signatures = cross_signing_key
+            .get_mut("signatures")
+            .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))?
+            .as_object_mut()
+            .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))?
+            .entry(sender_id.clone())
+            .or_insert_with(|| serde_json::Map::new().into());
+
+        signatures
+            .as_object_mut()
+            .ok_or_else(|| Error::bad_database("signatures in keyid_key for a user is invalid."))?
+            .insert(signature.0, signature.1.into());
+
+        self.keyid_key.insert(
+            &key,
+            &*serde_json::to_string(&cross_signing_key)
+                .expect("CrossSigningKey::to_string always works"),
+        )?;
+
+        self.keychangeid_userid
+            .insert(globals.next_count()?.to_be_bytes(), &*target_id.to_string())?;
+
+        Ok(())
+    }
+
+    pub fn keys_changed(&self, since: u64) -> impl Iterator<Item = Result<UserId>> {
+        self.keychangeid_userid
+            .range((since + 1).to_be_bytes()..)
             .values()
             .map(|bytes| {
                 Ok(
@@ -397,29 +530,85 @@ impl Users {
             })
     }
 
-    pub fn all_device_keys(
-        &self,
-        user_id: &UserId,
-    ) -> impl Iterator<Item = Result<(String, DeviceKeys)>> {
+    pub fn get_device_keys(&self, user_id: &UserId, device_id: &str) -> Result<Option<DeviceKeys>> {
         let mut key = user_id.to_string().as_bytes().to_vec();
         key.push(0xff);
+        key.extend_from_slice(device_id.as_bytes());
 
-        self.userdeviceid_devicekeys.scan_prefix(key).map(|r| {
-            let (key, value) = r?;
-            let userdeviceid = utils::string_from_bytes(
-                key.rsplit(|&b| b == 0xff)
-                    .next()
-                    .ok_or_else(|| Error::bad_database("UserDeviceID in db is invalid."))?,
-            )
-            .map_err(|_| Error::bad_database("UserDeviceId in db is invalid."))?;
-            Ok((
-                userdeviceid,
-                serde_json::from_slice(&*value)
-                    .map_err(|_| Error::bad_database("DeviceKeys in db are invalid."))?,
-            ))
+        self.keyid_key.get(key)?.map_or(Ok(None), |bytes| {
+            Ok(Some(serde_json::from_slice(&bytes).map_err(|_| {
+                Error::bad_database("DeviceKeys in db are invalid.")
+            })?))
         })
     }
 
+    pub fn get_master_key(
+        &self,
+        user_id: &UserId,
+        sender_id: &UserId,
+    ) -> Result<Option<CrossSigningKey>> {
+        // TODO: hide some signatures
+        self.userid_masterkeyid
+            .get(user_id.to_string())?
+            .map_or(Ok(None), |key| {
+                self.keyid_key.get(key)?.map_or(Ok(None), |bytes| {
+                    let mut cross_signing_key = serde_json::from_slice::<CrossSigningKey>(&bytes)
+                        .map_err(|_| {
+                            Error::bad_database("CrossSigningKey in db is invalid.")
+                        })?;
+
+                    // A user is not allowed to see signatures from users other than himself and
+                    // the target user
+                    cross_signing_key.signatures = cross_signing_key
+                        .signatures
+                        .into_iter()
+                        .filter(|(user, _)| user == user_id || user == sender_id)
+                        .collect();
+
+                    Ok(Some(cross_signing_key))
+                })
+            })
+    }
+
+    pub fn get_self_signing_key(
+        &self,
+        user_id: &UserId,
+        sender_id: &UserId,
+    ) -> Result<Option<CrossSigningKey>> {
+        self.userid_selfsigningkeyid
+            .get(user_id.to_string())?
+            .map_or(Ok(None), |key| {
+                self.keyid_key.get(key)?.map_or(Ok(None), |bytes| {
+                    let mut cross_signing_key = serde_json::from_slice::<CrossSigningKey>(&bytes)
+                        .map_err(|_| {
+                            Error::bad_database("CrossSigningKey in db is invalid.")
+                        })?;
+
+                    // A user is not allowed to see signatures from users other than himself and
+                    // the target user
+                    cross_signing_key.signatures = cross_signing_key
+                        .signatures
+                        .into_iter()
+                        .filter(|(user, _)| user == user_id || user == sender_id)
+                        .collect();
+
+                    Ok(Some(cross_signing_key))
+                })
+            })
+    }
+
+    pub fn get_user_signing_key(&self, user_id: &UserId) -> Result<Option<CrossSigningKey>> {
+        self.userid_usersigningkeyid
+            .get(user_id.to_string())?
+            .map_or(Ok(None), |key| {
+                self.keyid_key.get(key)?.map_or(Ok(None), |bytes| {
+                    Ok(Some(serde_json::from_slice(&bytes).map_err(|_| {
+                        Error::bad_database("CrossSigningKey in db is invalid.")
+                    })?))
+                })
+            })
+    }
+
     pub fn add_to_device_event(
         &self,
         sender: &UserId,
diff --git a/src/main.rs b/src/main.rs
index 5c51ae0..a6c1afc 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -46,6 +46,12 @@ fn setup_rocket() -> rocket::Rocket {
                 client_server::upload_keys_route,
                 client_server::get_keys_route,
                 client_server::claim_keys_route,
+                client_server::create_backup_route,
+                client_server::update_backup_route,
+                client_server::get_latest_backup_route,
+                client_server::get_backup_route,
+                client_server::add_backup_keys_route,
+                client_server::get_backup_keys_route,
                 client_server::set_read_marker_route,
                 client_server::create_typing_event_route,
                 client_server::create_room_route,
@@ -90,6 +96,10 @@ fn setup_rocket() -> rocket::Rocket {
                 client_server::delete_device_route,
                 client_server::delete_devices_route,
                 client_server::options_route,
+                client_server::upload_signing_keys_route,
+                client_server::upload_signatures_route,
+                client_server::pushers_route,
+                client_server::set_pushers_route,
                 //server_server::well_known_server,
                 //server_server::get_server_version,
                 //server_server::get_server_keys,
diff --git a/src/push_rules.rs b/src/push_rules.rs
index 44c9363..90e9243 100644
--- a/src/push_rules.rs
+++ b/src/push_rules.rs
@@ -1,7 +1,5 @@
 use ruma::{
-    events::push_rules::{
-        ConditionalPushRule, PatternedPushRule, PushCondition, PushRule, Ruleset,
-    },
+    events::push_rules::{ConditionalPushRule, PatternedPushRule, PushCondition, Ruleset},
     identifiers::UserId,
     push::{Action, Tweak},
 };
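Illustrative note (not part of the patch): the new `key_backups` tree stores every backed-up session under a composite sled key of the form `user_id ++ 0xff ++ version ++ 0xff ++ room_id ++ 0xff ++ session_id`, and `get_all` recovers the session and room IDs by splitting the key on `0xff` from the right. A minimal, self-contained sketch of that encode/decode scheme (helper names are hypothetical; the real code operates on `sled::Tree` keys and typed IDs):

```rust
// Sketch of the composite-key layout used by the new key_backups tree.
// user_id ++ 0xff ++ version ++ 0xff ++ room_id ++ 0xff ++ session_id
fn backup_key(user_id: &str, version: &str, room_id: &str, session_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(version.as_bytes());
    key.push(0xff);
    key.extend_from_slice(room_id.as_bytes());
    key.push(0xff);
    key.extend_from_slice(session_id.as_bytes());
    key
}

// Mirrors get_all: rsplit on 0xff, so the last two segments are the
// session_id and the room_id (in that order).
fn split_backup_key(key: &[u8]) -> Option<(String, String)> {
    let mut parts = key.rsplit(|&b| b == 0xff);
    let session_id = String::from_utf8(parts.next()?.to_vec()).ok()?;
    let room_id = String::from_utf8(parts.next()?.to_vec()).ok()?;
    Some((room_id, session_id))
}

fn main() {
    let key = backup_key("@alice:example.com", "42", "!room:example.com", "session1");
    assert_eq!(
        split_backup_key(&key),
        Some(("!room:example.com".into(), "session1".into()))
    );
    println!("ok");
}
```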