Merge branch 'stuff' into 'master'

fix: stuck/duplicate messages, e2ee and sync timeouts

See merge request famedly/conduit!123
Branch: next
Timo Kösters 2021-07-15 08:18:56 +00:00
commit f7ecf83ac3
18 changed files with 705 additions and 355 deletions
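
Every file below revolves around one pattern: a per-room async mutex, stored in Globals as RwLock<BTreeMap<RoomId, Arc<Mutex<()>>>>, that serializes event creation so two handlers can never build PDUs against the same prev_events (the cause of the stuck/duplicate messages named in the title). A minimal self-contained sketch of that pattern, with a toy RoomId and Globals standing in for Conduit's real types:

use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};
use tokio::sync::Mutex;

type RoomId = String; // toy stand-in; Conduit uses ruma::RoomId

#[derive(Default)]
struct Globals {
    roomid_mutex: RwLock<BTreeMap<RoomId, Arc<Mutex<()>>>>,
}

impl Globals {
    // Clone the Arc out of the map so the short-lived RwLock write guard
    // is released before we await the (possibly contended) room mutex.
    fn room_mutex(&self, room_id: &RoomId) -> Arc<Mutex<()>> {
        Arc::clone(
            self.roomid_mutex
                .write()
                .unwrap()
                .entry(room_id.clone())
                .or_default(),
        )
    }
}

#[tokio::main]
async fn main() {
    let globals = Arc::new(Globals::default());
    let room: RoomId = "!room:example.org".to_owned();

    let mut handles = Vec::new();
    for i in 0..2 {
        let (globals, room) = (Arc::clone(&globals), room.clone());
        handles.push(tokio::spawn(async move {
            let mutex = globals.room_mutex(&room);
            let _lock = mutex.lock().await;
            // With the lock held, reading the pdu leaves cannot race
            // another append, so no two events share prev_events.
            println!("task {} appends its PDU", i);
        }));
    }
    for h in handles {
        h.await.unwrap();
    }
}

Note that only the cheap map lookup happens under the std RwLock; the await on the room mutex happens after its guard is gone.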

View File

@ -1 +1 @@
-1.52.0
+1.52

View File

@ -1,4 +1,4 @@
-use std::{collections::BTreeMap, convert::TryInto};
+use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
@ -238,6 +238,16 @@ pub async fn register_route(
let room_id = RoomId::new(db.globals.server_name()); let room_id = RoomId::new(db.globals.server_name());
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone()); let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone());
content.federate = true; content.federate = true;
content.predecessor = None; content.predecessor = None;
@ -255,6 +265,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 2. Make conduit bot join // 2. Make conduit bot join
@ -276,6 +287,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 3. Power levels // 3. Power levels
@ -300,6 +312,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.1 Join Rules // 4.1 Join Rules
@ -317,6 +330,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.2 History Visibility // 4.2 History Visibility
@ -336,6 +350,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.3 Guest Access // 4.3 Guest Access
@ -353,6 +368,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 6. Events implied by name and topic // 6. Events implied by name and topic
@ -372,6 +388,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
@ -388,6 +405,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// Room alias // Room alias
@ -410,6 +428,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
@ -433,6 +452,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
@ -452,6 +472,7 @@ pub async fn register_route(
&user_id, &user_id,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// Send welcome message // Send welcome message
@ -470,6 +491,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }
@ -641,6 +663,16 @@ pub async fn deactivate_route(
third_party_invite: None, third_party_invite: None,
}; };
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
@ -652,6 +684,7 @@ pub async fn deactivate_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }

View File

@ -203,6 +203,16 @@ pub async fn kick_user_route(
event.membership = ruma::events::room::member::MembershipState::Leave; event.membership = ruma::events::room::member::MembershipState::Leave;
// TODO: reason // TODO: reason
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
@ -214,8 +224,11 @@ pub async fn kick_user_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(kick_user::Response::new().into()) Ok(kick_user::Response::new().into())
@ -261,6 +274,16 @@ pub async fn ban_user_route(
}, },
)?; )?;
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
@ -272,8 +295,11 @@ pub async fn ban_user_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(ban_user::Response::new().into()) Ok(ban_user::Response::new().into())
@ -310,6 +336,16 @@ pub async fn unban_user_route(
event.membership = ruma::events::room::member::MembershipState::Leave; event.membership = ruma::events::room::member::MembershipState::Leave;
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
@ -321,8 +357,11 @@ pub async fn unban_user_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(unban_user::Response::new().into()) Ok(unban_user::Response::new().into())
@ -446,6 +485,16 @@ async fn join_room_by_id_helper(
) -> ConduitResult<join_room_by_id::Response> { ) -> ConduitResult<join_room_by_id::Response> {
let sender_user = sender_user.expect("user is authenticated"); let sender_user = sender_user.expect("user is authenticated");
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
// Ask a remote server if we don't have this room // Ask a remote server if we don't have this room
if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() { if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() {
let mut make_join_response_and_server = Err(Error::BadServerResponse( let mut make_join_response_and_server = Err(Error::BadServerResponse(
@ -619,16 +668,9 @@ async fn join_room_by_id_helper(
// pdu without it's state. This is okay because append_pdu can't fail. // pdu without it's state. This is okay because append_pdu can't fail.
let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?;
-let count = db.globals.next_count()?;
-let mut pdu_id = room_id.as_bytes().to_vec();
-pdu_id.push(0xff);
-pdu_id.extend_from_slice(&count.to_be_bytes());
-
db.rooms.append_pdu(
    &pdu,
    utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"),
-    count,
-    &pdu_id,
    &[pdu.event_id.clone()],
    db,
)?;
@ -656,9 +698,12 @@ async fn join_room_by_id_helper(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(join_room_by_id::Response::new(room_id.clone()).into()) Ok(join_room_by_id::Response::new(room_id.clone()).into())
@ -728,13 +773,23 @@ async fn validate_and_add_event_id(
Ok((event_id, value)) Ok((event_id, value))
} }
-pub async fn invite_helper(
+pub async fn invite_helper<'a>(
sender_user: &UserId, sender_user: &UserId,
user_id: &UserId, user_id: &UserId,
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
is_direct: bool, is_direct: bool,
) -> Result<()> { ) -> Result<()> {
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
if user_id.server_name() != db.globals.server_name() { if user_id.server_name() != db.globals.server_name() {
let prev_events = db let prev_events = db
.rooms .rooms
@ -870,6 +925,8 @@ pub async fn invite_helper(
) )
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
drop(mutex_lock);
let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; let invite_room_state = db.rooms.calculate_invite_state(&pdu)?;
let response = db let response = db
.sending .sending
@ -909,8 +966,15 @@ pub async fn invite_helper(
) )
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?;
-let pdu_id =
-    server_server::handle_incoming_pdu(&origin, &event_id, value, true, &db, &pub_key_map)
+let pdu_id = server_server::handle_incoming_pdu(
+    &origin,
+    &event_id,
+    &room_id,
+    value,
+    true,
+    &db,
+    &pub_key_map,
+)
.await .await
.map_err(|_| { .map_err(|_| {
Error::BadRequest( Error::BadRequest(
@ -953,6 +1017,7 @@ pub async fn invite_helper(
&sender_user, &sender_user,
room_id, room_id,
&db, &db,
&mutex_lock,
)?; )?;
Ok(()) Ok(())
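
Note the ordering every write path in this file now follows: drop(mutex_lock) comes before db.flush().await, so the durable flush runs outside the per-room critical section and other senders in the same room are not blocked on disk I/O. Reduced to a sketch (flush here is a hypothetical stand-in for db.flush()):

use std::sync::Arc;
use tokio::sync::Mutex;

async fn flush() { /* stands in for db.flush().await */ }

#[tokio::main]
async fn main() {
    let room_mutex = Arc::new(Mutex::new(()));

    let lock = room_mutex.lock().await;
    // build_and_append_pdu(..., &lock) would run here
    drop(lock); // release the room before the slow, room-independent part

    flush().await;
}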

View File

@ -10,6 +10,7 @@ use ruma::{
use std::{ use std::{
collections::BTreeMap, collections::BTreeMap,
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
sync::Arc,
}; };
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
@ -27,6 +28,16 @@ pub async fn send_message_event_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref(); let sender_device = body.sender_device.as_deref();
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
// Check if this is a new transaction id // Check if this is a new transaction id
if let Some(response) = if let Some(response) =
db.transaction_ids db.transaction_ids
@ -64,6 +75,7 @@ pub async fn send_message_event_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
db.transaction_ids.add_txnid( db.transaction_ids.add_txnid(
@ -73,6 +85,8 @@ pub async fn send_message_event_route(
event_id.as_bytes(), event_id.as_bytes(),
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(send_message_event::Response::new(event_id).into()) Ok(send_message_event::Response::new(event_id).into())

View File

@ -9,7 +9,7 @@ use ruma::{
events::EventType, events::EventType,
serde::Raw, serde::Raw,
}; };
-use std::convert::TryInto;
+use std::{convert::TryInto, sync::Arc};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{get, put}; use rocket::{get, put};
@ -69,9 +69,19 @@ pub async fn set_displayname_route(
}) })
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
{ {
-let _ = db
-    .rooms
-    .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db);
+let mutex = Arc::clone(
+    db.globals
+        .roomid_mutex
+        .write()
+        .unwrap()
+        .entry(room_id.clone())
+        .or_default(),
+);
+let mutex_lock = mutex.lock().await;
+
+let _ =
+    db.rooms
+        .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock);
// Presence update // Presence update
db.rooms.edus.update_presence( db.rooms.edus.update_presence(
@ -171,9 +181,19 @@ pub async fn set_avatar_url_route(
}) })
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
{ {
-let _ = db
-    .rooms
-    .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db);
+let mutex = Arc::clone(
+    db.globals
+        .roomid_mutex
+        .write()
+        .unwrap()
+        .entry(room_id.clone())
+        .or_default(),
+);
+let mutex_lock = mutex.lock().await;
+
+let _ =
+    db.rooms
+        .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock);
// Presence update // Presence update
db.rooms.edus.update_presence( db.rooms.edus.update_presence(

View File

@ -1,3 +1,5 @@
use std::sync::Arc;
use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma}; use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma};
use ruma::{ use ruma::{
api::client::r0::redact::redact_event, api::client::r0::redact::redact_event,
@ -18,6 +20,16 @@ pub async fn redact_event_route(
) -> ConduitResult<redact_event::Response> { ) -> ConduitResult<redact_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let event_id = db.rooms.build_and_append_pdu( let event_id = db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomRedaction, event_type: EventType::RoomRedaction,
@ -32,8 +44,11 @@ pub async fn redact_event_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(redact_event::Response { event_id }.into()) Ok(redact_event::Response { event_id }.into())

View File

@ -15,7 +15,7 @@ use ruma::{
serde::Raw, serde::Raw,
RoomAliasId, RoomId, RoomVersionId, RoomAliasId, RoomId, RoomVersionId,
}; };
-use std::{cmp::max, collections::BTreeMap, convert::TryFrom};
+use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{get, post}; use rocket::{get, post};
@ -33,6 +33,16 @@ pub async fn create_room_route(
let room_id = RoomId::new(db.globals.server_name()); let room_id = RoomId::new(db.globals.server_name());
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let alias = body let alias = body
.room_alias_name .room_alias_name
.as_ref() .as_ref()
@ -69,6 +79,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 2. Let the room creator join // 2. Let the room creator join
@ -90,6 +101,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 3. Power levels // 3. Power levels
@ -144,6 +156,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4. Events set by preset // 4. Events set by preset
@ -170,6 +183,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.2 History Visibility // 4.2 History Visibility
@ -187,6 +201,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.3 Guest Access // 4.3 Guest Access
@ -212,6 +227,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 5. Events listed in initial_state // 5. Events listed in initial_state
@ -227,7 +243,7 @@ pub async fn create_room_route(
} }
db.rooms db.rooms
-    .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?;
+    .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock)?;
} }
// 6. Events implied by name and topic // 6. Events implied by name and topic
@ -248,6 +264,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }
@ -266,10 +283,12 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }
// 7. Events implied by invite (and TODO: invite_3pid) // 7. Events implied by invite (and TODO: invite_3pid)
drop(mutex_lock);
for user_id in &body.invite { for user_id in &body.invite {
let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await;
} }
@ -340,6 +359,16 @@ pub async fn upgrade_room_route(
// Create a replacement room // Create a replacement room
let replacement_room = RoomId::new(db.globals.server_name()); let replacement_room = RoomId::new(db.globals.server_name());
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
// Fail if the sender does not have the required permissions // Fail if the sender does not have the required permissions
let tombstone_event_id = db.rooms.build_and_append_pdu( let tombstone_event_id = db.rooms.build_and_append_pdu(
@ -357,6 +386,7 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
// Get the old room federations status // Get the old room federations status
@ -397,6 +427,7 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&replacement_room, &replacement_room,
&db, &db,
&mutex_lock,
)?; )?;
// Join the new room // Join the new room
@ -418,6 +449,7 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&replacement_room, &replacement_room,
&db, &db,
&mutex_lock,
)?; )?;
// Recommended transferable state events list from the specs // Recommended transferable state events list from the specs
@ -451,6 +483,7 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&replacement_room, &replacement_room,
&db, &db,
&mutex_lock,
)?; )?;
} }
@ -494,8 +527,11 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
// Return the replacement room id // Return the replacement room id

View File

@ -1,3 +1,5 @@
use std::sync::Arc;
use crate::{ use crate::{
database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma,
}; };
@ -257,6 +259,16 @@ pub async fn send_state_event_for_key_helper(
} }
} }
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let event_id = db.rooms.build_and_append_pdu( let event_id = db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type, event_type,
@ -268,6 +280,7 @@ pub async fn send_state_event_for_key_helper(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
Ok(event_id) Ok(event_id)

View File

@ -89,7 +89,9 @@ pub async fn sync_events_route(
let we_have_to_wait = rx.borrow().is_none(); let we_have_to_wait = rx.borrow().is_none();
if we_have_to_wait { if we_have_to_wait {
-let _ = rx.changed().await;
+if let Err(e) = rx.changed().await {
+    error!("Error waiting for sync: {}", e);
+}
} }
let result = match rx let result = match rx
@ -187,6 +189,18 @@ async fn sync_helper(
for room_id in db.rooms.rooms_joined(&sender_user) { for room_id in db.rooms.rooms_joined(&sender_user) {
let room_id = room_id?; let room_id = room_id?;
// Get and drop the lock to wait for remaining operations to finish
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
drop(mutex_lock);
let mut non_timeline_pdus = db let mut non_timeline_pdus = db
.rooms .rooms
.pdus_until(&sender_user, &room_id, u64::MAX) .pdus_until(&sender_user, &room_id, u64::MAX)
@ -225,13 +239,16 @@ async fn sync_helper(
// Database queries: // Database queries:
-let current_shortstatehash = db.rooms.current_shortstatehash(&room_id)?;
+let current_shortstatehash = db
+    .rooms
+    .current_shortstatehash(&room_id)?
+    .expect("All rooms have state");

-// These type is Option<Option<_>>. The outer Option is None when there is no event between
-// since and the current room state, meaning there should be no updates.
-// The inner Option is None when there is an event, but there is no state hash associated
-// with it. This can happen for the RoomCreate event, so all updates should arrive.
-let first_pdu_before_since = db.rooms.pdus_until(&sender_user, &room_id, since).next();
+let first_pdu_before_since = db
+    .rooms
+    .pdus_until(&sender_user, &room_id, since)
+    .next()
+    .transpose()?;
let pdus_after_since = db let pdus_after_since = db
.rooms .rooms
@ -239,152 +256,18 @@ async fn sync_helper(
.next() .next()
.is_some(); .is_some();
-let since_shortstatehash = first_pdu_before_since.as_ref().map(|pdu| {
-    db.rooms
-        .pdu_shortstatehash(&pdu.as_ref().ok()?.1.event_id)
-        .ok()?
-});
+let since_shortstatehash = first_pdu_before_since
+    .as_ref()
+    .map(|pdu| {
+        db.rooms
+            .pdu_shortstatehash(&pdu.1.event_id)
+            .transpose()
+            .expect("all pdus have state")
+    })
+    .transpose()?;

-let (
-    heroes,
-    joined_member_count,
-    invited_member_count,
-    joined_since_last_sync,
-    state_events,
-) = if pdus_after_since && Some(current_shortstatehash) != since_shortstatehash {
-    let current_state = db.rooms.room_state_full(&room_id)?;
-    let current_members = current_state
-        .iter()
-        .filter(|(key, _)| key.0 == EventType::RoomMember)
-        .map(|(key, value)| (&key.1, value)) // Only keep state key
-        .collect::<Vec<_>>();
-    let encrypted_room = current_state
-        .get(&(EventType::RoomEncryption, "".to_owned()))
-        .is_some();
-    let since_state = since_shortstatehash
-        .as_ref()
-        .map(|since_shortstatehash| {
-            since_shortstatehash
-                .map(|since_shortstatehash| db.rooms.state_full(since_shortstatehash))
-                .transpose()
-        })
-        .transpose()?;
-
-    let since_encryption = since_state.as_ref().map(|state| {
-        state
-            .as_ref()
-            .map(|state| state.get(&(EventType::RoomEncryption, "".to_owned())))
-    });
-
-    // Calculations:
-    let new_encrypted_room =
-        encrypted_room && since_encryption.map_or(true, |encryption| encryption.is_none());
-
-    let send_member_count = since_state.as_ref().map_or(true, |since_state| {
-        since_state.as_ref().map_or(true, |since_state| {
-            current_members.len()
-                != since_state
-                    .iter()
-                    .filter(|(key, _)| key.0 == EventType::RoomMember)
-                    .count()
-        })
-    });
-
-    let since_sender_member = since_state.as_ref().map(|since_state| {
-        since_state.as_ref().and_then(|state| {
-            state
-                .get(&(EventType::RoomMember, sender_user.as_str().to_owned()))
-                .and_then(|pdu| {
-                    serde_json::from_value::<
-                        Raw<ruma::events::room::member::MemberEventContent>,
-                    >(pdu.content.clone())
-                    .expect("Raw::from_value always works")
-                    .deserialize()
-                    .map_err(|_| Error::bad_database("Invalid PDU in database."))
-                    .ok()
-                })
-        })
-    });
-
-    if encrypted_room {
-        for (user_id, current_member) in current_members {
-            let current_membership = serde_json::from_value::<
-                Raw<ruma::events::room::member::MemberEventContent>,
-            >(current_member.content.clone())
-            .expect("Raw::from_value always works")
-            .deserialize()
-            .map_err(|_| Error::bad_database("Invalid PDU in database."))?
-            .membership;
-
-            let since_membership =
-                since_state
-                    .as_ref()
-                    .map_or(MembershipState::Leave, |since_state| {
-                        since_state
-                            .as_ref()
-                            .and_then(|since_state| {
-                                since_state
-                                    .get(&(EventType::RoomMember, user_id.clone()))
-                                    .and_then(|since_member| {
-                                        serde_json::from_value::<
-                                            Raw<ruma::events::room::member::MemberEventContent>,
-                                        >(
-                                            since_member.content.clone()
-                                        )
-                                        .expect("Raw::from_value always works")
-                                        .deserialize()
-                                        .map_err(|_| {
-                                            Error::bad_database("Invalid PDU in database.")
-                                        })
-                                        .ok()
-                                    })
-                            })
-                            .map_or(MembershipState::Leave, |member| member.membership)
-                    });
-
-            let user_id = UserId::try_from(user_id.clone())
-                .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
-
-            match (since_membership, current_membership) {
-                (MembershipState::Leave, MembershipState::Join) => {
-                    // A new user joined an encrypted room
-                    if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
-                        device_list_updates.insert(user_id);
-                    }
-                }
-                // TODO: Remove, this should never happen here, right?
-                (MembershipState::Join, MembershipState::Leave) => {
-                    // Write down users that have left encrypted rooms we are in
-                    left_encrypted_users.insert(user_id);
-                }
-                _ => {}
-            }
-        }
-    }
-
-    let joined_since_last_sync = since_sender_member.map_or(true, |member| {
-        member.map_or(true, |member| member.membership != MembershipState::Join)
-    });
-
-    if joined_since_last_sync && encrypted_room || new_encrypted_room {
-        // If the user is in a new encrypted room, give them all joined users
-        device_list_updates.extend(
-            db.rooms
-                .room_members(&room_id)
-                .flatten()
-                .filter(|user_id| {
-                    // Don't send key updates from the sender to the sender
-                    &sender_user != user_id
-                })
-                .filter(|user_id| {
-                    // Only send keys if the sender doesn't share an encrypted room with the target already
-                    !share_encrypted_room(&db, &sender_user, user_id, &room_id)
-                        .unwrap_or(false)
-                }),
-        );
-    }
-
-    let (joined_member_count, invited_member_count, heroes) = if send_member_count {
+// Calculates joined_member_count, invited_member_count and heroes
+let calculate_counts = || {
    let joined_member_count = db.rooms.room_members(&room_id).count();
    let invited_member_count = db.rooms.room_members_invited(&room_id).count();
@ -404,13 +287,10 @@ async fn sync_helper(
    let content = serde_json::from_value::<
        ruma::events::room::member::MemberEventContent,
    >(pdu.content.clone())
-    .map_err(|_| {
-        Error::bad_database("Invalid member event in database.")
-    })?;
+    .map_err(|_| Error::bad_database("Invalid member event in database."))?;

    if let Some(state_key) = &pdu.state_key {
-        let user_id =
-            UserId::try_from(state_key.clone()).map_err(|_| {
+        let user_id = UserId::try_from(state_key.clone()).map_err(|_| {
            Error::bad_database("Invalid UserId in member PDU.")
        })?;
@ -447,36 +327,183 @@ async fn sync_helper(
Some(invited_member_count), Some(invited_member_count),
heroes, heroes,
) )
-} else {
-    (None, None, Vec::new())
};
-let state_events = if joined_since_last_sync {
-    current_state
-        .iter()
-        .map(|(_, pdu)| pdu.to_sync_state_event())
-        .collect()
-} else {
-    match since_state {
-        None => Vec::new(),
-        Some(Some(since_state)) => current_state
-            .iter()
-            .filter(|(key, value)| {
-                since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id)
-            })
-            .filter(|(_, value)| {
-                !timeline_pdus.iter().any(|(_, timeline_pdu)| {
-                    timeline_pdu.kind == value.kind
-                        && timeline_pdu.state_key == value.state_key
-                })
-            })
-            .map(|(_, pdu)| pdu.to_sync_state_event())
-            .collect(),
-        Some(None) => current_state
-            .iter()
-            .map(|(_, pdu)| pdu.to_sync_state_event())
-            .collect(),
-    }
-};
+let (
+    heroes,
+    joined_member_count,
+    invited_member_count,
+    joined_since_last_sync,
+    state_events,
+) = if since_shortstatehash.is_none() {
+    // Probably since = 0, we will do an initial sync
+    let (joined_member_count, invited_member_count, heroes) = calculate_counts();
+
+    let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
+    let state_events = current_state_ids
+        .iter()
+        .map(|id| db.rooms.get_pdu(id))
+        .filter_map(|r| r.ok().flatten())
+        .collect::<Vec<_>>();
+
+    (
+        heroes,
+        joined_member_count,
+        invited_member_count,
+        true,
+        state_events,
+    )
+} else if !pdus_after_since || since_shortstatehash == Some(current_shortstatehash) {
+    // No state changes
+    (Vec::new(), None, None, false, Vec::new())
+} else {
+    // Incremental /sync
+    let since_shortstatehash = since_shortstatehash.unwrap();
+
+    let since_sender_member = db
+        .rooms
+        .state_get(
+            since_shortstatehash,
+            &EventType::RoomMember,
+            sender_user.as_str(),
+        )?
+        .and_then(|pdu| {
+            serde_json::from_value::<Raw<ruma::events::room::member::MemberEventContent>>(
+                pdu.content.clone(),
+            )
+            .expect("Raw::from_value always works")
+            .deserialize()
+            .map_err(|_| Error::bad_database("Invalid PDU in database."))
+            .ok()
+        });
+
+    let joined_since_last_sync = since_sender_member
+        .map_or(true, |member| member.membership != MembershipState::Join);
+
+    let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
+    let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
+
+    let state_events = if joined_since_last_sync {
+        current_state_ids
+            .iter()
+            .map(|id| db.rooms.get_pdu(id))
+            .filter_map(|r| r.ok().flatten())
+            .collect::<Vec<_>>()
+    } else {
+        current_state_ids
+            .difference(&since_state_ids)
+            .filter(|id| {
+                !timeline_pdus
+                    .iter()
+                    .any(|(_, timeline_pdu)| timeline_pdu.event_id == **id)
+            })
+            .map(|id| db.rooms.get_pdu(id))
+            .filter_map(|r| r.ok().flatten())
+            .collect()
+    };
let encrypted_room = db
.rooms
.state_get(current_shortstatehash, &EventType::RoomEncryption, "")?
.is_some();
let since_encryption =
db.rooms
.state_get(since_shortstatehash, &EventType::RoomEncryption, "")?;
// Calculations:
let new_encrypted_room = encrypted_room && since_encryption.is_none();
let send_member_count = state_events
.iter()
.any(|event| event.kind == EventType::RoomMember);
if encrypted_room {
for (user_id, current_member) in db
.rooms
.room_members(&room_id)
.filter_map(|r| r.ok())
.filter_map(|user_id| {
db.rooms
.state_get(
current_shortstatehash,
&EventType::RoomMember,
user_id.as_str(),
)
.ok()
.flatten()
.map(|current_member| (user_id, current_member))
})
{
let current_membership = serde_json::from_value::<
Raw<ruma::events::room::member::MemberEventContent>,
>(current_member.content.clone())
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership;
let since_membership = db
.rooms
.state_get(
since_shortstatehash,
&EventType::RoomMember,
user_id.as_str(),
)?
.and_then(|since_member| {
serde_json::from_value::<
Raw<ruma::events::room::member::MemberEventContent>,
>(since_member.content.clone())
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok()
})
.map_or(MembershipState::Leave, |member| member.membership);
let user_id = UserId::try_from(user_id.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
match (since_membership, current_membership) {
(MembershipState::Leave, MembershipState::Join) => {
// A new user joined an encrypted room
if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
device_list_updates.insert(user_id);
} }
}
// TODO: Remove, this should never happen here, right?
(MembershipState::Join, MembershipState::Leave) => {
// Write down users that have left encrypted rooms we are in
left_encrypted_users.insert(user_id);
}
_ => {}
}
}
}
if joined_since_last_sync && encrypted_room || new_encrypted_room {
// If the user is in a new encrypted room, give them all joined users
device_list_updates.extend(
db.rooms
.room_members(&room_id)
.flatten()
.filter(|user_id| {
// Don't send key updates from the sender to the sender
&sender_user != user_id
})
.filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target already
!share_encrypted_room(&db, &sender_user, user_id, &room_id)
.unwrap_or(false)
}),
);
}
let (joined_member_count, invited_member_count, heroes) = if send_member_count {
calculate_counts()
} else {
(None, None, Vec::new())
}; };
( (
@ -486,8 +513,6 @@ async fn sync_helper(
joined_since_last_sync, joined_since_last_sync,
state_events, state_events,
) )
-} else {
-    (Vec::new(), None, None, false, Vec::new())
};
// Look for device list updates in this room // Look for device list updates in this room
@ -578,7 +603,10 @@ async fn sync_helper(
events: room_events, events: room_events,
}, },
state: sync_events::State { state: sync_events::State {
-events: state_events,
+events: state_events
+    .iter()
+    .map(|pdu| pdu.to_sync_state_event())
+    .collect(),
}, },
ephemeral: sync_events::Ephemeral { events: edus }, ephemeral: sync_events::Ephemeral { events: edus },
}; };
@ -625,6 +653,19 @@ async fn sync_helper(
let mut left_rooms = BTreeMap::new(); let mut left_rooms = BTreeMap::new();
for result in db.rooms.rooms_left(&sender_user) { for result in db.rooms.rooms_left(&sender_user) {
let (room_id, left_state_events) = result?; let (room_id, left_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
drop(mutex_lock);
let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
// Left before last sync // Left before last sync
@ -651,6 +692,19 @@ async fn sync_helper(
let mut invited_rooms = BTreeMap::new(); let mut invited_rooms = BTreeMap::new();
for result in db.rooms.rooms_invited(&sender_user) { for result in db.rooms.rooms_invited(&sender_user) {
let (room_id, invite_state_events) = result?; let (room_id, invite_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
drop(mutex_lock);
let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;
// Invited before last sync // Invited before last sync
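
The "get and drop the lock" step at the top of each joined/left/invited room loop is a barrier, not a critical section: sync only needs to wait until any build_and_append_pdu currently holding the room mutex has finished, so the response is computed from a settled room (this is what fixes the sync timeouts and missing-event races). As a stand-alone illustration, with wait_for_room_writes being a hypothetical name for the inline lock/drop above:

use std::sync::Arc;
use tokio::sync::Mutex;

// Locking and immediately unlocking returns only after any in-flight
// append for this room has released the mutex.
async fn wait_for_room_writes(room_mutex: &Arc<Mutex<()>>) {
    let lock = room_mutex.lock().await;
    drop(lock);
}

#[tokio::main]
async fn main() {
    let mutex = Arc::new(Mutex::new(()));
    wait_for_room_writes(&mutex).await;
    println!("room settled; safe to read a consistent snapshot");
}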

View File

@ -19,7 +19,9 @@ pub async fn send_event_to_device_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref(); let sender_device = body.sender_device.as_deref();
// TODO: uncomment when https://github.com/vector-im/element-android/issues/3589 is solved
// Check if this is a new transaction id // Check if this is a new transaction id
/*
if db if db
.transaction_ids .transaction_ids
.existing_txnid(sender_user, sender_device, &body.txn_id)? .existing_txnid(sender_user, sender_device, &body.txn_id)?
@ -27,6 +29,7 @@ pub async fn send_event_to_device_route(
{ {
return Ok(send_event_to_device::Response.into()); return Ok(send_event_to_device::Response.into());
} }
*/
for (target_user_id, map) in &body.messages { for (target_user_id, map) in &body.messages {
for (target_device_id_maybe, event) in map { for (target_device_id_maybe, event) in map {

View File

@ -279,7 +279,7 @@ impl Database {
eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?,
prevevent_parent: builder.open_tree("prevevent_parent")?, prevevent_parent: builder.open_tree("prevevent_parent")?,
-pdu_cache: RwLock::new(LruCache::new(1_000_000)),
+pdu_cache: RwLock::new(LruCache::new(10_000)),
}, },
account_data: account_data::AccountData { account_data: account_data::AccountData {
roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,

View File

@ -64,7 +64,7 @@ impl Tree for SledEngineTree {
backwards: bool, backwards: bool,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send> { ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send> {
let iter = if backwards { let iter = if backwards {
-self.0.range(..from)
+self.0.range(..=from)
} else { } else {
self.0.range(from..) self.0.range(from..)
}; };
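
That one-character sled change fixes an off-by-one: users.rs documents iter_from(&last, true) as including last, and only an inclusive upper bound gives reverse iteration that behavior. The same bound semantics can be demonstrated with std's BTreeMap::range (a self-contained analogy, not sled itself):

use std::collections::BTreeMap;

fn main() {
    let mut tree = BTreeMap::new();
    for k in [1u8, 2, 3] {
        tree.insert(vec![k], ());
    }
    let from = vec![2u8];

    // Exclusive bound: reverse iteration never yields the starting key.
    assert_eq!(tree.range(..from.clone()).rev().count(), 1);

    // Inclusive bound: the starting key comes out first, matching the
    // "this includes last" expectation in users.rs.
    assert_eq!(tree.range(..=from).rev().count(), 2);
}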

View File

@ -10,7 +10,7 @@ use ruma::{
events::{room::message, EventType}, events::{room::message, EventType},
UserId, UserId,
}; };
-use tokio::sync::{RwLock, RwLockReadGuard};
+use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard};
pub enum AdminCommand { pub enum AdminCommand {
RegisterAppservice(serde_yaml::Value), RegisterAppservice(serde_yaml::Value),
@ -48,15 +48,19 @@ impl Admin {
) )
.unwrap(); .unwrap();
-if conduit_room.is_none() {
-    warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this.");
-}
+let conduit_room = match conduit_room {
+    None => {
+        warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this.");
+        return;
+    }
+    Some(r) => r,
+};

drop(guard);

-let send_message =
-    |message: message::MessageEventContent, guard: RwLockReadGuard<'_, Database>| {
-        if let Some(conduit_room) = &conduit_room {
+let send_message = |message: message::MessageEventContent,
+                    guard: RwLockReadGuard<'_, Database>,
+                    mutex_lock: &MutexGuard<'_, ()>| {
guard guard
.rooms .rooms
.build_and_append_pdu( .build_and_append_pdu(
@ -71,15 +75,24 @@ impl Admin {
&conduit_user, &conduit_user,
&conduit_room, &conduit_room,
&guard, &guard,
mutex_lock,
) )
.unwrap(); .unwrap();
-}
}; };
loop { loop {
tokio::select! { tokio::select! {
Some(event) = receiver.next() => { Some(event) = receiver.next() => {
let guard = db.read().await; let guard = db.read().await;
let mutex = Arc::clone(
guard.globals
.roomid_mutex
.write()
.unwrap()
.entry(conduit_room.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
match event { match event {
AdminCommand::RegisterAppservice(yaml) => { AdminCommand::RegisterAppservice(yaml) => {
@ -93,15 +106,17 @@ impl Admin {
count, count,
appservices.into_iter().filter_map(|r| r.ok()).collect::<Vec<_>>().join(", ") appservices.into_iter().filter_map(|r| r.ok()).collect::<Vec<_>>().join(", ")
); );
-send_message(message::MessageEventContent::text_plain(output), guard);
+send_message(message::MessageEventContent::text_plain(output), guard, &mutex_lock);
} else {
-send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard);
+send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard, &mutex_lock);
}
}
AdminCommand::SendMessage(message) => {
-send_message(message, guard)
+send_message(message, guard, &mutex_lock);
} }
} }
drop(mutex_lock);
} }
} }
} }
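
The reworked send_message closure also shows the convention used throughout the commit: build_and_append_pdu now takes a &MutexGuard<'_, ()> it never reads (rooms.rs names it _mutex_lock), so the type system proves the caller holds the room lock for the duration of the call. The idiom in isolation, as a sketch:

use tokio::sync::{Mutex, MutexGuard};

// The borrowed guard is the capability: append() cannot be called
// without having locked the room first, and the guard cannot be dropped
// while append() is borrowing it.
fn append(_proof_of_room_lock: &MutexGuard<'_, ()>) {
    // ... write the PDU ...
}

#[tokio::main]
async fn main() {
    let room_mutex = Mutex::new(());
    let lock = room_mutex.lock().await;
    append(&lock);
    drop(lock);
}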

View File

@ -5,7 +5,7 @@ use ruma::{
client::r0::sync::sync_events, client::r0::sync::sync_events,
federation::discovery::{ServerSigningKeys, VerifyKey}, federation::discovery::{ServerSigningKeys, VerifyKey},
}, },
-DeviceId, EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId,
+DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId,
}; };
use rustls::{ServerCertVerifier, WebPKIVerifier}; use rustls::{ServerCertVerifier, WebPKIVerifier};
use std::{ use std::{
@ -16,7 +16,7 @@ use std::{
sync::{Arc, RwLock}, sync::{Arc, RwLock},
time::{Duration, Instant}, time::{Duration, Instant},
}; };
-use tokio::sync::{broadcast, watch::Receiver, Semaphore};
+use tokio::sync::{broadcast, watch::Receiver, Mutex, Semaphore};
use trust_dns_resolver::TokioAsyncResolver; use trust_dns_resolver::TokioAsyncResolver;
use super::abstraction::Tree; use super::abstraction::Tree;
@ -45,6 +45,8 @@ pub struct Globals {
pub bad_signature_ratelimiter: Arc<RwLock<BTreeMap<Vec<String>, RateLimitState>>>, pub bad_signature_ratelimiter: Arc<RwLock<BTreeMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<BTreeMap<Box<ServerName>, Arc<Semaphore>>>>, pub servername_ratelimiter: Arc<RwLock<BTreeMap<Box<ServerName>, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<BTreeMap<(UserId, Box<DeviceId>), SyncHandle>>, pub sync_receivers: RwLock<BTreeMap<(UserId, Box<DeviceId>), SyncHandle>>,
pub roomid_mutex: RwLock<BTreeMap<RoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_federation: RwLock<BTreeMap<RoomId, Arc<Mutex<()>>>>, // this lock will be held longer
pub rotate: RotationHandler, pub rotate: RotationHandler,
} }
@ -197,6 +199,8 @@ impl Globals {
bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
roomid_mutex: RwLock::new(BTreeMap::new()),
roomid_mutex_federation: RwLock::new(BTreeMap::new()),
sync_receivers: RwLock::new(BTreeMap::new()), sync_receivers: RwLock::new(BTreeMap::new()),
rotate: RotationHandler::new(), rotate: RotationHandler::new(),
}; };

View File

@ -2,6 +2,7 @@ mod edus;
pub use edus::RoomEdus; pub use edus::RoomEdus;
use member::MembershipState; use member::MembershipState;
use tokio::sync::MutexGuard;
use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result};
use log::{debug, error, warn}; use log::{debug, error, warn};
@ -21,7 +22,7 @@ use ruma::{
uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
}; };
use std::{ use std::{
-collections::{BTreeMap, HashMap, HashSet},
+collections::{BTreeMap, BTreeSet, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
mem, mem,
sync::{Arc, RwLock}, sync::{Arc, RwLock},
@ -89,7 +90,7 @@ pub struct Rooms {
impl Rooms { impl Rooms {
/// Builds a StateMap by iterating over all keys that start /// Builds a StateMap by iterating over all keys that start
/// with state_hash, this gives the full state for the given state_hash. /// with state_hash, this gives the full state for the given state_hash.
-pub fn state_full_ids(&self, shortstatehash: u64) -> Result<Vec<EventId>> {
+pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeSet<EventId>> {
Ok(self Ok(self
.stateid_shorteventid .stateid_shorteventid
.scan_prefix(shortstatehash.to_be_bytes().to_vec()) .scan_prefix(shortstatehash.to_be_bytes().to_vec())
@ -666,11 +667,10 @@ impl Rooms {
&self,
pdu: &PduEvent,
mut pdu_json: CanonicalJsonObject,
-count: u64,
-pdu_id: &[u8],
leaves: &[EventId],
db: &Database,
-) -> Result<()> {
+) -> Result<Vec<u8>> {
+    // returns pdu id
// Make unsigned fields correct. This is not properly documented in the spec, but state // Make unsigned fields correct. This is not properly documented in the spec, but state
// events need to have previous content in the unsigned field, so clients can easily // events need to have previous content in the unsigned field, so clients can easily
// interpret things like membership changes // interpret things like membership changes
@ -708,20 +708,30 @@ impl Rooms {
self.replace_pdu_leaves(&pdu.room_id, leaves)?; self.replace_pdu_leaves(&pdu.room_id, leaves)?;
let count1 = db.globals.next_count()?;
// Mark as read first so the sending client doesn't get a notification even if appending // Mark as read first so the sending client doesn't get a notification even if appending
// fails // fails
self.edus self.edus
-.private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?;
+.private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?;
self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; self.reset_notification_counts(&pdu.sender, &pdu.room_id)?;
let count2 = db.globals.next_count()?;
let mut pdu_id = pdu.room_id.as_bytes().to_vec();
pdu_id.push(0xff);
pdu_id.extend_from_slice(&count2.to_be_bytes());
// There's a brief moment of time here where the count is updated but the pdu does not
// exist. This could theoretically lead to dropped pdus, but it's extremely rare
self.pduid_pdu.insert( self.pduid_pdu.insert(
-pdu_id,
+&pdu_id,
&serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"),
)?; )?;
// This also replaces the eventid of any outliers with the correct // This also replaces the eventid of any outliers with the correct
// pduid, removing the place holder. // pduid, removing the place holder.
-self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?;
+self.eventid_pduid
+    .insert(pdu.event_id.as_bytes(), &pdu_id)?;
// See if the event matches any known pushers // See if the event matches any known pushers
for user in db for user in db
@ -909,7 +919,7 @@ impl Rooms {
_ => {} _ => {}
} }
-Ok(())
+Ok(pdu_id)
} }
pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
@ -1198,6 +1208,7 @@ impl Rooms {
sender: &UserId, sender: &UserId,
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
) -> Result<EventId> { ) -> Result<EventId> {
let PduBuilder { let PduBuilder {
event_type, event_type,
@ -1206,7 +1217,7 @@ impl Rooms {
state_key, state_key,
redacts, redacts,
} = pdu_builder; } = pdu_builder;
// TODO: Make sure this isn't called twice in parallel
let prev_events = self let prev_events = self
.get_pdu_leaves(&room_id)? .get_pdu_leaves(&room_id)?
.into_iter() .into_iter()
@ -1354,11 +1365,9 @@ impl Rooms {
// pdu without it's state. This is okay because append_pdu can't fail. // pdu without it's state. This is okay because append_pdu can't fail.
let statehashid = self.append_to_state(&pdu, &db.globals)?; let statehashid = self.append_to_state(&pdu, &db.globals)?;
-self.append_pdu(
+let pdu_id = self.append_pdu(
    &pdu,
    pdu_json,
-    count,
-    &pdu_id,
    // Since this PDU references all pdu_leaves we can update the leaves
    // of the room
    &[pdu.event_id.clone()],
@ -1495,7 +1504,7 @@ impl Rooms {
prefix.push(0xff); prefix.push(0xff);
let mut current = prefix.clone(); let mut current = prefix.clone();
-current.extend_from_slice(&until.to_be_bytes());
+current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until`
let current: &[u8] = &current; let current: &[u8] = &current;
@ -1782,6 +1791,16 @@ impl Rooms {
db, db,
)?; )?;
} else { } else {
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let mut event = serde_json::from_value::<Raw<member::MemberEventContent>>( let mut event = serde_json::from_value::<Raw<member::MemberEventContent>>(
self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
@ -1809,6 +1828,7 @@ impl Rooms {
user_id, user_id,
room_id, room_id,
db, db,
&mutex_lock,
)?; )?;
} }
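
append_pdu now allocates the counter and builds the pdu_id key itself (count2 above) instead of trusting a caller-supplied one, and returns it. The key layout, which both the client and federation paths now depend on, is: room id, a 0xff separator, then the big-endian global count, so all keys for one room are contiguous and sort by insertion order. A small sketch of that encoding:

fn pdu_id_for(room_id: &str, count: u64) -> Vec<u8> {
    let mut pdu_id = room_id.as_bytes().to_vec();
    pdu_id.push(0xff); // separator; cannot appear in a room id
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id
}

fn main() {
    let a = pdu_id_for("!room:example.org", 1);
    let b = pdu_id_for("!room:example.org", 2);
    // Big-endian encoding keeps byte order equal to numeric order.
    assert!(a < b);
}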

View File

@ -726,10 +726,9 @@ impl Users {
json.insert("sender".to_owned(), sender.to_string().into()); json.insert("sender".to_owned(), sender.to_string().into());
json.insert("content".to_owned(), content); json.insert("content".to_owned(), content);
-self.todeviceid_events.insert(
-    &key,
-    &serde_json::to_vec(&json).expect("Map::to_vec always works"),
-)?;
+let value = serde_json::to_vec(&json).expect("Map::to_vec always works");
+
+self.todeviceid_events.insert(&key, &value)?;
Ok(()) Ok(())
} }
@ -774,7 +773,7 @@ impl Users {
for (key, _) in self for (key, _) in self
.todeviceid_events .todeviceid_events
-.iter_from(&last, true)
+.iter_from(&last, true) // this includes last
.take_while(move |(k, _)| k.starts_with(&prefix)) .take_while(move |(k, _)| k.starts_with(&prefix))
.map(|(key, _)| { .map(|(key, _)| {
Ok::<_, Error>(( Ok::<_, Error>((

View File

@ -320,6 +320,7 @@ where
}), }),
Err(e) => { Err(e) => {
warn!("{:?}", e); warn!("{:?}", e);
// Bad Json
Failure((Status::new(583), ())) Failure((Status::new(583), ()))
} }
} }

View File

@ -46,7 +46,7 @@ use ruma::{
receipt::ReceiptType, receipt::ReceiptType,
serde::Raw, serde::Raw,
signatures::{CanonicalJsonObject, CanonicalJsonValue}, signatures::{CanonicalJsonObject, CanonicalJsonValue},
-state_res::{self, Event, RoomVersion, StateMap},
+state_res::{self, RoomVersion, StateMap},
to_device::DeviceIdOrAllDevices, to_device::DeviceIdOrAllDevices,
uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName,
ServerSigningKeyId, UserId, ServerSigningKeyId, UserId,
@ -625,13 +625,44 @@ pub async fn send_transaction_message_route(
} }
}; };
// 0. Check the server is in the room
let room_id = match value
.get("room_id")
.and_then(|id| RoomId::try_from(id.as_str()?).ok())
{
Some(id) => id,
None => {
// Event is invalid
resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_string()));
continue;
}
};
let mutex = Arc::clone(
db.globals
.roomid_mutex_federation
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let start_time = Instant::now(); let start_time = Instant::now();
resolved_map.insert( resolved_map.insert(
event_id.clone(), event_id.clone(),
-handle_incoming_pdu(&body.origin, &event_id, value, true, &db, &pub_key_map)
+handle_incoming_pdu(
+    &body.origin,
+    &event_id,
+    &room_id,
+    value,
+    true,
+    &db,
+    &pub_key_map,
+)
.await .await
.map(|_| ()), .map(|_| ()),
); );
drop(mutex_lock);
let elapsed = start_time.elapsed(); let elapsed = start_time.elapsed();
if elapsed > Duration::from_secs(1) { if elapsed > Duration::from_secs(1) {
@ -784,8 +815,8 @@ pub async fn send_transaction_message_route(
type AsyncRecursiveResult<'a, T, E> = Pin<Box<dyn Future<Output = StdResult<T, E>> + 'a + Send>>; type AsyncRecursiveResult<'a, T, E> = Pin<Box<dyn Future<Output = StdResult<T, E>> + 'a + Send>>;
/// When receiving an event one needs to: /// When receiving an event one needs to:
-/// 0. Skip the PDU if we already know about it
-/// 1. Check the server is in the room
+/// 0. Check the server is in the room
+/// 1. Skip the PDU if we already know about it
/// 2. Check signatures, otherwise drop /// 2. Check signatures, otherwise drop
/// 3. Check content hash, redact if doesn't match /// 3. Check content hash, redact if doesn't match
/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not
@ -810,6 +841,7 @@ type AsyncRecursiveResult<'a, T, E> = Pin<Box<dyn Future<Output = StdResult<T, E
pub fn handle_incoming_pdu<'a>( pub fn handle_incoming_pdu<'a>(
origin: &'a ServerName, origin: &'a ServerName,
event_id: &'a EventId, event_id: &'a EventId,
room_id: &'a RoomId,
value: BTreeMap<String, CanonicalJsonValue>, value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool, is_timeline_event: bool,
db: &'a Database, db: &'a Database,
@ -817,24 +849,6 @@ pub fn handle_incoming_pdu<'a>(
) -> AsyncRecursiveResult<'a, Option<Vec<u8>>, String> { ) -> AsyncRecursiveResult<'a, Option<Vec<u8>>, String> {
Box::pin(async move { Box::pin(async move {
// TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
// 0. Skip the PDU if we already have it as a timeline event
if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) {
return Ok(Some(pdu_id.to_vec()));
}
// 1. Check the server is in the room
let room_id = match value
.get("room_id")
.and_then(|id| RoomId::try_from(id.as_str()?).ok())
{
Some(id) => id,
None => {
// Event is invalid
return Err("Event needs a valid RoomId.".to_string());
}
};
match db.rooms.exists(&room_id) { match db.rooms.exists(&room_id) {
Ok(true) => {} Ok(true) => {}
_ => { _ => {
@ -842,6 +856,11 @@ pub fn handle_incoming_pdu<'a>(
} }
} }
// 1. Skip the PDU if we already have it as a timeline event
if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) {
return Ok(Some(pdu_id.to_vec()));
}
// We go through all the signatures we see on the value and fetch the corresponding signing // We go through all the signatures we see on the value and fetch the corresponding signing
// keys // keys
fetch_required_signing_keys(&value, &pub_key_map, db) fetch_required_signing_keys(&value, &pub_key_map, db)
@ -901,7 +920,7 @@ pub fn handle_incoming_pdu<'a>(
// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
// EDIT: Step 5 is not applied anymore because it failed too often // EDIT: Step 5 is not applied anymore because it failed too often
debug!("Fetching auth events for {}", incoming_pdu.event_id); debug!("Fetching auth events for {}", incoming_pdu.event_id);
-fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, pub_key_map)
+fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, &room_id, pub_key_map)
.await .await
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
@ -1002,13 +1021,13 @@ pub fn handle_incoming_pdu<'a>(
if incoming_pdu.prev_events.len() == 1 { if incoming_pdu.prev_events.len() == 1 {
let prev_event = &incoming_pdu.prev_events[0]; let prev_event = &incoming_pdu.prev_events[0];
-let state_vec = db
+let state = db
.rooms .rooms
.pdu_shortstatehash(prev_event) .pdu_shortstatehash(prev_event)
.map_err(|_| "Failed talking to db".to_owned())? .map_err(|_| "Failed talking to db".to_owned())?
.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok()) .map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok())
.flatten(); .flatten();
-if let Some(mut state_vec) = state_vec {
+if let Some(mut state) = state {
if db if db
.rooms .rooms
.get_pdu(prev_event) .get_pdu(prev_event)
@ -1018,10 +1037,16 @@ pub fn handle_incoming_pdu<'a>(
.state_key .state_key
.is_some() .is_some()
{ {
-state_vec.push(prev_event.clone());
+state.insert(prev_event.clone());
} }
state_at_incoming_event = Some( state_at_incoming_event = Some(
-fetch_and_handle_events(db, origin, &state_vec, pub_key_map)
+fetch_and_handle_events(
+    db,
+    origin,
+    &state.into_iter().collect::<Vec<_>>(),
+    &room_id,
+    pub_key_map,
+)
.await .await
.map_err(|_| "Failed to fetch state events locally".to_owned())? .map_err(|_| "Failed to fetch state events locally".to_owned())?
.into_iter() .into_iter()
@ -1059,8 +1084,14 @@ pub fn handle_incoming_pdu<'a>(
{ {
Ok(res) => { Ok(res) => {
debug!("Fetching state events at event."); debug!("Fetching state events at event.");
-let state_vec =
-    match fetch_and_handle_events(&db, origin, &res.pdu_ids, pub_key_map).await
+let state_vec = match fetch_and_handle_events(
+    &db,
+    origin,
+    &res.pdu_ids,
+    &room_id,
+    pub_key_map,
+)
+.await
{ {
Ok(state) => state, Ok(state) => state,
Err(_) => return Err("Failed to fetch state events.".to_owned()), Err(_) => return Err("Failed to fetch state events.".to_owned()),
@ -1090,7 +1121,13 @@ pub fn handle_incoming_pdu<'a>(
} }
debug!("Fetching auth chain events at event."); debug!("Fetching auth chain events at event.");
-match fetch_and_handle_events(&db, origin, &res.auth_chain_ids, pub_key_map)
+match fetch_and_handle_events(
+    &db,
+    origin,
+    &res.auth_chain_ids,
+    &room_id,
+    pub_key_map,
+)
.await .await
{ {
Ok(state) => state, Ok(state) => state,
@ -1219,18 +1256,10 @@ pub fn handle_incoming_pdu<'a>(
let mut auth_events = vec![]; let mut auth_events = vec![];
for map in &fork_states { for map in &fork_states {
-let mut state_auth = vec![];
-for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) {
-    match fetch_and_handle_events(&db, origin, &[auth_id.clone()], pub_key_map)
-        .await
-    {
-        // This should always contain exactly one element when Ok
-        Ok(events) => state_auth.extend_from_slice(&events),
-        Err(e) => {
-            debug!("Event was not present: {}", e);
-        }
-    }
-}
+let state_auth = map
+    .values()
+    .flat_map(|pdu| pdu.auth_events.clone())
+    .collect();
auth_events.push(state_auth); auth_events.push(state_auth);
} }
@ -1245,10 +1274,7 @@ pub fn handle_incoming_pdu<'a>(
.collect::<StateMap<_>>() .collect::<StateMap<_>>()
}) })
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
-auth_events
-    .into_iter()
-    .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect())
-    .collect(),
+auth_events,
&|id| { &|id| {
let res = db.rooms.get_pdu(id); let res = db.rooms.get_pdu(id);
if let Err(e) = &res { if let Err(e) = &res {
@ -1282,11 +1308,13 @@ pub fn handle_incoming_pdu<'a>(
pdu_id = Some( pdu_id = Some(
append_incoming_pdu( append_incoming_pdu(
&db, &db,
&room_id,
&incoming_pdu, &incoming_pdu,
val, val,
extremities, extremities,
&state_at_incoming_event, &state_at_incoming_event,
) )
.await
.map_err(|_| "Failed to add pdu to db.".to_owned())?, .map_err(|_| "Failed to add pdu to db.".to_owned())?,
); );
debug!("Appended incoming pdu."); debug!("Appended incoming pdu.");
@ -1324,6 +1352,7 @@ pub(crate) fn fetch_and_handle_events<'a>(
db: &'a Database, db: &'a Database,
origin: &'a ServerName, origin: &'a ServerName,
events: &'a [EventId], events: &'a [EventId],
room_id: &'a RoomId,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>,
) -> AsyncRecursiveResult<'a, Vec<Arc<PduEvent>>, Error> { ) -> AsyncRecursiveResult<'a, Vec<Arc<PduEvent>>, Error> {
Box::pin(async move { Box::pin(async move {
@ -1377,6 +1406,7 @@ pub(crate) fn fetch_and_handle_events<'a>(
match handle_incoming_pdu( match handle_incoming_pdu(
origin, origin,
&event_id, &event_id,
&room_id,
value.clone(), value.clone(),
false, false,
db, db,
@ -1583,32 +1613,38 @@ pub(crate) async fn fetch_signing_keys(
/// Append the incoming event setting the state snapshot to the state from the /// Append the incoming event setting the state snapshot to the state from the
/// server that sent the event. /// server that sent the event.
#[tracing::instrument(skip(db))] #[tracing::instrument(skip(db))]
-pub(crate) fn append_incoming_pdu(
+async fn append_incoming_pdu(
db: &Database, db: &Database,
room_id: &RoomId,
pdu: &PduEvent, pdu: &PduEvent,
pdu_json: CanonicalJsonObject, pdu_json: CanonicalJsonObject,
new_room_leaves: HashSet<EventId>, new_room_leaves: HashSet<EventId>,
state: &StateMap<Arc<PduEvent>>, state: &StateMap<Arc<PduEvent>>,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
-let count = db.globals.next_count()?;
-let mut pdu_id = pdu.room_id.as_bytes().to_vec();
-pdu_id.push(0xff);
-pdu_id.extend_from_slice(&count.to_be_bytes());
+let mutex = Arc::clone(
+    db.globals
+        .roomid_mutex
+        .write()
+        .unwrap()
+        .entry(room_id.clone())
+        .or_default(),
+);
+let mutex_lock = mutex.lock().await;
// We append to state before appending the pdu, so we don't have a moment in time with the // We append to state before appending the pdu, so we don't have a moment in time with the
// pdu without it's state. This is okay because append_pdu can't fail. // pdu without it's state. This is okay because append_pdu can't fail.
db.rooms db.rooms
.set_event_state(&pdu.event_id, state, &db.globals)?; .set_event_state(&pdu.event_id, state, &db.globals)?;
-db.rooms.append_pdu(
+let pdu_id = db.rooms.append_pdu(
    pdu,
    pdu_json,
-    count,
-    &pdu_id,
    &new_room_leaves.into_iter().collect::<Vec<_>>(),
    &db,
)?;
drop(mutex_lock);
for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) { for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) {
if let Some(namespaces) = appservice.1.get("namespaces") { if let Some(namespaces) = appservice.1.get("namespaces") {
let users = namespaces let users = namespaces
@ -1872,7 +1908,11 @@ pub fn get_room_state_ids_route(
"Pdu state not found.", "Pdu state not found.",
))?; ))?;
-let pdu_ids = db.rooms.state_full_ids(shortstatehash)?;
+let pdu_ids = db
+    .rooms
+    .state_full_ids(shortstatehash)?
+    .into_iter()
+    .collect();
let mut auth_chain_ids = BTreeSet::<EventId>::new(); let mut auth_chain_ids = BTreeSet::<EventId>::new();
let mut todo = BTreeSet::new(); let mut todo = BTreeSet::new();
@ -2118,7 +2158,24 @@ pub async fn create_join_event_route(
) )
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?;
-let pdu_id = handle_incoming_pdu(&origin, &event_id, value, true, &db, &pub_key_map)
+let mutex = Arc::clone(
+    db.globals
+        .roomid_mutex_federation
+        .write()
+        .unwrap()
+        .entry(body.room_id.clone())
+        .or_default(),
+);
+let mutex_lock = mutex.lock().await;
+
+let pdu_id = handle_incoming_pdu(
+    &origin,
+    &event_id,
+    &body.room_id,
+    value,
+    true,
+    &db,
+    &pub_key_map,
+)
.await .await
.map_err(|_| { .map_err(|_| {
Error::BadRequest( Error::BadRequest(
@ -2130,6 +2187,7 @@ pub async fn create_join_event_route(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
"Could not accept incoming PDU as timeline event.", "Could not accept incoming PDU as timeline event.",
))?; ))?;
drop(mutex_lock);
let state_ids = db.rooms.state_full_ids(shortstatehash)?; let state_ids = db.rooms.state_full_ids(shortstatehash)?;
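
Finally, the two maps added to Globals are used differently: client paths hold roomid_mutex only around event creation, while send_transaction_message_route and create_join_event_route hold roomid_mutex_federation across the whole handle_incoming_pdu call, hence the "this lock will be held longer" comment. The shape of the federation side, as a sketch with stub names:

use std::sync::Arc;
use tokio::sync::Mutex;

async fn handle_incoming_pdu_stub() { /* state resolution happens here */ }

// One incoming federation event per room at a time; client sends in
// other rooms are unaffected because they use the other mutex map.
async fn handle_transaction_event(room_mutex: Arc<Mutex<()>>) {
    let lock = room_mutex.lock().await;
    handle_incoming_pdu_stub().await;
    drop(lock);
}

#[tokio::main]
async fn main() {
    handle_transaction_event(Arc::new(Mutex::new(()))).await;
}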