use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma};
use get_profile_information::v1::ProfileField;
use http::header::{HeaderValue, AUTHORIZATION, HOST};
use log::{error, info, warn};
use rocket::{get, post, put, response::content::Json, State};
use ruma::{
    api::{
        federation::{
            directory::{get_public_rooms, get_public_rooms_filtered},
            discovery::{
                get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys,
                VerifyKey,
            },
            event::{get_event, get_missing_events, get_room_state_ids},
            query::get_profile_information,
            transactions::send_transaction_message,
        },
        OutgoingRequest,
    },
    directory::{IncomingFilter, IncomingRoomNetwork},
    serde::to_canonical_value,
    signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap},
    EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId,
};
use state_res::{Event, EventMap, StateMap};
use std::{
    collections::{BTreeMap, BTreeSet},
    convert::TryFrom,
    fmt::Debug,
    future::Future,
    net::{IpAddr, SocketAddr},
    pin::Pin,
    result::Result as StdResult,
    sync::Arc,
    time::{Duration, SystemTime},
};

pub async fn send_request<T: OutgoingRequest>(
    globals: &crate::database::globals::Globals,
    destination: &ServerName,
    request: T,
) -> Result<T::IncomingResponse>
where
    T: Debug,
{
    if !globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let maybe_result = globals
        .actual_destination_cache
        .read()
        .unwrap()
        .get(destination)
        .cloned();

    let (actual_destination, host) = if let Some(result) = maybe_result {
        result
    } else {
        let result = find_actual_destination(globals, &destination).await;
        globals
            .actual_destination_cache
            .write()
            .unwrap()
            .insert(Box::<ServerName>::from(destination), result.clone());
        result
    };

    let mut http_request = request
        .try_into_http_request(&actual_destination, Some(""))
        .map_err(|e| {
            warn!("Failed to find destination {}: {}", actual_destination, e);
            Error::BadServerResponse("Invalid destination")
        })?;

    let mut request_map = serde_json::Map::new();

    if !http_request.body().is_empty() {
        request_map.insert(
            "content".to_owned(),
            serde_json::from_slice(http_request.body())
                .expect("body is valid json, we just created it"),
        );
    };

    request_map.insert("method".to_owned(), T::METADATA.method.to_string().into());
    request_map.insert(
        "uri".to_owned(),
        http_request
            .uri()
            .path_and_query()
            .expect("all requests have a path")
            .to_string()
            .into(),
    );
    request_map.insert("origin".to_owned(), globals.server_name().as_str().into());
    request_map.insert("destination".to_owned(), destination.as_str().into());

    let mut request_json =
        serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap");

    ruma::signatures::sign_json(
        globals.server_name().as_str(),
        globals.keypair(),
        &mut request_json,
    )
    .expect("our request json is what ruma expects");

    let request_json: serde_json::Map<String, serde_json::Value> =
        serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap();
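
    // `sign_json` added a `signatures` object shaped roughly like
    // `{"<our server name>": {"ed25519:<key version>": "<base64 signature>"}}` (illustrative).
    // The nested iteration below pulls out each (key id, signature) pair so it can be placed
    // into an `X-Matrix` Authorization header.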
    let signatures = request_json["signatures"]
        .as_object()
        .unwrap()
        .values()
        .map(|v| {
            v.as_object()
                .unwrap()
                .iter()
                .map(|(k, v)| (k, v.as_str().unwrap()))
        });

    for signature_server in signatures {
        for s in signature_server {
            http_request.headers_mut().insert(
                AUTHORIZATION,
                HeaderValue::from_str(&format!(
                    "X-Matrix origin={},key=\"{}\",sig=\"{}\"",
                    globals.server_name(),
                    s.0,
                    s.1
                ))
                .unwrap(),
            );
        }
    }
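
    // When the destination was resolved via an SRV record, the request itself goes to the
    // SRV target, but the `Host` header still has to carry the original server name so the
    // remote server treats the request as addressed to it.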
    if let Some(host) = host {
        http_request
            .headers_mut()
            .insert(HOST, HeaderValue::from_str(&host).unwrap());
    }

    let mut reqwest_request = reqwest::Request::try_from(http_request)
        .expect("all http requests are valid reqwest requests");

    *reqwest_request.timeout_mut() = Some(Duration::from_secs(30));

    let url = reqwest_request.url().clone();
    let reqwest_response = globals.reqwest_client().execute(reqwest_request).await;

    // Because reqwest::Response -> http::Response is complicated:
    match reqwest_response {
        Ok(mut reqwest_response) => {
            let status = reqwest_response.status();
            let mut http_response = http::Response::builder().status(status);
            let headers = http_response.headers_mut().unwrap();

            for (k, v) in reqwest_response.headers_mut().drain() {
                if let Some(key) = k {
                    headers.insert(key, v);
                }
            }

            let status = reqwest_response.status();

            let body = reqwest_response
                .bytes()
                .await
                .unwrap_or_else(|e| {
                    warn!("server error {}", e);
                    Vec::new().into()
                }) // TODO: handle timeout
                .into_iter()
                .collect::<Vec<_>>();

            if status != 200 {
                info!(
                    "Server returned bad response {} {}\n{}\n{:?}",
                    destination,
                    status,
                    url,
                    utils::string_from_bytes(&body)
                );
            }

            let response = T::IncomingResponse::try_from(
                http_response
                    .body(body)
                    .expect("reqwest body is valid http body"),
            );
            response.map_err(|_| {
                info!(
                    "Server returned invalid response bytes {}\n{}",
                    destination, url
                );
                Error::BadServerResponse("Server returned bad response.")
            })
        }
        Err(e) => Err(e.into()),
    }
}

fn get_ip_with_port(destination_str: String) -> Option<String> {
    if destination_str.parse::<SocketAddr>().is_ok() {
        Some(destination_str)
    } else if let Ok(ip_addr) = destination_str.parse::<IpAddr>() {
        Some(SocketAddr::new(ip_addr, 8448).to_string())
    } else {
        None
    }
}

fn add_port_to_hostname(destination_str: String) -> String {
    match destination_str.find(':') {
        None => destination_str.to_owned() + ":8448",
        Some(_) => destination_str.to_string(),
    }
}

/// Returns: actual_destination, host header
/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names
/// Numbers in comments below refer to bullet points in linked section of specification
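/// For example (hypothetical): `example.com` with no port, no .well-known file and an SRV
/// record pointing at `matrix.example.com:443` resolves to `https://matrix.example.com:443`
/// with a `Host` header of `example.com`.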
async fn find_actual_destination(
    globals: &crate::database::globals::Globals,
    destination: &ServerName,
) -> (String, Option<String>) {
    let mut host = None;

    let destination_str = destination.as_str().to_owned();
    let actual_destination = "https://".to_owned()
        + &match get_ip_with_port(destination_str.clone()) {
            Some(host_port) => {
                // 1: IP literal with provided or default port
                host_port
            }
            None => {
                if destination_str.find(':').is_some() {
                    // 2: Hostname with included port
                    destination_str
                } else {
                    match request_well_known(globals, &destination.as_str()).await {
                        // 3: A .well-known file is available
                        Some(delegated_hostname) => {
                            match get_ip_with_port(delegated_hostname.clone()) {
                                Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file
                                None => {
                                    if delegated_hostname.find(':').is_some() {
                                        // 3.2: Hostname with port in .well-known file
                                        delegated_hostname
                                    } else {
                                        match query_srv_record(globals, &delegated_hostname).await {
                                            // 3.3: SRV lookup successful
                                            Some(hostname) => hostname,
                                            // 3.4: No SRV records, just use the hostname from .well-known
                                            None => add_port_to_hostname(delegated_hostname),
                                        }
                                    }
                                }
                            }
                        }
                        // 4: No .well-known or an error occurred
                        None => {
                            match query_srv_record(globals, &destination_str).await {
                                // 4: SRV record found
                                Some(hostname) => {
                                    host = Some(destination_str.to_owned());
                                    hostname
                                }
                                // 5: No SRV record found
                                None => add_port_to_hostname(destination_str.to_string()),
                            }
                        }
                    }
                }
            }
        };

    (actual_destination, host)
}
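
/// Looks up the SRV record `_matrix._tcp.<hostname>` and, if one exists, returns the first
/// target as `"host:port"` (with the trailing dot of the DNS name stripped).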
async fn query_srv_record(
    globals: &crate::database::globals::Globals,
    hostname: &str,
) -> Option<String> {
    if let Ok(Some(host_port)) = globals
        .dns_resolver()
        .srv_lookup(format!("_matrix._tcp.{}", hostname))
        .await
        .map(|srv| {
            srv.iter().next().map(|result| {
                format!(
                    "{}:{}",
                    result.target().to_string().trim_end_matches('.'),
                    result.port().to_string()
                )
            })
        })
    {
        Some(host_port)
    } else {
        None
    }
}
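
/// Fetches `https://<destination>/.well-known/matrix/server` and returns the delegated
/// server name from its `m.server` field, or `None` if the request or parsing fails.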
pub async fn request_well_known(
    globals: &crate::database::globals::Globals,
    destination: &str,
) -> Option<String> {
    let body: serde_json::Value = serde_json::from_str(
        &globals
            .reqwest_client()
            .get(&format!(
                "https://{}/.well-known/matrix/server",
                destination
            ))
            .send()
            .await
            .ok()?
            .text()
            .await
            .ok()?,
    )
    .ok()?;
    Some(body.get("m.server")?.as_str()?.to_owned())
}

#[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))]
pub fn get_server_version_route(
    db: State<'_, Database>,
) -> ConduitResult<get_server_version::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    Ok(get_server_version::Response {
        server: Some(get_server_version::Server {
            name: Some("Conduit".to_owned()),
            version: Some(env!("CARGO_PKG_VERSION").to_owned()),
        }),
    }
    .into())
}

#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))]
pub fn get_server_keys_route(db: State<'_, Database>) -> Json<String> {
    if !db.globals.allow_federation() {
        // TODO: Use proper types
        return Json("Federation is disabled.".to_owned());
    }

    let mut verify_keys = BTreeMap::new();
    verify_keys.insert(
        ServerSigningKeyId::try_from(
            format!("ed25519:{}", db.globals.keypair().version()).as_str(),
        )
        .expect("found invalid server signing keys in DB"),
        VerifyKey {
            key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD),
        },
    );
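    // What follows builds and signs the standard `GET /_matrix/key/v2/server` response body:
    // the server name, its `ed25519:<version>` verify keys and a `valid_until_ts` two minutes
    // in the future (values depend on this server's keypair).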
    let mut response = serde_json::from_slice(
        http::Response::try_from(get_server_keys::v2::Response {
            server_key: ServerSigningKeys {
                server_name: db.globals.server_name().to_owned(),
                verify_keys,
                old_verify_keys: BTreeMap::new(),
                signatures: BTreeMap::new(),
                valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2),
            },
        })
        .unwrap()
        .body(),
    )
    .unwrap();

    ruma::signatures::sign_json(
        db.globals.server_name().as_str(),
        db.globals.keypair(),
        &mut response,
    )
    .unwrap();

    Json(ruma::serde::to_canonical_json_string(&response).expect("JSON is canonical"))
}

#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))]
pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json<String> {
    get_server_keys_route(db)
}

#[cfg_attr(
    feature = "conduit_bin",
    post("/_matrix/federation/v1/publicRooms", data = "<body>")
)]
pub async fn get_public_rooms_filtered_route(
    db: State<'_, Database>,
    body: Ruma<get_public_rooms_filtered::v1::Request<'_>>,
) -> ConduitResult<get_public_rooms_filtered::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let response = client_server::get_public_rooms_filtered_helper(
        &db,
        None,
        body.limit,
        body.since.as_deref(),
        &body.filter,
        &body.room_network,
    )
    .await?
    .0;

    Ok(get_public_rooms_filtered::v1::Response {
        chunk: response
            .chunk
            .into_iter()
            .map(|c| {
                // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk
                // to ruma::api::client::r0::directory::PublicRoomsChunk
                Ok::<_, Error>(
                    serde_json::from_str(
                        &serde_json::to_string(&c)
                            .expect("PublicRoomsChunk::to_string always works"),
                    )
                    .expect("federation and client-server PublicRoomsChunk are the same type"),
                )
            })
            .filter_map(|r| r.ok())
            .collect(),
        prev_batch: response.prev_batch,
        next_batch: response.next_batch,
        total_room_count_estimate: response.total_room_count_estimate,
    }
    .into())
}

#[cfg_attr(
    feature = "conduit_bin",
    get("/_matrix/federation/v1/publicRooms", data = "<body>")
)]
pub async fn get_public_rooms_route(
    db: State<'_, Database>,
    body: Ruma<get_public_rooms::v1::Request<'_>>,
) -> ConduitResult<get_public_rooms::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let response = client_server::get_public_rooms_filtered_helper(
        &db,
        None,
        body.limit,
        body.since.as_deref(),
        &IncomingFilter::default(),
        &IncomingRoomNetwork::Matrix,
    )
    .await?
    .0;

    Ok(get_public_rooms::v1::Response {
        chunk: response
            .chunk
            .into_iter()
            .map(|c| {
                // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk
                // to ruma::api::client::r0::directory::PublicRoomsChunk
                Ok::<_, Error>(
                    serde_json::from_str(
                        &serde_json::to_string(&c)
                            .expect("PublicRoomsChunk::to_string always works"),
                    )
                    .expect("federation and client-server PublicRoomsChunk are the same type"),
                )
            })
            .filter_map(|r| r.ok())
            .collect(),
        prev_batch: response.prev_batch,
        next_batch: response.next_batch,
        total_room_count_estimate: response.total_room_count_estimate,
    }
    .into())
}

#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub enum PrevEvents<T> {
    Sequential(T),
    Fork(Vec<T>),
}

impl<T> IntoIterator for PrevEvents<T> {
    type Item = T;
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        match self {
            Self::Sequential(item) => vec![item].into_iter(),
            Self::Fork(list) => list.into_iter(),
        }
    }
}

impl<T: Clone> PrevEvents<T> {
    pub fn new(id: &[T]) -> Self {
        match id {
            [] => panic!("All events must have a previous event"),
            [single_id] => Self::Sequential(single_id.clone()),
            rest => Self::Fork(rest.to_vec()),
        }
    }
}
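
// For example, `PrevEvents::new(&[event_id])` yields `Sequential(event_id)` for a single
// previous event, while two or more previous events yield a `Fork` (illustrative usage only).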

#[cfg_attr(
    feature = "conduit_bin",
    put("/_matrix/federation/v1/send/<_>", data = "<body>")
)]
pub async fn send_transaction_message_route<'a>(
    db: State<'a, Database>,
    body: Ruma<send_transaction_message::v1::Request<'_>>,
) -> ConduitResult<send_transaction_message::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    dbg!(&*body);

    for edu in &body.edus {
        match serde_json::from_str::<send_transaction_message::v1::Edu>(edu.json().get()) {
            Ok(edu) => match edu.edu_type.as_str() {
                "m.typing" => {
                    if let Some(typing) = edu.content.get("typing") {
                        if typing.as_bool().unwrap_or_default() {
                            db.rooms.edus.typing_add(
                                &UserId::try_from(edu.content["user_id"].as_str().unwrap())
                                    .unwrap(),
                                &RoomId::try_from(edu.content["room_id"].as_str().unwrap())
                                    .unwrap(),
                                3000 + utils::millis_since_unix_epoch(),
                                &db.globals,
                            )?;
                        } else {
                            db.rooms.edus.typing_remove(
                                &UserId::try_from(edu.content["user_id"].as_str().unwrap())
                                    .unwrap(),
                                &RoomId::try_from(edu.content["room_id"].as_str().unwrap())
                                    .unwrap(),
                                &db.globals,
                            )?;
                        }
                    }
                }
                "m.presence" => {}
                "m.receipt" => {}
                "m.device_list_update" => {}
                _ => {}
            },
            Err(_err) => {
                continue;
            }
        }
    }

    // TODO: For RoomVersion6 we must check that Raw<..> is canonical. Do we do that anywhere?
    // SPEC:
    // Servers MUST strictly enforce the JSON format specified in the appendices.
    // This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of
    // events over federation. For example, the Federation API's /send endpoint would
    // discard the event whereas the Client Server API's /send/{eventType} endpoint
    // would return a M_BAD_JSON error.
    let mut resolved_map = BTreeMap::new();
    'main_pdu_loop: for pdu in &body.pdus {
        // 1. Is a valid event, otherwise it is dropped.
        // Ruma/PduEvent/StateEvent satisfies this
        // We do not add the event_id field to the pdu here because of the signature and hash checks
        let (event_id, value) = crate::pdu::gen_event_id_canonical_json(pdu);
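        // For room versions 4+ the event ID is not part of the event on the wire; it is `$`
        // followed by the URL-safe base64 of the event's reference hash, so we derive it here
        // from the canonical JSON.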

        // If we have no idea about this room, skip the PDU
        let room_id = match value
            .get("room_id")
            .map(|id| match id {
                CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(),
                _ => None,
            })
            .flatten()
        {
            Some(id) => id,
            None => {
                resolved_map.insert(event_id, Err("Event needs a valid RoomId".to_string()));
                continue;
            }
        };
        if !db.rooms.exists(&room_id)? {
            resolved_map.insert(event_id, Err("Room is unknown to this server".to_string()));
            continue;
        }

        let server_name = &body.body.origin;
        let mut pub_key_map = BTreeMap::new();
        if let Some(sig) = value.get("signatures") {
            match sig {
                CanonicalJsonValue::Object(entity) => {
                    for key in entity.keys() {
                        // TODO: save this in a DB maybe...
                        // fetch the public signing key
                        let origin = <&ServerName>::try_from(key.as_str()).unwrap();
                        let keys = fetch_signing_keys(&db, origin).await?;

                        pub_key_map.insert(
                            origin.to_string(),
                            keys.into_iter()
                                .map(|(k, v)| (k.to_string(), v.key))
                                .collect(),
                        );
                    }
                }
                _ => {
                    resolved_map.insert(
                        event_id,
                        Err("`signatures` is not a JSON object".to_string()),
                    );
                    continue;
                }
            }
        } else {
            resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string()));
            continue;
        }
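
        // `pub_key_map` now maps each signing server's name to its
        // `"ed25519:<key version>" -> base64 public key` entries, the shape
        // `ruma::signatures::verify_event` expects when the signatures are checked during
        // validation.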

        // TODO: make this persist but not a DB Tree...
        // This is all the auth_events that have been recursively fetched so they don't have to be
        // deserialized over and over again. This could potentially also be some sort of trie (suffix tree)
        // like structure so that once an auth event is known it would know (using indexes maybe) all of
        // the auth events that it references.
        let mut auth_cache = EventMap::new();

        // 1. check the server is in the room (optional)
        // 2. check content hash, redact if doesn't match
        // 3. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
        // 4. reject "due to auth events" if we can't get all the auth events or some of the auth events are also rejected "due to auth events"
        // 5. reject "due to auth events" if the event doesn't pass auth based on the auth events
        // 6. persist this event as an outlier
        // 7. if not timeline event: stop
        let pdu = match validate_event(
            &db,
            value,
            event_id.clone(),
            &pub_key_map,
            server_name,
            // All the auth events gathered will be here
            &mut auth_cache,
        )
        .await
        {
            Ok(pdu) => pdu,
            Err(e) => {
                resolved_map.insert(event_id, Err(e));
                continue;
            }
        };

        let pdu = Arc::new(pdu.clone());
        // Fetch any unknown prev_events or retrieve them from the DB
        let previous = match fetch_events(
            &db,
            server_name,
            &pub_key_map,
            &pdu.prev_events,
            &mut auth_cache,
        )
        .await
        {
            Ok(mut evs) if evs.len() == 1 => Some(evs.remove(0)),
            _ => None,
        };

        // [auth_cache] At this point we have the auth chain of the incoming event.
        let mut event_map: state_res::EventMap<Arc<PduEvent>> = auth_cache
            .iter()
            .map(|(k, v)| (k.clone(), v.clone()))
            .collect();

        // Check that the event passes auth based on the auth_events
        let is_authed = state_res::event_auth::auth_check(
            &RoomVersionId::Version6,
            &pdu,
            previous.clone(),
            &pdu.auth_events
                .iter()
                .map(|id| {
                    auth_cache
                        .get(id)
                        .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone()))
                        .ok_or_else(|| {
                            Error::Conflict(
                                "Auth event not found, event failed recursive auth checks.",
                            )
                        })
                })
                .collect::<Result<BTreeMap<_, _>>>()?,
            None, // TODO: third party invite
        )
        .map_err(|_e| Error::Conflict("Auth check failed"))?;

        if !is_authed {
            resolved_map.insert(
                pdu.event_id().clone(),
                Err("Event has failed auth check with auth events".into()),
            );
            continue;
        }
        // End of step 4.

        // Step 5. event passes auth based on state at the event
        let (state_at_event, incoming_auth_events): (StateMap<Arc<PduEvent>>, Vec<Arc<PduEvent>>) =
            match db
                .sending
                .send_federation_request(
                    &db.globals,
                    server_name,
                    get_room_state_ids::v1::Request {
                        room_id: pdu.room_id(),
                        event_id: pdu.event_id(),
                    },
                )
                .await
            {
                Ok(res) => {
                    let state = fetch_events(
                        &db,
                        server_name,
                        &pub_key_map,
                        &res.pdu_ids,
                        &mut auth_cache,
                    )
                    .await?;
                    // Sanity check: there are no conflicting events in the state we received
                    let mut seen = BTreeSet::new();
                    for ev in &state {
                        // If the key is already present
                        if !seen.insert((&ev.kind, &ev.state_key)) {
                            todo!("Server sent us an invalid state")
                        }
                    }

                    let state = state
                        .into_iter()
                        .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu))
                        .collect();

                    (
                        state,
                        fetch_events(
                            &db,
                            server_name,
                            &pub_key_map,
                            &res.auth_chain_ids,
                            &mut auth_cache,
                        )
                        .await?
                        .into_iter()
                        .collect(),
                    )
                }
                Err(_) => {
                    resolved_map.insert(
                        pdu.event_id().clone(),
                        Err("Fetching state for event failed".into()),
                    );
                    continue;
                }
            };

        if !state_res::event_auth::auth_check(
            &RoomVersionId::Version6,
            &pdu,
            previous.clone(),
            &state_at_event,
            None, // TODO: third party invite
        )
        .map_err(|_e| Error::Conflict("Auth check failed"))?
        {
            // Event failed auth with state_at
            resolved_map.insert(
                event_id,
                Err("Event has failed auth check with state at the event".into()),
            );
            continue;
        }
        // End of step 5.

        // Gather the forward extremities and resolve
        let fork_states = match forward_extremity_ids(
            &db,
            &pdu,
            server_name,
            &pub_key_map,
            &mut auth_cache,
        )
        .await
        {
            Ok(states) => states,
            Err(_) => {
                resolved_map.insert(event_id, Err("Failed to gather forward extremities".into()));
                continue;
            }
        };

        // Step 6. event passes auth based on state of all forks and current room state
        let state_at_forks = if fork_states.is_empty() {
            // State is empty
            Default::default()
        } else if fork_states.len() == 1 {
            fork_states[0].clone()
        } else {
            // TODO: remove this; it is only here for debugging (Jan 15 2021)
            let mut number_fetches = 0_u32;
            let mut auth_events = vec![];
            // this keeps track of whether we error so we can break out of these inner loops
            // to continue on with the incoming PDUs
            for map in &fork_states {
                let mut state_auth = vec![];
                for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) {
                    let event = match auth_cache.get(auth_id) {
                        Some(aev) => aev.clone(),
                        // We should know about every event at this point but just in case...
                        None => match fetch_events(
                            &db,
                            server_name,
                            &pub_key_map,
                            &[auth_id.clone()],
                            &mut auth_cache,
                        )
                        .await
                        .map(|mut vec| {
                            number_fetches += 1;
                            vec.remove(0)
                        }) {
                            Ok(aev) => aev,
                            Err(_) => {
                                resolved_map.insert(
                                    event_id.clone(),
                                    Err("Event has been soft failed".into()),
                                );
                                continue 'main_pdu_loop;
                            }
                        },
                    };
                    state_auth.push(event);
                }
                auth_events.push(state_auth);
            }
            info!("{} events were not in the auth_cache", number_fetches);

            // Add everything we will need to event_map
            event_map.extend(
                auth_events
                    .iter()
                    .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone())))
                    .flatten(),
            );
            event_map.extend(
                incoming_auth_events
                    .into_iter()
                    .map(|pdu| (pdu.event_id().clone(), pdu)),
            );
            event_map.extend(
                state_at_event
                    .into_iter()
                    .map(|(_, pdu)| (pdu.event_id().clone(), pdu)),
            );

            match state_res::StateResolution::resolve(
                &pdu.room_id,
                &RoomVersionId::Version6,
                &fork_states
                    .into_iter()
                    .map(|map| {
                        map.into_iter()
                            .map(|(k, v)| (k, v.event_id.clone()))
                            .collect::<StateMap<_>>()
                    })
                    .collect::<Vec<_>>(),
                auth_events
                    .into_iter()
                    .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect())
                    .collect(),
                &mut event_map,
            ) {
                Ok(res) => res
                    .into_iter()
                    .map(|(k, v)| (k, Arc::new(db.rooms.get_pdu(&v).unwrap().unwrap())))
                    .collect(),
                Err(_) => {
                    resolved_map.insert(
                        pdu.event_id().clone(),
                        Err("State resolution failed, either an event could not be found or deserialization failed".into()),
                    );
                    continue 'main_pdu_loop;
                }
            }
        };

        if !state_res::event_auth::auth_check(
            &RoomVersionId::Version6,
            &pdu,
            previous,
            &state_at_forks,
            None,
        )
        .map_err(|_e| Error::Conflict("Auth check failed"))?
        {
            // Soft fail, we add the event as an outlier.
            resolved_map.insert(
                pdu.event_id().clone(),
                Err("Event has been soft failed".into()),
            );
        } else {
            append_state(&db, &pdu)?;
            // Event has passed all auth/stateres checks
            resolved_map.insert(pdu.event_id().clone(), Ok(()));
        }
    }

    Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into())
}

/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events
/// Validate any event that is given to us by another server.
///
/// 1. Is a valid event, otherwise it is dropped (PduEvent deserialization satisfies this).
/// 2. Passes signature checks, otherwise event is dropped.
/// 3. Passes hash checks, otherwise it is redacted before being processed further.
/// 4. Passes auth_chain collection (we can gather the events that auth this event recursively).
/// 5. Once the event has passed all checks it can be added as an outlier to the DB.
fn validate_event<'a>(
    db: &'a Database,
    value: CanonicalJsonObject,
    event_id: EventId,
    pub_key_map: &'a PublicKeyMap,
    origin: &'a ServerName,
    auth_cache: &'a mut EventMap<Arc<PduEvent>>,
) -> Pin<Box<dyn Future<Output = StdResult<PduEvent, String>> + 'a + Send>> {
    Box::pin(async move {
        let mut val = signature_and_hash_check(&pub_key_map, value)?;

        // Now that we have checked the signature and hashes we can add the event_id and convert
        // to our PduEvent type, finally verifying the first step listed above
        val.insert(
            "event_id".to_owned(),
            to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"),
        );
        let pdu = serde_json::from_value::<PduEvent>(
            serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"),
        )
        .map_err(|_| "Event is not a valid PDU".to_string())?;

        fetch_check_auth_events(db, origin, pub_key_map, &pdu.auth_events, auth_cache)
            .await
            .map_err(|_| "Event failed auth chain check".to_string())?;

        db.rooms
            .append_pdu_outlier(pdu.event_id(), &pdu)
            .map_err(|e| e.to_string())?;

        Ok(pdu)
    })
}

/// TODO: don't add as outlier if event is fetched as a result of gathering auth_events
/// The check in `fetch_check_auth_events` is that a complete chain is found for the
/// events in `auth_events`. If the chain is found to have any missing events it fails.
///
/// The `auth_cache` is filled instead of returning a `Vec`.
async fn fetch_check_auth_events(
    db: &Database,
    origin: &ServerName,
    key_map: &PublicKeyMap,
    event_ids: &[EventId],
    auth_cache: &mut EventMap<Arc<PduEvent>>,
) -> Result<()> {
    let mut stack = event_ids.to_vec();

    // DFS for auth event chain
    while !stack.is_empty() {
        let ev_id = stack.pop().unwrap();
        if auth_cache.contains_key(&ev_id) {
            continue;
        }

        let ev = fetch_events(db, origin, key_map, &[ev_id.clone()], auth_cache)
            .await
            .map(|mut vec| vec.remove(0))?;

        stack.extend(ev.auth_events());
        auth_cache.insert(ev.event_id().clone(), ev);
    }
    Ok(())
}

/// Find the event and auth it.
///
/// 1. Look in the main timeline (pduid_pdu tree)
/// 2. Look at outlier pdu tree
/// 3. Ask origin server over federation
/// 4. TODO: Ask other servers over federation?
async fn fetch_events(
    db: &Database,
    origin: &ServerName,
    key_map: &PublicKeyMap,
    events: &[EventId],
    auth_cache: &mut EventMap<Arc<PduEvent>>,
) -> Result<Vec<Arc<PduEvent>>> {
    let mut pdus = vec![];
    for id in events {
        let pdu = match db.rooms.get_pdu(&id)? {
            Some(pdu) => Arc::new(pdu),
            None => match db.rooms.get_pdu_outlier(&id)? {
                Some(pdu) => Arc::new(pdu),
                None => match db
                    .sending
                    .send_federation_request(
                        &db.globals,
                        origin,
                        get_event::v1::Request { event_id: &id },
                    )
                    .await
                {
                    Ok(res) => {
                        let (event_id, value) = crate::pdu::gen_event_id_canonical_json(&res.pdu);
                        let pdu = validate_event(db, value, event_id, key_map, origin, auth_cache)
                            .await
                            .map_err(|_| Error::Conflict("Authentication of event failed"))?;

                        Arc::new(pdu)
                    }
                    Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")),
                },
            },
        };
        pdus.push(pdu);
    }
    Ok(pdus)
}

/// Search the DB for the signing keys of the given server; if we don't have them,
/// fetch them from the server and save them to our DB.
async fn fetch_signing_keys(
    db: &Database,
    origin: &ServerName,
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
    match db.globals.signing_keys_for(origin)? {
        keys if !keys.is_empty() => Ok(keys),
        _ => {
            let keys = db
                .sending
                .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new())
                .await
                .map_err(|_| Error::BadServerResponse("Failed to request server keys"))?;
            db.globals.add_signing_key(origin, &keys.server_key)?;
            Ok(keys.server_key.verify_keys)
        }
    }
}
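
// Note on `ruma::signatures::verify_event` below: it returns `Verified::All` when both the
// signatures and the content hash check out, and `Verified::Signatures` when only the
// signatures do; that second case is why the event gets redacted instead of rejected.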
fn signature_and_hash_check(
    pub_key_map: &ruma::signatures::PublicKeyMap,
    value: CanonicalJsonObject,
) -> std::result::Result<CanonicalJsonObject, String> {
    Ok(
        match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) {
            Ok(ver) => {
                if let ruma::signatures::Verified::Signatures = ver {
                    error!("CONTENT HASH FAILED");
                    match ruma::signatures::redact(&value, &RoomVersionId::Version6) {
                        Ok(obj) => obj,
                        Err(_) => return Err("Redaction failed".to_string()),
                    }
                } else {
                    value
                }
            }
            Err(_e) => {
                return Err("Signature verification failed".to_string());
            }
        },
    )
}

async fn forward_extremity_ids(
    db: &Database,
    pdu: &PduEvent,
    origin: &ServerName,
    pub_key_map: &PublicKeyMap,
    auth_cache: &mut EventMap<Arc<PduEvent>>,
) -> Result<Vec<StateMap<Arc<PduEvent>>>> {
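    // The forward extremities are the current "leaf" events of the room's event DAG; the
    // incoming event's prev_events are merged in so the state of every fork is considered.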
    let mut current_leaves = db.rooms.get_pdu_leaves(pdu.room_id())?;

    for incoming_leaf in &pdu.prev_events {
        if !current_leaves.contains(incoming_leaf) {
            current_leaves.push(incoming_leaf.clone());
        }
    }

    let mut fork_states = vec![];
    for id in &current_leaves {
        if let Some(id) = db.rooms.get_pdu_id(id)? {
            let state_hash = db
                .rooms
                .pdu_state_hash(&id)?
                .expect("found pdu with no statehash");
            let state = db
                .rooms
                .state_full(&pdu.room_id, &state_hash)?
                .into_iter()
                .map(|(k, v)| ((k.0, Some(k.1)), Arc::new(v)))
                .collect();

            fork_states.push(state);
        } else {
            let res = db
                .sending
                .send_federation_request(
                    &db.globals,
                    origin,
                    get_room_state_ids::v1::Request {
                        room_id: pdu.room_id(),
                        event_id: id,
                    },
                )
                .await?;

            // TODO: This only adds events to the auth_cache, there is for sure a better way to
            // do this...
            fetch_events(&db, origin, &pub_key_map, &res.auth_chain_ids, auth_cache).await?;

            let state = fetch_events(&db, origin, &pub_key_map, &res.pdu_ids, auth_cache)
                .await?
                .into_iter()
                .map(|pdu| ((pdu.kind.clone(), pdu.state_key.clone()), pdu))
                .collect();

            fork_states.push(state);
        }
    }

    Ok(fork_states)
}

fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> {
    let count = db.globals.next_count()?;
    let mut pdu_id = pdu.room_id.as_bytes().to_vec();
    pdu_id.push(0xff);
    pdu_id.extend_from_slice(&count.to_be_bytes());
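    // The resulting pdu_id layout is `<room id bytes> 0xff <8-byte big-endian count>`, so
    // entries sort by room first and then by insertion order in the key-value store.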

    // We append to state before appending the pdu, so we don't have a moment in time with the
    // pdu without its state. This is okay because append_pdu can't fail.
    let statehashid = db.rooms.append_to_state(&pdu_id, &pdu, &db.globals)?;

    db.rooms.append_pdu(
        &pdu,
        utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"),
        count,
        pdu_id.clone().into(),
        &db,
    )?;

    // We set the room state after inserting the pdu, so that we never have a moment in time
    // where events in the current room state do not exist
    db.rooms.set_room_state(&pdu.room_id, &statehashid)?;

    for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) {
        db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?;
    }

    Ok(())
}

#[cfg_attr(
    feature = "conduit_bin",
    post("/_matrix/federation/v1/get_missing_events/<_>", data = "<body>")
)]
pub fn get_missing_events_route<'a>(
    db: State<'a, Database>,
    body: Ruma<get_missing_events::v1::Request<'_>>,
) -> ConduitResult<get_missing_events::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let mut queued_events = body.latest_events.clone();
    let mut events = Vec::new();
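
    // Walk backwards through prev_events, breadth-first over `queued_events`, stopping at
    // events the requester already has (`earliest_events`) or once `limit` events are collected.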
    let mut i = 0;
    while i < queued_events.len() && events.len() < u64::from(body.limit) as usize {
        if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? {
            if body.earliest_events.contains(
                &serde_json::from_value(
                    pdu.get("event_id")
                        .cloned()
                        .ok_or_else(|| Error::bad_database("Event in db has no event_id field."))?,
                )
                .map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?,
            ) {
                i += 1;
                continue;
            }
            queued_events.extend_from_slice(
                &serde_json::from_value::<Vec<EventId>>(
                    pdu.get("prev_events").cloned().ok_or_else(|| {
                        Error::bad_database("Invalid prev_events field of pdu in db.")
                    })?,
                )
                .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?,
            );
            events.push(serde_json::from_value(pdu).expect("Raw<..> is always valid"));
        }
        i += 1;
    }

    Ok(get_missing_events::v1::Response { events }.into())
}

#[cfg_attr(
    feature = "conduit_bin",
    get("/_matrix/federation/v1/query/profile", data = "<body>")
)]
pub fn get_profile_information_route<'a>(
    db: State<'a, Database>,
    body: Ruma<get_profile_information::v1::Request<'_>>,
) -> ConduitResult<get_profile_information::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let mut displayname = None;
    let mut avatar_url = None;

    match &body.field {
        // TODO: what to do with custom
        Some(ProfileField::_Custom(_s)) => {}
        Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?,
        Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?,
        None => {
            displayname = db.users.displayname(&body.user_id)?;
            avatar_url = db.users.avatar_url(&body.user_id)?;
        }
    }

    Ok(get_profile_information::v1::Response {
        displayname,
        avatar_url,
    }
    .into())
}

/*
#[cfg_attr(
    feature = "conduit_bin",
    get("/_matrix/federation/v2/invite/<_>/<_>", data = "<body>")
)]
pub fn get_user_devices_route<'a>(
    db: State<'a, Database>,
    body: Ruma<membership::v1::Request<'_>>,
) -> ConduitResult<get_profile_information::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let mut displayname = None;
    let mut avatar_url = None;

    match body.field {
        Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?,
        Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?,
        None => {
            displayname = db.users.displayname(&body.user_id)?;
            avatar_url = db.users.avatar_url(&body.user_id)?;
        }
    }

    Ok(get_profile_information::v1::Response {
        displayname,
        avatar_url,
    }
    .into())
}
*/

#[cfg(test)]
mod tests {
    use super::{add_port_to_hostname, get_ip_with_port};

    #[test]
    fn ips_get_default_ports() {
        assert_eq!(
            get_ip_with_port(String::from("1.1.1.1")),
            Some(String::from("1.1.1.1:8448"))
        );
        assert_eq!(
            get_ip_with_port(String::from("dead:beef::")),
            Some(String::from("[dead:beef::]:8448"))
        );
    }

    #[test]
    fn ips_keep_custom_ports() {
        assert_eq!(
            get_ip_with_port(String::from("1.1.1.1:1234")),
            Some(String::from("1.1.1.1:1234"))
        );
        assert_eq!(
            get_ip_with_port(String::from("[dead::beef]:8933")),
            Some(String::from("[dead::beef]:8933"))
        );
    }

    #[test]
    fn hostnames_get_default_ports() {
        assert_eq!(
            add_port_to_hostname(String::from("example.com")),
            "example.com:8448"
        )
    }

    #[test]
    fn hostnames_keep_custom_ports() {
        assert_eq!(
            add_port_to_hostname(String::from("example.com:1337")),
            "example.com:1337"
        )
    }
}