Fix signature/hash checks, fetch recursive auth events
parent 88f3ee489b
commit 27c4e9d9d5
@@ -681,13 +681,8 @@ async fn join_room_by_id_helper(
     pdu_id.push(0xff);
     pdu_id.extend_from_slice(&count.to_be_bytes());
     db.rooms.append_pdu(
-<<<<<<< HEAD
-        &PduEvent::from(&**pdu),
-        utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"),
-=======
         &pdu,
-        &utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"),
->>>>>>> 6232d1f (Update state-res, use the new Event trait)
+        utils::to_canonical_object(&**pdu).expect("Pdu is valid canonical object"),
         count,
         pdu_id.clone().into(),
         &db.globals,

@@ -20,7 +20,7 @@ use ruma::{
     EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
 };
 use sled::IVec;
-use state_res::{event_auth, Error as StateError, Event, StateMap, StateStore};
+use state_res::{event_auth, Event, StateMap};
 
 use std::{
     collections::{BTreeMap, HashMap},

@@ -193,7 +193,7 @@ impl Rooms {
     /// A recursive list of all the auth_events going back to `RoomCreate` for each event in `event_ids`.
     pub fn auth_events_full(
         &self,
-        room_id: &RoomId,
+        _room_id: &RoomId,
         event_ids: &[EventId],
     ) -> Result<Vec<PduEvent>> {
         let mut result = BTreeMap::new();

src/pdu.rs (12 changed lines)
@@ -4,7 +4,7 @@ use ruma::{
         pdu::EventHash, room::member::MemberEventContent, AnyEvent, AnyRoomEvent, AnyStateEvent,
         AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent,
     },
-    serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw},
+    serde::{CanonicalJsonObject, CanonicalJsonValue, Raw},
     EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UInt, UserId,
 };
 use serde::{Deserialize, Serialize};

@@ -286,12 +286,11 @@ impl state_res::Event for PduEvent {
 
 /// Generates a correct eventId for the incoming pdu.
 ///
-/// Returns a tuple of the new `EventId` and the PDU with the eventId inserted as a `serde_json::Value`.
+/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap<String, CanonicalJsonValue>`.
 pub(crate) fn process_incoming_pdu(
     pdu: &Raw<ruma::events::pdu::Pdu>,
 ) -> (EventId, CanonicalJsonObject) {
-    let mut value =
-        serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON");
+    let value = serde_json::from_str(pdu.json().get()).expect("A Raw<...> is always valid JSON");
 
     let event_id = EventId::try_from(&*format!(
         "${}",

@@ -300,11 +299,6 @@ pub(crate) fn process_incoming_pdu(
     ))
     .expect("ruma's reference hashes are valid event ids");
 
-    value.insert(
-        "event_id".to_owned(),
-        to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"),
-    );
-
     (event_id, value)
 }
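
Note: `process_incoming_pdu` now returns the canonical JSON untouched; callers add the `event_id` only after the signature and content-hash checks succeed, because the signed and hashed JSON must not contain that field. A minimal sketch of that caller-side step, assuming the crate-local `PduEvent` type and the ruma helpers already imported in this diff:

    // Sketch (not part of the commit): turn a checked canonical JSON object
    // into a PduEvent by adding the event_id derived from the reference hash.
    use crate::PduEvent; // crate-local type, as imported in server_server.rs
    use ruma::{serde::to_canonical_value, signatures::CanonicalJsonObject, EventId};

    fn finalize_pdu(event_id: &EventId, mut val: CanonicalJsonObject) -> Result<PduEvent, String> {
        // Add the event_id only after signature_and_hash_check succeeded.
        val.insert(
            "event_id".to_owned(),
            to_canonical_value(event_id).map_err(|_| "EventId should serialize".to_string())?,
        );
        serde_json::from_value::<PduEvent>(
            serde_json::to_value(val).map_err(|_| "canonical JSON is valid JSON".to_string())?,
        )
        .map_err(|_| "deserialization into PduEvent failed".to_string())
    }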

src/server_server.rs

@@ -1,4 +1,5 @@
 use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma};
+use get_devices::v1::UserDevice;
 use get_profile_information::v1::ProfileField;
 use http::header::{HeaderValue, AUTHORIZATION, HOST};
 use log::{error, info, warn};

@@ -6,6 +7,7 @@ use rocket::{get, post, put, response::content::Json, State};
 use ruma::{
     api::{
         federation::{
+            device::get_devices,
             directory::{get_public_rooms, get_public_rooms_filtered},
             discovery::{
                 get_server_keys, get_server_version::v1 as get_server_version, ServerSigningKeys,

@@ -18,13 +20,14 @@ use ruma::{
         OutgoingRequest,
     },
     directory::{IncomingFilter, IncomingRoomNetwork},
-    signatures::{CanonicalJsonObject, PublicKeyMap},
+    serde::to_canonical_value,
+    signatures::{CanonicalJsonObject, CanonicalJsonValue, PublicKeyMap},
     EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId,
 };
 use state_res::{Event, StateMap};
 use std::{
     collections::{BTreeMap, BTreeSet},
-    convert::TryFrom,
+    convert::{TryFrom, TryInto},
     fmt::Debug,
     net::{IpAddr, SocketAddr},
     sync::Arc,

@@ -519,6 +522,8 @@ pub async fn send_transaction_message_route<'a>(
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    dbg!(&*body);
+
     for edu in &body.edus {
         match serde_json::from_str::<send_transaction_message::v1::Edu>(edu.json().get()) {
             Ok(edu) => match edu.edu_type.as_str() {

@@ -546,6 +551,7 @@ pub async fn send_transaction_message_route<'a>(
                     }
                     "m.presence" => {}
                     "m.receipt" => {}
+                    "m.device_list_update" => {}
                     _ => {}
                 },
                 Err(_err) => {

@@ -565,21 +571,52 @@ pub async fn send_transaction_message_route<'a>(
     for pdu in &body.pdus {
         // 1. Is a valid event, otherwise it is dropped.
         // Ruma/PduEvent/StateEvent satisfies this
+        // We do not add the event_id field to the pdu here because of signature and hashes checks
         // TODO: ruma may solve this but our `process_incoming_pdu` needs to return a Result then
         let (event_id, value) = crate::pdu::process_incoming_pdu(pdu);
 
         // 2. Passes signature checks, otherwise event is dropped.
         // 3. Passes hash checks, otherwise it is redacted before being processed further.
-        let keys = db.globals.keypair();
-        let mut pub_key_set = BTreeMap::new();
-        pub_key_set.insert(
-            "ed25519:1".to_string(),
-            String::from_utf8(keys.public_key().to_vec()).expect("public key is valid utf8"),
-        );
+        let server_name = body.body.origin.clone();
         let mut pub_key_map = BTreeMap::new();
-        pub_key_map.insert("domain".to_string(), pub_key_set);
-
-        let pdu = match signature_and_hash_check(&pub_key_map, value) {
+        if let Some(sig) = value.get("signatures") {
+            match sig {
+                CanonicalJsonValue::Object(entity) => {
+                    for key in entity.keys() {
+                        // TODO: save this in a DB maybe...
+                        // fetch the public signing key
+                        let res = db
+                            .sending
+                            .send_federation_request(
+                                &db.globals,
+                                Box::<ServerName>::try_from(key.to_string()).unwrap(),
+                                get_server_keys::v2::Request::new(),
+                            )
+                            .await?;
+
+                        pub_key_map.insert(
+                            res.server_key.server_name.to_string(),
+                            res.server_key
+                                .verify_keys
+                                .into_iter()
+                                .map(|(k, v)| (k.to_string(), v.key))
+                                .collect(),
+                        );
+                    }
+                }
+                _ => {
+                    resolved_map.insert(
+                        event_id,
+                        Err("`signatures` is not a JSON object".to_string()),
+                    );
+                    continue;
+                }
+            }
+        } else {
+            resolved_map.insert(event_id, Err("No field `signatures` in JSON".to_string()));
+            continue;
+        }
+
+        let mut val = match signature_and_hash_check(&pub_key_map, value) {
             Ok(pdu) => pdu,
             Err(e) => {
                 resolved_map.insert(event_id, Err(e));
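
Note: the loop above replaces the old hard-coded `"domain"` / `"ed25519:1"` entry; for every server named in the PDU's `signatures` object it fetches that server's keys over federation and folds them into the `PublicKeyMap` that `ruma::signatures::verify_event` consumes (server name → key ID → base64 key). A minimal sketch of that fold, assuming the ruma 0.x-era map whose leaves are base64 `String`s, as the `.map(|(k, v)| (k.to_string(), v.key))` above implies, and pre-stringified key pairs instead of the `get_server_keys` response types:

    // Sketch (not part of the commit): add one server's verify keys to the map
    // that ruma::signatures::verify_event expects.
    use ruma::signatures::PublicKeyMap;
    use std::collections::BTreeMap;

    fn add_server_keys(
        pub_key_map: &mut PublicKeyMap,
        server_name: String,
        verify_keys: impl IntoIterator<Item = (String, String)>, // (key id, base64 public key)
    ) {
        let keys: BTreeMap<String, String> = verify_keys.into_iter().collect();
        pub_key_map.insert(server_name, keys);
    }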

@@ -587,50 +624,75 @@ pub async fn send_transaction_message_route<'a>(
             }
         };
 
+        // Now that we have checked the signature and hashes we can add the eventID and convert
+        // to our PduEvent type
+        val.insert(
+            "event_id".to_owned(),
+            to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"),
+        );
+        let pdu = serde_json::from_value::<PduEvent>(
+            serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"),
+        )
+        .expect("Pdu is valid Canonical JSON Map");
+
         // If we have no idea about this room skip the PDU
         if !db.rooms.exists(&pdu.room_id)? {
             resolved_map.insert(event_id, Err("Room is unknown to this server".into()));
             continue;
         }
 
-        let server_name = body.body.origin.clone();
         let event = Arc::new(pdu.clone());
-        // Fetch any unknown events or retrieve them from the DB
+        dbg!(&*event);
+        // Fetch any unknown prev_events or retrieve them from the DB
         let previous =
-            match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await? {
-                mut evs if evs.len() == 1 => Some(Arc::new(evs.remove(0))),
+            match fetch_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events).await {
+                Ok(mut evs) if evs.len() == 1 => Some(Arc::new(evs.remove(0))),
                 _ => None,
             };
 
         // 4. Passes authorization rules based on the event's auth events, otherwise it is rejected.
-        // TODO: To me this sounds more like the auth_events should be "get the pdu.auth_events" not
-        // the auth events that would be correct for this pdu. Put another way we should use the auth events
-        // the pdu claims are its auth events
-        let auth_events = db.rooms.get_auth_events(
-            &pdu.room_id,
-            &pdu.kind,
-            &pdu.sender,
-            pdu.state_key.as_deref(),
-            pdu.content.clone(),
-        )?;
+        // Recursively gather all auth events checking that the previous auth events are valid.
+        let auth_events: Vec<PduEvent> =
+            match fetch_check_auth_events(&db, server_name.clone(), &pub_key_map, &pdu.prev_events)
+                .await
+            {
+                Ok(events) => events,
+                Err(_) => {
+                    resolved_map.insert(
+                        pdu.event_id,
+                        Err("Failed to recursively gather auth events".into()),
+                    );
+                    continue;
+                }
+            };
 
         let mut event_map: state_res::EventMap<Arc<PduEvent>> = auth_events
             .iter()
-            .map(|(_k, v)| (v.event_id().clone(), Arc::new(v.clone())))
+            .map(|v| (v.event_id().clone(), Arc::new(v.clone())))
            .collect();
 
-        if !state_res::event_auth::auth_check(
+        // Check that the event passes auth based on the auth_events
+        let is_authed = state_res::event_auth::auth_check(
             &RoomVersionId::Version6,
             &event,
             previous.clone(),
-            &auth_events
-                .into_iter()
-                .map(|(k, v)| (k, Arc::new(v)))
-                .collect(),
+            &pdu.auth_events
+                .iter()
+                .map(|id| {
+                    event_map
+                        .get(id)
+                        .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone()))
+                        .ok_or_else(|| {
+                            Error::Conflict(
+                                "Auth event not found, event failed recursive auth checks.",
+                            )
+                        })
+                })
+                .collect::<Result<BTreeMap<_, _>>>()?,
             None, // TODO: third party invite
         )
-        .map_err(|_e| Error::Conflict("Auth check failed"))?
-        {
+        .map_err(|_e| Error::Conflict("Auth check failed"))?;
+        if !is_authed {
             resolved_map.insert(
                 pdu.event_id,
                 Err("Event has failed auth check with auth events".into()),
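
Note: step 4 no longer recomputes auth events locally with `get_auth_events`; the IDs the PDU itself claims in `auth_events` are looked up in the recursively fetched chain and turned into the state map that `state_res::event_auth::auth_check` takes, keyed by event type and state key. A small sketch of that lookup, assuming the crate-local `PduEvent` implements state-res's `Event` trait (providing `kind()` and `state_key()`), as this commit sets up:

    // Sketch (not part of the commit): resolve the auth event IDs a PDU claims
    // against the EventMap built from the recursively fetched auth chain. A
    // missing entry means the chain is incomplete and the event is rejected.
    use crate::PduEvent; // crate-local type, as imported in server_server.rs
    use ruma::EventId;
    use state_res::Event;
    use std::sync::Arc;

    fn claimed_auth_state(
        claimed: &[EventId],
        event_map: &state_res::EventMap<Arc<PduEvent>>,
    ) -> Result<state_res::StateMap<Arc<PduEvent>>, String> {
        claimed
            .iter()
            .map(|id| {
                event_map
                    .get(id)
                    .map(|pdu| ((pdu.kind(), pdu.state_key()), pdu.clone()))
                    .ok_or_else(|| format!("auth event {} not in fetched chain", id))
            })
            .collect()
    }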

@@ -816,31 +878,92 @@ pub async fn send_transaction_message_route<'a>(
         }
     }
 
-    Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into())
+    Ok(dbg!(send_transaction_message::v1::Response { pdus: resolved_map }).into())
 }
 
 fn signature_and_hash_check(
     pub_key_map: &ruma::signatures::PublicKeyMap,
     value: CanonicalJsonObject,
-) -> std::result::Result<PduEvent, String> {
-    let val = match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) {
-        Ok(ver) => {
-            if let ruma::signatures::Verified::Signatures = ver {
-                match ruma::signatures::redact(&value, &RoomVersionId::Version6) {
-                    Ok(obj) => obj,
-                    Err(_) => return Err("Redaction failed".into()),
-                }
-            } else {
-                value
-            }
-        }
-        Err(_e) => return Err("Signature verification failed".into()),
-    };
-
-    serde_json::from_value::<PduEvent>(
-        serde_json::to_value(val).expect("CanonicalJsonObj is a valid JsonValue"),
-    )
-    .map_err(|_| "Deserialization failed for JSON value".into())
+) -> std::result::Result<CanonicalJsonObject, String> {
+    Ok(
+        match ruma::signatures::verify_event(pub_key_map, &value, &RoomVersionId::Version6) {
+            Ok(ver) => {
+                if let ruma::signatures::Verified::Signatures = ver {
+                    error!("CONTENT HASH FAILED");
+                    match ruma::signatures::redact(&value, &RoomVersionId::Version6) {
+                        Ok(obj) => obj,
+                        Err(_) => return Err("Redaction failed".to_string()),
+                    }
+                } else {
+                    value
+                }
+            }
+            Err(_e) => {
+                return Err("Signature verification failed".to_string());
+            }
+        },
+    )
 }
 
+/// The check in `fetch_check_auth_events` is that a complete chain is found for the
+/// events `auth_events`. If the chain is found to have missing events it fails.
+async fn fetch_check_auth_events(
+    db: &Database,
+    origin: Box<ServerName>,
+    key_map: &PublicKeyMap,
+    event_ids: &[EventId],
+) -> Result<Vec<PduEvent>> {
+    let mut result = BTreeMap::new();
+    let mut stack = event_ids.to_vec();
+
+    // DFS for auth event chain
+    while !stack.is_empty() {
+        let ev_id = stack.pop().unwrap();
+        if result.contains_key(&ev_id) {
+            continue;
+        }
+
+        let ev = match db.rooms.get_pdu(&ev_id)? {
+            Some(pdu) => pdu,
+            None => match db
+                .sending
+                .send_federation_request(
+                    &db.globals,
+                    origin.clone(),
+                    get_event::v1::Request { event_id: &ev_id },
+                )
+                .await
+            {
+                Ok(res) => {
+                    let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu);
+                    match signature_and_hash_check(key_map, value) {
+                        Ok(mut val) => {
+                            val.insert(
+                                "event_id".to_owned(),
+                                to_canonical_value(&event_id)
+                                    .expect("EventId is a valid CanonicalJsonValue"),
+                            );
+                            serde_json::from_value::<PduEvent>(
+                                serde_json::to_value(val)
+                                    .expect("CanonicalJsonObj is a valid JsonValue"),
+                            )
+                            .expect("Pdu is valid Canonical JSON Map")
+                        }
+                        Err(e) => {
+                            // TODO: I would assume we just keep going
+                            error!("{:?}", e);
+                            continue;
+                        }
+                    }
+                }
+                Err(_) => return Err(Error::BadServerResponse("Failed to fetch event")),
+            },
+        };
+        stack.extend(ev.auth_events());
+        result.insert(ev.event_id().clone(), ev);
+    }
+
+    Ok(result.into_iter().map(|(_, v)| v).collect())
+}
+
 /// TODO: this needs to add events to the DB in a way that does not
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
Ok(res) => {
|
Ok(res) => {
|
||||||
let (_, value) = crate::pdu::process_incoming_pdu(&res.pdu);
|
let (event_id, value) = crate::pdu::process_incoming_pdu(&res.pdu);
|
||||||
match signature_and_hash_check(key_map, value) {
|
match signature_and_hash_check(key_map, value) {
|
||||||
Ok(pdu) => {
|
Ok(mut val) => {
|
||||||
// TODO: add to our DB somehow?
|
// TODO: add to our DB somehow?
|
||||||
|
val.insert(
|
||||||
|
"event_id".to_owned(),
|
||||||
|
to_canonical_value(&event_id)
|
||||||
|
.expect("EventId is a valid CanonicalJsonValue"),
|
||||||
|
);
|
||||||
|
let pdu = serde_json::from_value::<PduEvent>(
|
||||||
|
serde_json::to_value(val)
|
||||||
|
.expect("CanonicalJsonObj is a valid JsonValue"),
|
||||||
|
)
|
||||||
|
.expect("Pdu is valid Canonical JSON Map");
|
||||||
|
|
||||||
pdus.push(pdu);
|
pdus.push(pdu);
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
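
Note: `fetch_check_auth_events` (added in the previous hunk) and `fetch_events` above share the same fetch-check-convert pattern per event; the auth-chain variant additionally walks the auth-event graph depth-first with an explicit stack until every referenced event back to the room's create event is present, failing if any link is missing. A self-contained toy of that traversal over plain strings (hypothetical data, not Conduit types):

    // Toy auth-event graph: event id -> ids of its auth events.
    use std::collections::{BTreeMap, HashMap};

    fn auth_chain(
        graph: &HashMap<&str, Vec<&str>>,
        start: &[&str],
    ) -> Option<BTreeMap<String, Vec<String>>> {
        let mut result = BTreeMap::new();
        let mut stack: Vec<String> = start.iter().map(|s| s.to_string()).collect();

        // DFS with an explicit stack; `result` doubles as the visited set.
        while let Some(ev_id) = stack.pop() {
            if result.contains_key(&ev_id) {
                continue;
            }
            // A missing event fails the whole chain, mirroring the early
            // return in fetch_check_auth_events.
            let auth = graph.get(ev_id.as_str())?;
            stack.extend(auth.iter().map(|s| s.to_string()));
            result.insert(ev_id, auth.iter().map(|s| s.to_string()).collect());
        }
        Some(result)
    }

    fn main() {
        let mut graph = HashMap::new();
        graph.insert("$create", vec![]);
        graph.insert("$power", vec!["$create"]);
        graph.insert("$member", vec!["$create", "$power"]);
        // Gathers $member, $power and $create; returns None if any id is unknown.
        println!("{:?}", auth_chain(&graph, &["$member"]));
    }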

@@ -898,7 +1032,7 @@ fn append_state(db: &Database, pdu: &PduEvent) -> Result<()> {
     db.rooms.append_to_state(&pdu_id, pdu, &db.globals)?;
     db.rooms.append_pdu(
         pdu,
-        &utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"),
+        utils::to_canonical_object(pdu).expect("Pdu is valid canonical object"),
         count,
         pdu_id.clone().into(),
         &db.globals,