Fix review issues; remove EventHashes from prev/auth_events in StateEvent

The latest state-res crate uses ruma's PduRoomV3 PDUs, which don't have
tuples of (EventId, EventHash) like previous versions did (this was
left over from rebasing onto master). The Media DB
now takes an optional content_type, like the updated ruma structs.
next
Devin Ragotzy 2020-11-11 14:30:12 -05:00 committed by Timo Kösters
parent acd144e934
commit 234b226468
No known key found for this signature in database
GPG Key ID: 24DA7517711A2BA4
5 changed files with 23 additions and 26 deletions

View File

@ -39,7 +39,7 @@ pub async fn create_content_route(
db.media.create( db.media.create(
mxc.clone(), mxc.clone(),
&body.filename.as_deref(), &body.filename.as_deref(),
body.content_type.as_deref().unwrap_or("img"), // TODO this is now optional handle &body.content_type.as_deref(), // TODO this is now optional handle
&body.file, &body.file,
)?; )?;
@ -85,10 +85,7 @@ pub async fn get_content_route(
db.media.create( db.media.create(
mxc, mxc,
&get_content_response.content_disposition.as_deref(), &get_content_response.content_disposition.as_deref(),
get_content_response // TODO this is now optional handle &get_content_response.content_type.as_deref(),
.content_type
.as_deref()
.unwrap_or("img"),
&get_content_response.file, &get_content_response.file,
)?; )?;
@ -142,10 +139,7 @@ pub async fn get_content_thumbnail_route(
db.media.upload_thumbnail( db.media.upload_thumbnail(
mxc, mxc,
&None, &None,
get_thumbnail_response &get_thumbnail_response.content_type,
.content_type
.as_deref()
.unwrap_or("img"), // TODO now optional, deal with it somehow
body.width.try_into().expect("all UInts are valid u32s"), body.width.try_into().expect("all UInts are valid u32s"),
body.height.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"),
&get_thumbnail_response.file, &get_thumbnail_response.file,

View File

@ -20,7 +20,7 @@ impl Media {
&self, &self,
mxc: String, mxc: String,
filename: &Option<&str>, filename: &Option<&str>,
content_type: &str, content_type: &Option<&str>,
file: &[u8], file: &[u8],
) -> Result<()> { ) -> Result<()> {
let mut key = mxc.as_bytes().to_vec(); let mut key = mxc.as_bytes().to_vec();
@ -30,7 +30,12 @@ impl Media {
key.push(0xff); key.push(0xff);
key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default()); key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default());
key.push(0xff); key.push(0xff);
key.extend_from_slice(content_type.as_bytes()); key.extend_from_slice(
content_type
.as_ref()
.map(|c| c.as_bytes())
.unwrap_or_default(),
);
self.mediaid_file.insert(key, file)?; self.mediaid_file.insert(key, file)?;
@ -42,7 +47,7 @@ impl Media {
&self, &self,
mxc: String, mxc: String,
filename: &Option<String>, filename: &Option<String>,
content_type: &str, content_type: &Option<String>,
width: u32, width: u32,
height: u32, height: u32,
file: &[u8], file: &[u8],
@ -54,7 +59,12 @@ impl Media {
key.push(0xff); key.push(0xff);
key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default()); key.extend_from_slice(filename.as_ref().map(|f| f.as_bytes()).unwrap_or_default());
key.push(0xff); key.push(0xff);
key.extend_from_slice(content_type.as_bytes()); key.extend_from_slice(
content_type
.as_ref()
.map(|c| c.as_bytes())
.unwrap_or_default(),
);
self.mediaid_file.insert(key, file)?; self.mediaid_file.insert(key, file)?;

View File

@ -647,6 +647,7 @@ impl Rooms {
} }
/// Creates a new persisted data unit and adds it to a room. /// Creates a new persisted data unit and adds it to a room.
#[allow(clippy::too_many_arguments)]
pub fn build_and_append_pdu( pub fn build_and_append_pdu(
&self, &self,
pdu_builder: PduBuilder, pdu_builder: PduBuilder,

View File

@ -225,7 +225,7 @@ impl PduEvent {
impl From<&state_res::StateEvent> for PduEvent { impl From<&state_res::StateEvent> for PduEvent {
fn from(pdu: &state_res::StateEvent) -> Self { fn from(pdu: &state_res::StateEvent) -> Self {
Self { Self {
event_id: pdu.event_id().clone(), event_id: pdu.event_id(),
room_id: pdu.room_id().unwrap().clone(), room_id: pdu.room_id().unwrap().clone(),
sender: pdu.sender().clone(), sender: pdu.sender().clone(),
origin_server_ts: (pdu origin_server_ts: (pdu
@ -260,17 +260,9 @@ impl PduEvent {
"type": self.kind, "type": self.kind,
"content": self.content, "content": self.content,
"state_key": self.state_key, "state_key": self.state_key,
"prev_events": self.prev_events "prev_events": self.prev_events,
.iter()
// TODO How do we create one of these
.map(|id| (id, EventHash { sha256: "hello".into() }))
.collect::<Vec<_>>(),
"depth": self.depth, "depth": self.depth,
"auth_events": self.auth_events "auth_events": self.auth_events,
.iter()
// TODO How do we create one of these
.map(|id| (id, EventHash { sha256: "hello".into() }))
.collect::<Vec<_>>(),
"redacts": self.redacts, "redacts": self.redacts,
"unsigned": self.unsigned, "unsigned": self.unsigned,
"hashes": self.hashes, "hashes": self.hashes,

View File

@ -419,7 +419,7 @@ pub async fn send_transaction_message_route<'a>(
} }
} }
// TODO: For RoomVersion6 we must check that Raw<..> is canonical do we? // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?
// SPEC: // SPEC:
// Servers MUST strictly enforce the JSON format specified in the appendices. // Servers MUST strictly enforce the JSON format specified in the appendices.
// This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of // This translates to a 400 M_BAD_JSON error on most endpoints, or discarding of
@ -554,7 +554,7 @@ pub async fn send_transaction_message_route<'a>(
// TODO we may not want the auth events chained in here for resolution? // TODO we may not want the auth events chained in here for resolution?
their_current_state their_current_state
.iter() .iter()
.map(|(_id, v)| ((v.kind(), v.state_key()), v.event_id().clone())) .map(|(_id, v)| ((v.kind(), v.state_key()), v.event_id()))
.collect::<BTreeMap<_, _>>(), .collect::<BTreeMap<_, _>>(),
], ],
Some( Some(