conduit/src/database.rs

684 lines
25 KiB
Rust
Raw Normal View History

2021-06-08 16:10:00 +00:00
pub mod abstraction;
pub mod account_data;
2020-11-09 11:21:04 +00:00
pub mod admin;
pub mod appservice;
pub mod globals;
pub mod key_backups;
pub mod media;
2021-07-01 19:38:25 +00:00
pub mod proxy;
pub mod pusher;
pub mod rooms;
2020-09-15 14:13:54 +00:00
pub mod sending;
2020-08-25 11:24:38 +00:00
pub mod transaction_ids;
pub mod uiaa;
pub mod users;
use crate::{utils, Error, Result};
2021-06-08 16:10:00 +00:00
use abstraction::DatabaseEngine;
use directories::ProjectDirs;
use log::error;
2021-06-30 07:52:01 +00:00
use lru_cache::LruCache;
2021-07-14 07:07:08 +00:00
use rocket::{
futures::{channel::mpsc, stream::FuturesUnordered, StreamExt},
2021-07-14 09:28:24 +00:00
outcome::{try_outcome, IntoOutcome},
2021-07-14 07:07:08 +00:00
request::{FromRequest, Request},
2021-07-14 12:50:07 +00:00
Shutdown, State,
2021-07-14 07:07:08 +00:00
};
use ruma::{DeviceId, ServerName, UserId};
2021-07-14 07:07:08 +00:00
use serde::{de::IgnoredAny, Deserialize};
2021-06-08 16:24:36 +00:00
use std::{
2021-07-14 07:07:08 +00:00
collections::{BTreeMap, HashMap},
2021-06-08 16:24:36 +00:00
fs::{self, remove_dir_all},
io::Write,
2021-07-14 07:07:08 +00:00
ops::Deref,
path::Path,
2021-06-08 16:24:36 +00:00
sync::{Arc, RwLock},
};
2021-07-14 07:07:08 +00:00
use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore};
2021-07-01 19:38:25 +00:00
use self::proxy::ProxyConfig;
/// Runtime configuration, deserialized from the conduit config file.
///
/// Unknown keys are collected in `catchall` (via `#[serde(flatten)]`) so
/// `warn_deprecated` can report options that are no longer used.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    /// The server name this homeserver identifies as (e.g. "example.com").
    server_name: Box<ServerName>,
    /// Filesystem path of the database directory.
    database_path: String,
    #[serde(default = "default_db_cache_capacity_mb")]
    db_cache_capacity_mb: f64,
    #[serde(default = "default_sqlite_read_pool_size")]
    sqlite_read_pool_size: usize,
    /// Whether the periodic WAL-clean timer runs (on unix, SIGHUP can still
    /// trigger a WAL flush even when this is disabled — see
    /// `Database::start_wal_clean_task`).
    #[serde(default = "true_fn")]
    sqlite_wal_clean_timer: bool,
    #[serde(default = "default_sqlite_wal_clean_second_interval")]
    sqlite_wal_clean_second_interval: u32,
    /// How long the WAL-clean task waits for the exclusive write lock before
    /// giving up for this round.
    #[serde(default = "default_sqlite_wal_clean_second_timeout")]
    sqlite_wal_clean_second_timeout: u32,
    #[serde(default = "default_sqlite_spillover_reap_fraction")]
    sqlite_spillover_reap_fraction: u32,
    #[serde(default = "default_sqlite_spillover_reap_interval_secs")]
    sqlite_spillover_reap_interval_secs: u32,
    /// Maximum request body size in bytes; values below 1024 are warned about
    /// at startup in `Database::load_or_create`.
    #[serde(default = "default_max_request_size")]
    max_request_size: u32,
    /// Upper bound for concurrent outgoing federation requests (used to size
    /// the sending semaphore).
    #[serde(default = "default_max_concurrent_requests")]
    max_concurrent_requests: u16,
    #[serde(default = "true_fn")]
    allow_registration: bool,
    #[serde(default = "true_fn")]
    allow_encryption: bool,
    #[serde(default = "false_fn")]
    allow_federation: bool,
    #[serde(default = "false_fn")]
    pub allow_jaeger: bool,
    #[serde(default)]
    proxy: ProxyConfig,
    jwt_secret: Option<String>,
    #[serde(default = "Vec::new")]
    trusted_servers: Vec<Box<ServerName>>,
    #[serde(default = "default_log")]
    pub log: String,
    /// Any config keys not matched above land here, so deprecated or
    /// misspelled options can be detected.
    #[serde(flatten)]
    catchall: BTreeMap<String, IgnoredAny>,
}
/// Config keys that are no longer used but may still appear in old files.
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];

impl Config {
    /// Logs a warning for every deprecated key present in the parsed config,
    /// followed by a general pointer to the documentation if any were found.
    pub fn warn_deprecated(&self) {
        // Gather the offending keys first, then report them.
        let deprecated: Vec<_> = self
            .catchall
            .keys()
            .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
            .collect();

        for key in &deprecated {
            log::warn!("Config parameter {} is deprecated", key);
        }

        if !deprecated.is_empty() {
            log::warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
        }
    }
}
/// Serde default helper: always `false`.
fn false_fn() -> bool {
    false
}
/// Serde default helper: always `true`.
fn true_fn() -> bool {
    true
}
2021-07-14 07:07:08 +00:00
/// Default database cache capacity: 200 MB.
fn default_db_cache_capacity_mb() -> f64 {
    200.0
}
/// Default sqlite read-pool size: one reader per logical CPU, but at least one.
fn default_sqlite_read_pool_size() -> usize {
    std::cmp::max(num_cpus::get(), 1)
}
/// Default WAL-clean interval: one hour, expressed in seconds.
fn default_sqlite_wal_clean_second_interval() -> u32 {
    60 * 60
}
/// Default timeout (seconds) for acquiring the write lock during WAL cleanup.
fn default_sqlite_wal_clean_second_timeout() -> u32 {
    2
}
2021-07-19 14:25:41 +00:00
/// Default spillover reap fraction denominator (reap 1/2 per round).
fn default_sqlite_spillover_reap_fraction() -> u32 {
    2
}
/// Default interval (seconds) between spillover reap rounds.
fn default_sqlite_spillover_reap_interval_secs() -> u32 {
    60
}
/// Default maximum request body size: 20 MiB.
fn default_max_request_size() -> u32 {
    20 * 1024 * 1024
}
2020-05-06 13:36:44 +00:00
2020-12-19 15:00:11 +00:00
/// Default cap on concurrent outgoing requests.
fn default_max_concurrent_requests() -> u16 {
    100
}
/// Default log filter directive string.
fn default_log() -> String {
    String::from("info,state_res=warn,rocket=off,_=off,sled=off")
}
// Selects the storage backend: `Engine` aliases whichever abstraction matches
// the enabled cargo feature.
// NOTE(review): if more than one of these features is enabled at once, the
// alias is defined multiple times and the build fails — confirm the features
// are mutually exclusive in Cargo.toml.
#[cfg(feature = "sled")]
pub type Engine = abstraction::sled::Engine;

#[cfg(feature = "rocksdb")]
pub type Engine = abstraction::rocksdb::Engine;

#[cfg(feature = "sqlite")]
pub type Engine = abstraction::sqlite::Engine;
2021-06-08 16:10:00 +00:00
/// The fully-opened database: the engine handle plus one typed accessor
/// struct per functional area, each owning its key-value trees
/// (see `load_or_create`, which opens every tree).
pub struct Database {
    // Engine handle; used by `flush`/`flush_wal` and keeps the engine alive
    // for as long as the tree handles are in use.
    _db: Arc<Engine>,
    pub globals: globals::Globals,
    pub users: users::Users,
    pub uiaa: uiaa::Uiaa,
    pub rooms: rooms::Rooms,
    pub account_data: account_data::AccountData,
    pub media: media::Media,
    pub key_backups: key_backups::KeyBackups,
    pub transaction_ids: transaction_ids::TransactionIds,
    pub sending: sending::Sending,
    pub admin: admin::Admin,
    pub appservice: appservice::Appservice,
    pub pusher: pusher::PushData,
}
impl Database {
/// Tries to remove the old database but ignores all errors.
2020-06-09 13:13:17 +00:00
pub fn try_remove(server_name: &str) -> Result<()> {
2020-04-11 18:03:22 +00:00
let mut path = ProjectDirs::from("xyz", "koesters", "conduit")
2020-11-15 11:17:21 +00:00
.ok_or_else(|| Error::bad_config("The OS didn't return a valid home directory path."))?
.data_dir()
.to_path_buf();
2020-05-06 13:36:44 +00:00
path.push(server_name);
let _ = remove_dir_all(path);
2020-06-09 13:13:17 +00:00
Ok(())
}
2021-07-14 07:07:08 +00:00
    /// Inspects the database directory and refuses to start when only an old
    /// sled database is present (it must be migrated to sqlite first).
    ///
    /// NOTE(review): this body is gated on `feature = "backend_sqlite"` while
    /// the rest of this file gates sqlite code on `feature = "sqlite"` —
    /// confirm both feature names exist and are enabled together, otherwise
    /// this check silently compiles to a no-op.
    fn check_sled_or_sqlite_db(config: &Config) -> Result<()> {
        let path = Path::new(&config.database_path);

        #[cfg(feature = "backend_sqlite")]
        {
            // The marker files identify which backend wrote this directory.
            let sled_exists = path.join("db").exists();
            let sqlite_exists = path.join("conduit.db").exists();
            if sled_exists {
                if sqlite_exists {
                    // most likely an in-place directory, only warn
                    log::warn!("Both sled and sqlite databases are detected in database directory");
                    log::warn!("Currently running from the sqlite database, but consider removing sled database files to free up space")
                } else {
                    log::error!(
                        "Sled database detected, conduit now uses sqlite for database operations"
                    );
                    log::error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite");
                    return Err(Error::bad_config(
                        "sled database detected, migrate to sqlite",
                    ));
                }
            }
        }

        Ok(())
    }
    /// Load an existing database or create a new one.
    ///
    /// Opens every key-value tree, runs pending schema migrations (versions
    /// 0..=4), wires up the admin and sending background handlers, and — for
    /// the sqlite backend — starts the WAL-clean and spillover-reap tasks.
    pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
        Self::check_sled_or_sqlite_db(&config)?;

        let builder = Engine::open(&config)?;

        // A sub-1KB limit would reject essentially every request; warn loudly
        // but keep starting.
        if config.max_request_size < 1024 {
            eprintln!("ERROR: Max request size is less than 1KB. Please increase it.");
        }

        // Channels feeding the admin and sending background handlers started
        // further below.
        let (admin_sender, admin_receiver) = mpsc::unbounded();
        let (sending_sender, sending_receiver) = mpsc::unbounded();

        let db = Arc::new(TokioRwLock::from(Self {
            _db: builder.clone(),
            users: users::Users {
                userid_password: builder.open_tree("userid_password")?,
                userid_displayname: builder.open_tree("userid_displayname")?,
                userid_avatarurl: builder.open_tree("userid_avatarurl")?,
                userdeviceid_token: builder.open_tree("userdeviceid_token")?,
                userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?,
                userid_devicelistversion: builder.open_tree("userid_devicelistversion")?,
                token_userdeviceid: builder.open_tree("token_userdeviceid")?,
                onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?,
                userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?,
                keychangeid_userid: builder.open_tree("keychangeid_userid")?,
                keyid_key: builder.open_tree("keyid_key")?,
                userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
                userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
                userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
                todeviceid_events: builder.open_tree("todeviceid_events")?,
            },
            uiaa: uiaa::Uiaa {
                userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?,
                userdevicesessionid_uiaarequest: builder
                    .open_tree("userdevicesessionid_uiaarequest")?,
            },
            rooms: rooms::Rooms {
                edus: rooms::RoomEdus {
                    readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?,
                    roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
                    roomuserid_lastprivatereadupdate: builder
                        .open_tree("roomuserid_lastprivatereadupdate")?,
                    typingid_userid: builder.open_tree("typingid_userid")?,
                    roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?,
                    presenceid_presence: builder.open_tree("presenceid_presence")?,
                    userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
                },
                pduid_pdu: builder.open_tree("pduid_pdu")?,
                eventid_pduid: builder.open_tree("eventid_pduid")?,
                roomid_pduleaves: builder.open_tree("roomid_pduleaves")?,
                alias_roomid: builder.open_tree("alias_roomid")?,
                aliasid_alias: builder.open_tree("aliasid_alias")?,
                publicroomids: builder.open_tree("publicroomids")?,
                tokenids: builder.open_tree("tokenids")?,
                roomserverids: builder.open_tree("roomserverids")?,
                serverroomids: builder.open_tree("serverroomids")?,
                userroomid_joined: builder.open_tree("userroomid_joined")?,
                roomuserid_joined: builder.open_tree("roomuserid_joined")?,
                roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?,
                userroomid_invitestate: builder.open_tree("userroomid_invitestate")?,
                roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?,
                userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
                roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
                userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?,
                userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,
                statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?,
                stateid_shorteventid: builder.open_tree("stateid_shorteventid")?,
                eventid_shorteventid: builder.open_tree("eventid_shorteventid")?,
                shorteventid_eventid: builder.open_tree("shorteventid_eventid")?,
                shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?,
                roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?,
                statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?,
                eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?,
                prevevent_parent: builder.open_tree("prevevent_parent")?,
                // In-memory LRU cache of up to 10k PDUs to avoid re-reading
                // hot events from disk.
                pdu_cache: RwLock::new(LruCache::new(10_000)),
            },
            account_data: account_data::AccountData {
                roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,
            },
            media: media::Media {
                mediaid_file: builder.open_tree("mediaid_file")?,
            },
            key_backups: key_backups::KeyBackups {
                backupid_algorithm: builder.open_tree("backupid_algorithm")?,
                backupid_etag: builder.open_tree("backupid_etag")?,
                backupkeyid_backup: builder.open_tree("backupkeyid_backup")?,
            },
            transaction_ids: transaction_ids::TransactionIds {
                userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?,
            },
            sending: sending::Sending {
                servername_educount: builder.open_tree("servername_educount")?,
                servernamepduids: builder.open_tree("servernamepduids")?,
                servercurrentevents: builder.open_tree("servercurrentevents")?,
                // Limits concurrent outgoing requests to the configured cap.
                maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)),
                sender: sending_sender,
            },
            admin: admin::Admin {
                sender: admin_sender,
            },
            appservice: appservice::Appservice {
                cached_registrations: Arc::new(RwLock::new(HashMap::new())),
                id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?,
            },
            pusher: pusher::PushData {
                senderkey_pusher: builder.open_tree("senderkey_pusher")?,
            },
            globals: globals::Globals::load(
                builder.open_tree("global")?,
                builder.open_tree("server_signingkeys")?,
                config.clone(),
            )?,
        }));

        {
            let db = db.read().await;
            // MIGRATIONS
            // TODO: database versions of new dbs should probably not be 0
            if db.globals.database_version()? < 1 {
                // Migration 0 -> 1: build the serverroomids reverse index from
                // the existing roomserverids entries (key layout:
                // roomid 0xff servername, and vice versa).
                for (roomserverid, _) in db.rooms.roomserverids.iter() {
                    let mut parts = roomserverid.split(|&b| b == 0xff);
                    let room_id = parts.next().expect("split always returns one element");
                    let servername = match parts.next() {
                        Some(s) => s,
                        None => {
                            error!("Migration: Invalid roomserverid in db.");
                            continue;
                        }
                    };
                    let mut serverroomid = servername.to_vec();
                    serverroomid.push(0xff);
                    serverroomid.extend_from_slice(room_id);

                    db.rooms.serverroomids.insert(&serverroomid, &[])?;
                }

                db.globals.bump_database_version(1)?;

                println!("Migration: 0 -> 1 finished");
            }

            if db.globals.database_version()? < 2 {
                // We accidentally inserted hashed versions of "" into the db instead of just ""
                for (userid, password) in db.users.userid_password.iter() {
                    let password = utils::string_from_bytes(&password);

                    let empty_hashed_password = password.map_or(false, |password| {
                        argon2::verify_encoded(&password, b"").unwrap_or(false)
                    });

                    if empty_hashed_password {
                        db.users.userid_password.insert(&userid, b"")?;
                    }
                }

                db.globals.bump_database_version(2)?;

                println!("Migration: 1 -> 2 finished");
            }

            if db.globals.database_version()? < 3 {
                // Move media to filesystem
                for (key, content) in db.media.mediaid_file.iter() {
                    // Already migrated entries store an empty value.
                    if content.is_empty() {
                        continue;
                    }

                    let path = db.globals.get_media_file(&key);
                    let mut file = fs::File::create(path)?;
                    file.write_all(&content)?;
                    // Keep the key but drop the blob — the file on disk is now
                    // the source of truth.
                    db.media.mediaid_file.insert(&key, &[])?;
                }

                db.globals.bump_database_version(3)?;

                println!("Migration: 2 -> 3 finished");
            }

            if db.globals.database_version()? < 4 {
                // Add federated users to db as deactivated
                for our_user in db.users.iter() {
                    let our_user = our_user?;
                    if db.users.is_deactivated(&our_user)? {
                        continue;
                    }
                    for room in db.rooms.rooms_joined(&our_user) {
                        for user in db.rooms.room_members(&room?) {
                            let user = user?;
                            if user.server_name() != db.globals.server_name() {
                                println!("Migration: Creating user {}", user);
                                db.users.create(&user, None)?;
                            }
                        }
                    }
                }

                db.globals.bump_database_version(4)?;

                println!("Migration: 3 -> 4 finished");
            }
        }

        let guard = db.read().await;

        // This data is probably outdated
        guard.rooms.edus.presenceid_presence.clear()?;

        // Start the background handlers; they receive work through the
        // channels created above.
        guard.admin.start_handler(Arc::clone(&db), admin_receiver);
        guard
            .sending
            .start_handler(Arc::clone(&db), sending_receiver);

        drop(guard);

        #[cfg(feature = "sqlite")]
        {
            Self::start_wal_clean_task(&db, &config).await;
            Self::start_spillover_reap_task(builder, &config).await;
        }

        Ok(db)
    }
2021-07-14 12:50:07 +00:00
#[cfg(feature = "conduit_bin")]
pub async fn start_on_shutdown_tasks(db: Arc<TokioRwLock<Self>>, shutdown: Shutdown) {
tokio::spawn(async move {
shutdown.await;
log::info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers...");
db.read().await.globals.rotate.fire();
});
}
    /// Resolves once something this user/device pair could care about has
    /// changed: to-device events, membership state, PDUs/EDUs in joined
    /// rooms, key changes, account data, one-time keys, or a global rotation
    /// (fired e.g. on shutdown or WAL cleanup).
    ///
    /// Tree keys use 0xff as a separator, so the prefixes below scope each
    /// watch to this user, device, or room.
    pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) {
        let userid_bytes = user_id.as_bytes().to_vec();
        let mut userid_prefix = userid_bytes.clone();
        userid_prefix.push(0xff);

        let mut userdeviceid_prefix = userid_prefix.clone();
        userdeviceid_prefix.extend_from_slice(device_id.as_bytes());
        userdeviceid_prefix.push(0xff);

        let mut futures = FuturesUnordered::new();

        // Return when *any* user changed his key
        // TODO: only send for user they share a room with
        futures.push(
            self.users
                .todeviceid_events
                .watch_prefix(&userdeviceid_prefix),
        );

        // Membership changes for this user.
        futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix));
        futures.push(
            self.rooms
                .userroomid_invitestate
                .watch_prefix(&userid_prefix),
        );
        futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix));

        // Events for rooms we are in
        for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
            let roomid_bytes = room_id.as_bytes().to_vec();
            let mut roomid_prefix = roomid_bytes.clone();
            roomid_prefix.push(0xff);

            // PDUs
            futures.push(self.rooms.pduid_pdu.watch_prefix(&roomid_prefix));

            // EDUs
            futures.push(
                self.rooms
                    .edus
                    .roomid_lasttypingupdate
                    .watch_prefix(&roomid_bytes),
            );

            futures.push(
                self.rooms
                    .edus
                    .readreceiptid_readreceipt
                    .watch_prefix(&roomid_prefix),
            );

            // Key changes
            futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix));

            // Room account data
            let mut roomuser_prefix = roomid_prefix.clone();
            roomuser_prefix.extend_from_slice(&userid_prefix);

            futures.push(
                self.account_data
                    .roomuserdataid_accountdata
                    .watch_prefix(&roomuser_prefix),
            );
        }

        // Global account data — presumably keyed with an empty room id part,
        // hence the leading 0xff separator; confirm against account_data.
        let mut globaluserdata_prefix = vec![0xff];
        globaluserdata_prefix.extend_from_slice(&userid_prefix);

        futures.push(
            self.account_data
                .roomuserdataid_accountdata
                .watch_prefix(&globaluserdata_prefix),
        );

        // More key changes (used when user is not joined to any rooms)
        futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix));

        // One time keys
        futures.push(
            self.users
                .userid_lastonetimekeyupdate
                .watch_prefix(&userid_bytes),
        );

        futures.push(Box::pin(self.globals.rotate.watch()));

        // Wait until one of them finds something
        futures.next().await;
    }
pub async fn flush(&self) -> Result<()> {
2021-07-14 07:07:08 +00:00
let start = std::time::Instant::now();
let res = self._db.flush();
log::debug!("flush: took {:?}", start.elapsed());
res
}
#[cfg(feature = "sqlite")]
pub fn flush_wal(&self) -> Result<()> {
self._db.flush_wal()
}
2021-07-19 13:56:20 +00:00
    /// Spawns a background task that periodically calls the engine's
    /// `reap_spillover_by_fraction` with the configured fraction.
    ///
    /// NOTE(review): the target type of `try_into` is inferred from
    /// `reap_spillover_by_fraction`'s signature (not visible here) — the
    /// unwrap relies on the value fitting after being clamped to >= 1.
    #[cfg(feature = "sqlite")]
    pub async fn start_spillover_reap_task(engine: Arc<Engine>, config: &Config) {
        use std::convert::TryInto;

        // Clamp to at least 1 so a zero config value cannot slip through.
        let fraction_factor = config.sqlite_spillover_reap_fraction.max(1).try_into().unwrap(/* We just converted it to be at least 1 */);

        let interval_secs = config.sqlite_spillover_reap_interval_secs as u64;

        // Hold only a weak reference so this task does not keep the engine
        // alive after the rest of the server has dropped it.
        let weak = Arc::downgrade(&engine);

        tokio::spawn(async move {
            use tokio::time::interval;

            use std::{sync::Weak, time::Duration};

            let mut i = interval(Duration::from_secs(interval_secs));

            loop {
                i.tick().await;

                if let Some(arc) = Weak::upgrade(&weak) {
                    arc.reap_spillover_by_fraction(fraction_factor);
                } else {
                    // Engine is gone; end the task.
                    break;
                }
            }
        });
    }
2021-07-14 07:07:08 +00:00
    /// Spawns a background task that periodically (and, on unix, on SIGHUP)
    /// takes the exclusive write lock on the database and truncates the
    /// sqlite WAL via `flush_wal`.
    #[cfg(feature = "sqlite")]
    pub async fn start_wal_clean_task(lock: &Arc<TokioRwLock<Self>>, config: &Config) {
        use tokio::time::{interval, timeout};

        #[cfg(unix)]
        use tokio::signal::unix::{signal, SignalKind};

        use std::{
            sync::Weak,
            time::{Duration, Instant},
        };

        // Weak handle so the task ends once the database itself is dropped.
        let weak: Weak<TokioRwLock<Database>> = Arc::downgrade(&lock);

        let lock_timeout = Duration::from_secs(config.sqlite_wal_clean_second_timeout as u64);
        let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64);
        let do_timer = config.sqlite_wal_clean_timer;

        tokio::spawn(async move {
            let mut i = interval(timer_interval);
            #[cfg(unix)]
            let mut s = signal(SignalKind::hangup()).unwrap();

            loop {
                // Wait for either the timer (if enabled) or SIGHUP (unix only).
                #[cfg(unix)]
                tokio::select! {
                    _ = i.tick(), if do_timer => {
                        log::info!(target: "wal-trunc", "Timer ticked")
                    }
                    _ = s.recv() => {
                        log::info!(target: "wal-trunc", "Received SIGHUP")
                    }
                };
                #[cfg(not(unix))]
                if do_timer {
                    i.tick().await;
                    log::info!(target: "wal-trunc", "Timer ticked")
                } else {
                    // timer disabled, and there's no concept of signals on windows, bailing...
                    return;
                }

                if let Some(arc) = Weak::upgrade(&weak) {
                    log::info!(target: "wal-trunc", "Rotating sync helpers...");
                    // This actually creates a very small race condition between firing this and trying to acquire the subsequent write lock.
                    // Though it is not a huge deal if the write lock doesn't "catch", as it'll harmlessly time out.
                    arc.read().await.globals.rotate.fire();

                    log::info!(target: "wal-trunc", "Locking...");
                    let guard = {
                        // Bounded wait: if readers keep the lock busy, skip
                        // this round instead of stalling the server.
                        if let Ok(guard) = timeout(lock_timeout, arc.write()).await {
                            guard
                        } else {
                            log::info!(target: "wal-trunc", "Lock failed in timeout, canceled.");
                            continue;
                        }
                    };
                    log::info!(target: "wal-trunc", "Locked, flushing...");

                    let start = Instant::now();
                    if let Err(e) = guard.flush_wal() {
                        log::error!(target: "wal-trunc", "Errored: {}", e);
                    } else {
                        log::info!(target: "wal-trunc", "Flushed in {:?}", start.elapsed());
                    }
                } else {
                    // Database dropped; end the task.
                    break;
                }
            }
        });
    }
}
/// Owned read-lock guard on the database, handed to request handlers.
pub struct DatabaseGuard(OwnedRwLockReadGuard<Database>);
impl Deref for DatabaseGuard {
    type Target = OwnedRwLockReadGuard<Database>;

    /// Lets callers reach the `Database` through the wrapped read guard.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Rocket request guard: acquires a read lock on the managed database state
/// for the duration of the request.
#[rocket::async_trait]
impl<'r> FromRequest<'r> for DatabaseGuard {
    type Error = ();

    async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome<Self, ()> {
        // Pull the database out of rocket's managed state; try_outcome!
        // propagates the failure outcome if it is not there.
        let db = try_outcome!(req.guard::<&State<Arc<TokioRwLock<Database>>>>().await);

        // `or_forward` turns any error into a Forward so other routes may match.
        Ok(DatabaseGuard(Arc::clone(&db).read_owned().await)).or_forward(())
    }
}
impl From<OwnedRwLockReadGuard<Database>> for DatabaseGuard {
    /// Wraps an already-acquired owned read guard into a `DatabaseGuard`.
    fn from(val: OwnedRwLockReadGuard<Database>) -> Self {
        Self(val)
    }
}