Merge branch 'benchmarks'
commit c8e769860b

Cargo.toml
@@ -53,6 +53,9 @@ matrix-sdk-test = { version = "0.2.0", path = "../matrix_sdk_test" }
 indoc = "1.0.3"
 criterion = { version = "0.3.4", features = ["async", "async_futures", "html_reports"] }
 
+[target.'cfg(target_os = "linux")'.dev-dependencies]
+pprof = { version = "0.4.2", features = ["flamegraph"] }
+
 [[bench]]
 name = "crypto_bench"
 harness = false
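For context on the manifest changes: `harness = false` stops Cargo from compiling the default libtest harness into the bench target, because Criterion generates its own `main` through `criterion_main!`, and the `async`/`async_futures` features enable the futures-based async benching used below. A minimal sketch of the shape such a target takes (the `fibonacci` workload is hypothetical, not part of this commit):

```rust
use criterion::{criterion_group, criterion_main, Criterion};

// Hypothetical function under test; stands in for the crypto operations.
fn fibonacci(n: u64) -> u64 {
    (1..n).fold((0u64, 1u64), |(a, b), _| (b, a + b)).1
}

fn bench_fibonacci(c: &mut Criterion) {
    c.bench_function("fibonacci 20", |b| b.iter(|| fibonacci(20)));
}

criterion_group!(benches, bench_fibonacci);
// Expands to the `main` that `harness = false` leaves room for.
criterion_main!(benches);
```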
benches/crypto_bench.rs
@@ -1,12 +1,13 @@
+#[cfg(target_os = "linux")]
+mod perf;
+
 use std::convert::TryFrom;
 
-use criterion::{
-    async_executor::FuturesExecutor, criterion_group, criterion_main, BenchmarkId, Criterion,
-    Throughput,
-};
+use criterion::{async_executor::FuturesExecutor, *};
+use futures::executor::block_on;
 use matrix_sdk_common::{
-    api::r0::keys::get_keys,
+    api::r0::keys::{claim_keys, get_keys},
     identifiers::{user_id, DeviceIdBox, UserId},
     uuid::Uuid,
 };
@@ -29,7 +30,14 @@ fn keys_query_response() -> get_keys::Response {
     get_keys::Response::try_from(data).expect("Can't parse the keys upload response")
 }
 
-pub fn receive_keys_query(c: &mut Criterion) {
+fn keys_claim_response() -> claim_keys::Response {
+    let data = include_bytes!("./keys_claim.json");
+    let data: Value = serde_json::from_slice(data).unwrap();
+    let data = response_from_file(&data);
+    claim_keys::Response::try_from(data).expect("Can't parse the keys claim response")
+}
+
+pub fn keys_query(c: &mut Criterion) {
     let machine = OlmMachine::new(&alice_id(), &alice_device_id());
     let response = keys_query_response();
     let uuid = Uuid::new_v4();
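The new `keys_claim_response` helper mirrors `keys_query_response`: the JSON fixture is embedded at compile time, parsed into a `serde_json::Value`, and the `response_from_file` helper (which appears to come from the SDK's test utilities) wraps it into an HTTP response before the typed conversion. A stripped-down sketch of the embedding pattern (the `data.json` path is a placeholder):

```rust
use serde_json::Value;

// Embed the fixture into the binary at compile time; the path is
// resolved relative to this source file.
fn load_fixture() -> Value {
    let bytes = include_bytes!("./data.json");
    serde_json::from_slice(bytes).expect("fixture should be valid JSON")
}
```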
@@ -42,11 +50,14 @@ pub fn receive_keys_query(c: &mut Criterion) {
         + response.self_signing_keys.len()
         + response.user_signing_keys.len();
 
-    let mut group = c.benchmark_group("key query throughput");
+    let mut group = c.benchmark_group("Keys querying");
     group.throughput(Throughput::Elements(count as u64));
 
     group.bench_with_input(
-        BenchmarkId::new("key_query", "150 devices key query response parsing"),
+        BenchmarkId::new(
+            "Keys querying",
+            "150 device keys parsing and signature checking",
+        ),
         &response,
         |b, response| {
             b.to_async(FuturesExecutor)
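The renames here are cosmetic, but `Throughput::Elements` is what makes Criterion report keys processed per second instead of only wall time per iteration. A minimal, self-contained sketch of that reporting pattern, with a hypothetical summing workload:

```rust
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};

fn bench_throughput(c: &mut Criterion) {
    let items: Vec<u64> = (0..150).collect();

    let mut group = c.benchmark_group("Summing");
    // Report elements/second alongside the time per iteration.
    group.throughput(Throughput::Elements(items.len() as u64));
    group.bench_with_input(
        BenchmarkId::new("Summing", "150 elements"),
        &items,
        |b, items| b.iter(|| items.iter().sum::<u64>()),
    );
    group.finish();
}

criterion_group!(benches, bench_throughput);
criterion_main!(benches);
```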
@@ -56,5 +67,52 @@ pub fn receive_keys_query(c: &mut Criterion) {
     group.finish()
 }
 
-criterion_group!(benches, receive_keys_query);
+pub fn keys_claiming(c: &mut Criterion) {
+    let keys_query_response = keys_query_response();
+    let uuid = Uuid::new_v4();
+
+    let response = keys_claim_response();
+
+    let count = response
+        .one_time_keys
+        .values()
+        .fold(0, |acc, d| acc + d.len());
+
+    let mut group = c.benchmark_group("Keys claiming throughput");
+    group.throughput(Throughput::Elements(count as u64));
+
+    let name = format!("{} one-time keys claiming and session creation", count);
+
+    group.bench_with_input(
+        BenchmarkId::new("One-time keys claiming", &name),
+        &response,
+        |b, response| {
+            b.iter_batched(
+                || {
+                    let machine = OlmMachine::new(&alice_id(), &alice_device_id());
+                    block_on(machine.mark_request_as_sent(&uuid, &keys_query_response)).unwrap();
+                    machine
+                },
+                move |machine| block_on(machine.mark_request_as_sent(&uuid, response)).unwrap(),
+                BatchSize::SmallInput,
+            )
+        },
+    );
+    group.finish()
+}
+
+fn criterion() -> Criterion {
+    #[cfg(target_os = "linux")]
+    let criterion = Criterion::default().with_profiler(perf::FlamegraphProfiler::new(100));
+    #[cfg(not(target_os = "linux"))]
+    let criterion = Criterion::default();
+
+    criterion
+}
+
+criterion_group! {
+    name = benches;
+    config = criterion();
+    targets = keys_query, keys_claiming
+}
 criterion_main!(benches);
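`keys_claiming` relies on `iter_batched` so that the expensive setup (creating a fresh `OlmMachine` and feeding it the queried device keys) stays outside the timed region, and every measured iteration starts from a machine that has not yet seen the claimed one-time keys. A small sketch of that setup/measurement split (the string-parsing workload is hypothetical):

```rust
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};

fn bench_batched(c: &mut Criterion) {
    c.bench_function("parse u64", |b| {
        b.iter_batched(
            // Setup closure: runs outside the timed region and produces
            // a fresh input for every measured invocation.
            || "123456789".to_string(),
            // Measured closure: consumes the input by value, so no work
            // can be amortized across iterations.
            |input| input.parse::<u64>().unwrap(),
            BatchSize::SmallInput,
        )
    });
}

criterion_group!(benches, bench_batched);
criterion_main!(benches);
```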
benches/keys_claim.json: file diff suppressed because one or more lines are too long
benches/perf.rs
@@ -0,0 +1,78 @@
+//! This is a simple Criterion Profiler implementation using pprof.
+//!
+//! It's mostly a direct copy from here: https://www.jibbow.com/posts/criterion-flamegraphs/
+use std::{fs::File, os::raw::c_int, path::Path};
+
+use criterion::profiler::Profiler;
+use pprof::ProfilerGuard;
+
+/// Small custom profiler that can be used with Criterion to create a flamegraph for benchmarks.
+/// Also see [the Criterion documentation on this][custom-profiler].
+///
+/// ## Example on how to enable the custom profiler:
+///
+/// ```
+/// mod perf;
+/// use perf::FlamegraphProfiler;
+///
+/// fn fibonacci_profiled(criterion: &mut Criterion) {
+///     // Use the criterion struct as normal here.
+/// }
+///
+/// fn custom() -> Criterion {
+///     Criterion::default().with_profiler(FlamegraphProfiler::new(100))
+/// }
+///
+/// criterion_group! {
+///     name = benches;
+///     config = custom();
+///     targets = fibonacci_profiled
+/// }
+/// ```
+///
+/// The neat thing about this is that it will sample _only_ the benchmark, and not other stuff like
+/// the setup process.
+///
+/// Further, it will only kick in if `--profile-time <time>` is passed to the benchmark binary.
+/// A flamegraph will be created for each individual benchmark in its report directory under
+/// `profile/flamegraph.svg`.
+///
+/// [custom-profiler]: https://bheisler.github.io/criterion.rs/book/user_guide/profiling.html#implementing-in-process-profiling-hooks
+pub struct FlamegraphProfiler<'a> {
+    frequency: c_int,
+    active_profiler: Option<ProfilerGuard<'a>>,
+}
+
+impl<'a> FlamegraphProfiler<'a> {
+    pub fn new(frequency: c_int) -> Self {
+        FlamegraphProfiler {
+            frequency,
+            active_profiler: None,
+        }
+    }
+}
+
+impl<'a> Profiler for FlamegraphProfiler<'a> {
+    fn start_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &Path) {
+        self.active_profiler = Some(ProfilerGuard::new(self.frequency).unwrap());
+    }
+
+    fn stop_profiling(&mut self, _benchmark_id: &str, benchmark_dir: &Path) {
+        std::fs::create_dir_all(benchmark_dir)
+            .expect("Can't create a directory to store the benchmarking report");
+
+        let flamegraph_path = benchmark_dir.join("flamegraph.svg");
+
+        let flamegraph_file = File::create(&flamegraph_path)
+            .expect("File system error while creating flamegraph.svg");
+
+        if let Some(profiler) = self.active_profiler.take() {
+            profiler
+                .report()
+                .build()
+                .expect("Can't build profiling report")
+                .flamegraph(flamegraph_file)
+                .expect("Error writing flamegraph");
+        }
+    }
+}
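As the doc comment says, the profiler only kicks in when the bench binary is asked to profile, e.g. `cargo bench --bench crypto_bench -- --profile-time 10`. For a quick sanity check outside Criterion, the hooks can also be driven by hand; the following smoke test is a hypothetical sketch, not part of the commit:

```rust
#[cfg(target_os = "linux")]
mod perf;

#[cfg(target_os = "linux")]
fn main() {
    use std::path::Path;

    use criterion::profiler::Profiler;

    // Drive the hooks manually and confirm that flamegraph.svg appears.
    let mut profiler = perf::FlamegraphProfiler::new(100);
    let dir = Path::new("target/flamegraph-smoke/profile");

    profiler.start_profiling("smoke", dir);
    let mut acc = 0u64;
    for i in 0..50_000_000u64 {
        acc = acc.wrapping_mul(31).wrapping_add(i);
    }
    profiler.stop_profiling("smoke", dir);

    // Use the result so the loop is not optimized away.
    println!("checksum: {}", acc);
}

#[cfg(not(target_os = "linux"))]
fn main() {}
```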