//! RoboTuna: a pitch-correction / pitch-shifting VST plugin built on `baseplug`.
//! (The "315 lines / 10 KiB / Rust" lines previously here were page-scrape
//! artifacts, not source code; replaced with this file-level note.)
#![allow(incomplete_features)]
|
|
#![feature(generic_associated_types)]
|
|
|
|
use baseplug::{MidiReceiver, Plugin, ProcessContext};
|
|
use ringbuf::{Consumer, Producer, RingBuffer};
|
|
use serde::{Deserialize, Serialize};
|
|
|
|
use utils::buffers::*;
|
|
use utils::delay::*;
|
|
use utils::logs::*;
|
|
use utils::pitch::*;
|
|
|
|
mod tuna;
|
|
|
|
/// Samples per analysis chunk sent to the worker thread (2 << 9 = 1024).
const BUFFER_LEN: usize = 2 << 9;
/// Length of each playback delay line, in samples.
const DELAY_LEN: usize = 4000;
|
|
|
|
baseplug::model! {
    // Host-automatable parameter model. baseplug generates the smoothed
    // per-sample view (`RoboTunaModelProcess`) used inside `process`.
    #[derive(Debug, Serialize, Deserialize)]
    struct RoboTunaModel {
        // Mode toggle: `process` treats values < 0.5 as manual (MIDI-driven)
        // mode and >= 0.5 as snap-to-nearest-note mode.
        #[model(min = 0.0, max = 1.0)]
        #[parameter(name = "manual/snap")]
        manual: f32,
        // Extra multiplier applied on top of the retuning ratio in
        // `advancement_rate`; 1.0 is neutral.
        #[model(min = 0.1, max = 2.1)]
        #[parameter(name = "frequency gain")]
        freq_gain: f32,
    }
}
|
|
|
|
impl Default for RoboTunaModel {
|
|
fn default() -> Self {
|
|
Self {
|
|
manual: 1.0,
|
|
freq_gain: 1.0,
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Audio-thread state of the pitch shifter.
///
/// Incoming audio is chunked into `Buffers` and shipped to the `tuna` worker
/// thread for pitch analysis via lock-free ring buffers, while the same audio
/// is written to delay lines that are read back at a variable rate to retune
/// the signal.
struct RoboTuna {
    /// Current midi note
    note: Option<u8>,

    /// Current pitches, as estimated by the worker thread.
    /// `None` until the first estimate arrives.
    pitch_l: Option<f32>,
    pitch_r: Option<f32>,

    /// Current recording buffer
    /// Input goes here
    recording_buffer: Buffers<BUFFER_LEN>,

    /// Ringbuf producer so we can send audio chunks to the processing thread
    recordings: Producer<tuna::ProcessorInput>,
    /// Ringbuf consumer so we can receive processed buffers from the processing threads
    processed: Consumer<tuna::ProcessorOutput>,

    /// Contains some empty buffers so we can reuse them instead of doing allocations
    /// Buffers here are not actually empty, since we don't spend any time clearing them
    /// But since they will be overwritten, this isn't an issue
    empty_buffers: Vec<Buffers<BUFFER_LEN>>,

    /// Keeps delay lines for playing
    delays: DelayLines<DELAY_LEN>,

    /// Floating indexes so we can do interpolation
    delay_idx_l: f32,
    delay_idx_r: f32,
    /// true indexes so we can know how much we're drifting away
    /// (the write position; the floating read indexes trail behind it)
    true_idx: usize,
}
|
|
|
|
impl Plugin for RoboTuna {
    const NAME: &'static str = "robotuna";
    const PRODUCT: &'static str = "robotuna";
    const VENDOR: &'static str = "unnieversal";

    const INPUT_CHANNELS: usize = 2;
    const OUTPUT_CHANNELS: usize = 2;

    type Model = RoboTunaModel;

    /// Sets up logging, spawns the pitch-analysis worker thread, wires up the
    /// ring buffers used to exchange chunks with it, and builds initial state.
    #[inline]
    fn new(_sample_rate: f32, _model: &RoboTunaModel) -> Self {
        setup_logging("robotuna.log");

        // Two SPSC ring buffers, 30 slots each: `recordings` carries raw
        // chunks to the worker; `processed` brings analyzed chunks back.
        let (recordings, recording_rx) = RingBuffer::<tuna::ProcessorInput>::new(30).split();
        let (processed_tx, processed) = RingBuffer::<tuna::ProcessorOutput>::new(30).split();

        // Spawn analysis thread; it owns the other end of both ring buffers.
        std::thread::spawn(move || {
            tuna::tuna(recording_rx, processed_tx);
        });

        // keep some empty buffers around so we can swap them in without
        // allocating on the audio thread
        let mut empty_buffers = Vec::with_capacity(80);
        // const item so `vec![BUF; 30]` can materialize copies of it
        const BUF: Buffers<BUFFER_LEN> = Buffers::new();
        empty_buffers.append(&mut vec![BUF; 30]);

        log::info!("finished init");

        Self {
            note: None,
            pitch_l: None,
            pitch_r: None,
            recording_buffer: Buffers::new(),
            recordings,
            processed,
            empty_buffers,
            delays: DelayLines::<DELAY_LEN>::new(),

            delay_idx_l: 0.0,
            delay_idx_r: 0.0,
            // We start this at a high number cause idk
            // We'll catch up when we start playing
            true_idx: 500,
        }
    }

    /// Per-buffer audio callback: records input into chunks for the analysis
    /// thread, collects its pitch estimates, and renders the retuned output
    /// from the delay lines sample by sample.
    #[inline]
    fn process(&mut self, model: &RoboTunaModelProcess, ctx: &mut ProcessContext<Self>) {
        let input = &ctx.inputs[0].buffers;
        let output = &mut ctx.outputs[0].buffers;

        for i in 0..ctx.nframes {
            // append input to main buffer; `full` becomes true on the write
            // that fills the chunk
            let full = self
                .recording_buffer
                .write_advance(input[0][i], input[1][i]);
            // If we fill the buffer, switch it with an empty one
            if full {
                // we have to loop here, cause when the daw renders audio it tries to do it faster than
                // real time. if we don't loop and wait, the processing thread gets stuck with all of the buffers,
                // and we run out of empty ones to switch to
                // the loop-wait ensures that we don't panic when there isn't an empty buffer
                loop {
                    // get the empty buffer from unused buffer list
                    if let Some(mut buf) = self.empty_buffers.pop() {
                        // reset positions so recording restarts at index 0
                        buf.reset();
                        // swap it with recording buffer
                        std::mem::swap(&mut buf, &mut self.recording_buffer);
                        // `buf` now holds the full chunk; reset its positions
                        // so the worker reads it from the start
                        buf.reset();

                        // pass it to the processor thread
                        let _ = self.recordings.push(tuna::ProcessorInput {
                            buffers: buf,
                            sample_rate: ctx.sample_rate as u32,
                        });
                        break;
                    }
                    std::thread::sleep(std::time::Duration::from_micros(10));
                }
            }

            // Try to get a processed buffer from the processor thread
            if let Some(tuna::ProcessorOutput {
                buffers,
                pitch_l,
                pitch_r,
            }) = self.processed.pop()
            {
                // recycle the chunk's storage for a future recording
                self.empty_buffers.push(buffers);

                // Update current pitch
                // We use `or`, so we keep the old value if the current one is None
                self.pitch_l = pitch_l.or(self.pitch_l);
                self.pitch_r = pitch_r.or(self.pitch_r);
            }

            // Play from delay line according to pitch
            // `manual[i] < 0.5` selects manual (MIDI-driven) mode
            let (l, r) = self.shift(
                input[0][i],
                input[1][i],
                ctx.sample_rate,
                model.freq_gain[i],
                model.manual[i] < 0.5,
            );

            output[0][i] = l;
            output[1][i] = r;
        }
    }
}
|
|
impl RoboTuna {
    /// Returns the per-channel playback-rate multipliers for the delay lines.
    ///
    /// A rate of 1.0 plays back at the input pitch; `expected / current`
    /// retunes toward the target frequency, and `freq_gain` scales the result.
    /// In manual mode the target is the held MIDI note (falling through to
    /// plain `freq_gain` when no note is held); in snap mode it is the nearest
    /// equal-tempered note to the detected pitch.
    fn advancement_rate(&self, freq_gain: f32, manual: bool) -> (f32, f32) {
        // TODO Deal with pitch detection failing
        // 220.0 is an arbitrary fallback used until an estimate arrives
        let current_pitch_l = self.pitch_l.unwrap_or(220.0);
        let current_pitch_r = self.pitch_r.unwrap_or(220.0);

        if manual {
            // If we're on manual, get the expected frequency from the midi note
            if let Some(expected) = self.note.map(midi_note_to_pitch) {
                let l = expected / current_pitch_l;
                let r = expected / current_pitch_r;
                (freq_gain * l, freq_gain * r)
            } else {
                // If there's no note, we just do frequency gain
                (freq_gain, freq_gain)
            }
        } else {
            // If we're on snap, get the closest note
            let expected_l = closest_note_freq(current_pitch_l);
            let expected_r = closest_note_freq(current_pitch_r);

            let l = expected_l / current_pitch_l;
            let r = expected_r / current_pitch_r;
            (freq_gain * l, freq_gain * r)
        }
    }

    /// Writes one input frame into the delay lines and produces one retuned
    /// output frame by reading them at the `advancement_rate` speed, keeping
    /// each floating read index within [1, 3] periods behind the write index
    /// via period-sized jumps, crossfaded to hide discontinuities.
    fn shift(
        &mut self,
        l: f32,
        r: f32,
        sample_rate: f32,
        freq_gain: f32,
        manual: bool,
    ) -> (f32, f32) {
        // so um this code will probably not make any sense if i don't write an explanation of the
        // general thing it's trying to achieve
        // if i've forgoten to write it up and you want to understand the code, ping me and uh yeah

        // add input to delay line
        self.delays.write_and_advance(l, r);

        // get period of left & right (in samples; 220 Hz fallback pitch)
        let period_l = sample_rate / self.pitch_l.unwrap_or(220.0);
        let period_r = sample_rate / self.pitch_r.unwrap_or(220.0);

        // advance indexes
        let (adv_l, adv_r) = self.advancement_rate(freq_gain, manual);
        self.delay_idx_l += adv_l;
        self.delay_idx_r += adv_r;
        self.true_idx += 1;

        // get how close we are to the input idx, so we know if we have to interpolate/jump
        let l_diff = self.true_idx as f32 - self.delay_idx_l;
        let r_diff = self.true_idx as f32 - self.delay_idx_r;

        // get the current value
        let mut l = self.delays.l.floating_index(self.delay_idx_l);
        let mut r = self.delays.r.floating_index(self.delay_idx_r);

        // Interpolation
        // if we are close to having to jump, we start interpolating with the jump destination
        // interpolate when we're one third of the period away from jumping

        // TODO change to a non-linear interpolation

        // NOTE(review): period / DIV with DIV = 2/3 is 1.5 * period, which
        // doesn't match the "one third of the period" comment above — confirm
        // which window size is intended.
        const DIV: f32 = 2.0 / 3.0;
        // approaching the backward jump (l_diff -> period_l): fade toward the
        // sample one period earlier
        if l_diff - period_l < (period_l / DIV) {
            let a = (l_diff - period_l) / (period_l / DIV);
            l *= a;
            l += (1.0 - a) * self.delays.l.floating_index(self.delay_idx_l - period_l);
        }
        // approaching the forward jump (l_diff -> 3 * period_l)
        // NOTE(review): this jump moves the index forward (+period_l below),
        // but the crossfade target here is `delay_idx_l - period_l` like the
        // backward case — verify the sign is intended.
        if 3.0 * period_l - l_diff < (period_l / DIV) {
            let a = (3.0 * period_l - l_diff) / (period_l / DIV);
            l *= a;
            l += (1.0 - a) * self.delays.l.floating_index(self.delay_idx_l - period_l);
        }
        if r_diff - period_r < (period_r / DIV) {
            let a = (r_diff - period_r) / (period_r / DIV);
            r *= a;
            r += (1.0 - a) * self.delays.r.floating_index(self.delay_idx_r - period_r);
        }
        // NOTE(review): same sign question as the left channel above.
        if 3.0 * period_r - r_diff < (period_r / DIV) {
            let a = (3.0 * period_r - r_diff) / (period_r / DIV);
            r *= a;
            r += (1.0 - a) * self.delays.r.floating_index(self.delay_idx_r - period_r);
        }

        // Check if we need to advance/go back `period` samples
        // we want to be between the second and third period
        // so ideally we want {l,r}_diff == 2.0 * period_{l,r}

        // We are about to get to the first period
        if l_diff < period_l {
            self.delay_idx_l -= period_l;
        }
        // We are about to get to the fourth period
        if l_diff > 3.0 * period_l {
            self.delay_idx_l += period_l;
        }
        if r_diff < period_r {
            self.delay_idx_r -= period_r;
        }
        if r_diff > 3.0 * period_r {
            self.delay_idx_r += period_r;
        }

        (l, r)
    }
}
|
|
impl MidiReceiver for RoboTuna {
|
|
fn midi_input(&mut self, _model: &RoboTunaModelProcess, data: [u8; 3]) {
|
|
match data[0] {
|
|
// note on
|
|
0x90 => {
|
|
self.note = Some(data[1]);
|
|
}
|
|
// note off
|
|
0x80 => {
|
|
// only set note to None if it's the same one we currently have
|
|
if let Some(n) = self.note {
|
|
if n == data[1] {
|
|
self.note = None;
|
|
}
|
|
}
|
|
}
|
|
|
|
_ => (),
|
|
}
|
|
}
|
|
}
|
|
|
|
// Generate the VST 2.4 plugin entry points, with unique 4-byte plugin ID "tuna".
baseplug::vst2!(RoboTuna, b"tuna");
|