Voice Rework -- Events, Track Queues (#806)

This implements a proof-of-concept for an improved audio frontend. The largest change is the introduction of events and event handling: both by time elapsed and by track events, such as ending or looping. Following on from this, the library now includes a basic, event-driven track queue system (which people seem to ask for unusually often). A new sample, `examples/13_voice_events`, demonstrates both the `TrackQueue` system and some basic events via the `~queue` and `~play_fade` commands.

Locks are removed from around the control of `Audio` objects, which should allow the backend to be moved to a more granular, futures-based solution in a cleaner way.
Kyle Simpson
2020-10-29 20:25:20 +00:00
committed by Alex M. M
commit 7e4392ae68
76 changed files with 8756 additions and 0 deletions


@@ -0,0 +1,303 @@
use super::{apply_length_hint, compressed_cost_per_sec, default_config};
use crate::{
constants::*,
input::{
error::{Error, Result},
CodecType,
Container,
Input,
Metadata,
Reader,
},
};
use audiopus::{
coder::Encoder as OpusEncoder,
Application,
Bitrate,
Channels,
Error as OpusError,
ErrorCode as OpusErrorCode,
SampleRate,
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::{
convert::TryInto,
io::{Error as IoError, ErrorKind as IoErrorKind, Read, Result as IoResult},
mem,
sync::atomic::{AtomicUsize, Ordering},
};
use streamcatcher::{Config, NeedsBytes, Stateful, Transform, TransformPosition, TxCatcher};
use tracing::{debug, trace};
/// A wrapper around an existing [`Input`] which compresses
/// the input using the Opus codec before storing it in memory.
///
/// The main purpose of this wrapper is to enable seeking on
/// incompatible sources (i.e., ffmpeg output) and to ease resource
/// consumption for commonly reused/shared tracks. [`Restartable`]
/// and [`Memory`] offer the same functionality with different
/// tradeoffs.
///
/// This is intended for use with larger, repeatedly used audio
/// tracks shared between sources, and stores the sound data
/// retrieved as **compressed Opus audio**. There is an associated memory cost,
/// but it is far smaller than that of a [`Memory`] source.
///
/// [`Input`]: ../struct.Input.html
/// [`Memory`]: struct.Memory.html
/// [`Restartable`]: ../struct.Restartable.html
#[derive(Clone, Debug)]
pub struct Compressed {
/// Inner shared bytestore.
pub raw: TxCatcher<Box<Input>, OpusCompressor>,
/// Metadata moved out of the captured source.
pub metadata: Metadata,
/// Stereo-ness of the captured source.
pub stereo: bool,
}
impl Compressed {
/// Wrap an existing [`Input`] with an in-memory store, compressed using Opus.
///
/// [`Input`]: ../struct.Input.html
/// [`Metadata.duration`]: ../struct.Metadata.html#structfield.duration
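///
/// # Examples
///
/// A minimal usage sketch (the file path and bitrate here are purely illustrative):
///
/// ```rust,no_run
/// use songbird::input::{self, cached::Compressed};
/// use audiopus::Bitrate;
///
/// let source = futures::executor::block_on(input::ffmpeg("my_song.mp3"))
///     .expect("File should be readable by ffmpeg.");
/// let compressed = Compressed::new(source, Bitrate::BitsPerSecond(128_000))
///     .expect("Source should encode cleanly at this bitrate.");
/// ```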
pub fn new(source: Input, bitrate: Bitrate) -> Result<Self> {
Self::with_config(source, bitrate, None)
}
/// Wrap an existing [`Input`] with an in-memory store, compressed using Opus.
///
/// `config.length_hint` may be used to control the size of the initial chunk, preventing
/// needless allocations and copies. If this is not present, the value specified in
/// `source`'s [`Metadata.duration`] will be used.
///
/// [`Input`]: ../struct.Input.html
/// [`Metadata.duration`]: ../struct.Metadata.html#structfield.duration
pub fn with_config(source: Input, bitrate: Bitrate, config: Option<Config>) -> Result<Self> {
let channels = if source.stereo {
Channels::Stereo
} else {
Channels::Mono
};
let mut encoder = OpusEncoder::new(SampleRate::Hz48000, channels, Application::Audio)?;
encoder.set_bitrate(bitrate)?;
Self::with_encoder(source, encoder, config)
}
/// Wrap an existing [`Input`] with an in-memory store, compressed using a user-defined
/// Opus encoder.
///
/// `config` functions as in [`with_config`]. This function's behaviour is undefined if your encoder
/// uses a sample rate other than 48kHz, or a channel count which differs from that of the source.
///
/// [`Input`]: ../struct.Input.html
/// [`with_config`]: #method.with_config
pub fn with_encoder(
mut source: Input,
encoder: OpusEncoder,
config: Option<Config>,
) -> Result<Self> {
let bitrate = encoder.bitrate()?;
let cost_per_sec = compressed_cost_per_sec(bitrate);
let stereo = source.stereo;
let metadata = source.metadata.take();
let mut config = config.unwrap_or_else(|| default_config(cost_per_sec));
// apply length hint.
if config.length_hint.is_none() {
if let Some(dur) = metadata.duration {
apply_length_hint(&mut config, dur, cost_per_sec);
}
}
let raw = config
.build_tx(Box::new(source), OpusCompressor::new(encoder, stereo))
.map_err(Error::Streamcatcher)?;
Ok(Self {
raw,
metadata,
stereo,
})
}
/// Acquire a new handle to this object, creating a new
/// view of the existing cached data from the beginning.
pub fn new_handle(&self) -> Self {
Self {
raw: self.raw.new_handle(),
metadata: self.metadata.clone(),
stereo: self.stereo,
}
}
}
impl From<Compressed> for Input {
fn from(src: Compressed) -> Self {
Input::new(
true,
Reader::Compressed(src.raw),
CodecType::Opus
.try_into()
.expect("Default decoder values are known to be valid."),
Container::Dca { first_frame: 0 },
Some(src.metadata),
)
}
}
/// Transform applied inside [`Compressed`], converting a floating-point PCM
/// input stream into a DCA-framed Opus stream.
///
/// Created and managed by [`Compressed`].
///
/// [`Compressed`]: struct.Compressed.html
#[derive(Debug)]
pub struct OpusCompressor {
encoder: OpusEncoder,
last_frame: Vec<u8>,
stereo_input: bool,
frame_pos: usize,
audio_bytes: AtomicUsize,
}
impl OpusCompressor {
fn new(encoder: OpusEncoder, stereo_input: bool) -> Self {
Self {
encoder,
last_frame: Vec::with_capacity(4000),
stereo_input,
frame_pos: 0,
audio_bytes: Default::default(),
}
}
}
impl<T> Transform<T> for OpusCompressor
where
T: Read,
{
fn transform_read(&mut self, src: &mut T, buf: &mut [u8]) -> IoResult<TransformPosition> {
let output_start = mem::size_of::<u16>();
let mut eof = false;
let mut raw_len = 0;
let mut out = None;
let mut sample_buf = [0f32; STEREO_FRAME_SIZE];
let samples_in_frame = if self.stereo_input {
STEREO_FRAME_SIZE
} else {
MONO_FRAME_SIZE
};
// Purge old frame and read new, if needed.
if self.frame_pos == self.last_frame.len() + output_start || self.last_frame.is_empty() {
self.last_frame.resize(self.last_frame.capacity(), 0);
// We can't use `read_f32_into` because we can't guarantee the buffer will be filled.
for el in sample_buf[..samples_in_frame].iter_mut() {
match src.read_f32::<LittleEndian>() {
Ok(sample) => {
*el = sample;
raw_len += 1;
},
Err(e) if e.kind() == IoErrorKind::UnexpectedEof => {
eof = true;
break;
},
Err(e) => {
out = Some(Err(e));
break;
},
}
}
if out.is_none() && raw_len > 0 {
loop {
// NOTE: we don't index by raw_len because the last frame can be too small
// to occupy a "whole packet". Zero-padding is the correct behaviour.
match self
.encoder
.encode_float(&sample_buf[..samples_in_frame], &mut self.last_frame[..])
{
Ok(pkt_len) => {
trace!("Next packet to write has {:?}", pkt_len);
self.frame_pos = 0;
self.last_frame.truncate(pkt_len);
break;
},
Err(OpusError::Opus(OpusErrorCode::BufferTooSmall)) => {
// If we need more capacity to encode this frame, then take it.
trace!("Resizing inner buffer (+256).");
self.last_frame.resize(self.last_frame.len() + 256, 0);
},
Err(e) => {
debug!("Read error {:?} {:?} {:?}.", e, out, raw_len);
out = Some(Err(IoError::new(IoErrorKind::Other, e)));
break;
},
}
}
}
}
if out.is_none() {
// Write from frame we have.
let start = if self.frame_pos < output_start {
(&mut buf[..output_start])
.write_i16::<LittleEndian>(self.last_frame.len() as i16)
.expect(
"Minimum bytes requirement for Opus (2) should mean that an i16 \
may always be written.",
);
self.frame_pos += output_start;
trace!("Wrote frame header: {}.", self.last_frame.len());
output_start
} else {
0
};
let out_pos = self.frame_pos - output_start;
let remaining = self.last_frame.len() - out_pos;
let write_len = remaining.min(buf.len() - start);
buf[start..start + write_len]
.copy_from_slice(&self.last_frame[out_pos..out_pos + write_len]);
self.frame_pos += write_len;
trace!("Appended {} to inner store", write_len);
out = Some(Ok(write_len + start));
}
// NOTE: use of raw_len here preserves true sample length even if
// stream is extended to 20ms boundary.
out.unwrap_or_else(|| Err(IoError::new(IoErrorKind::Other, "Unclear.")))
.map(|compressed_sz| {
self.audio_bytes
.fetch_add(raw_len * mem::size_of::<f32>(), Ordering::Release);
if eof {
TransformPosition::Finished
} else {
TransformPosition::Read(compressed_sz)
}
})
}
}
impl NeedsBytes for OpusCompressor {
fn min_bytes_required(&self) -> usize {
2
}
}
impl Stateful for OpusCompressor {
type State = usize;
fn state(&self) -> Self::State {
self.audio_bytes.load(Ordering::Acquire)
}
}

src/input/cached/hint.rs (new file, 40 lines)

@@ -0,0 +1,40 @@
use std::time::Duration;
use streamcatcher::Config;
/// Expected amount of time that an input should last.
#[derive(Copy, Clone, Debug)]
pub enum LengthHint {
/// Estimate of a source's length in bytes.
Bytes(usize),
/// Estimate of a source's length in time.
///
/// This will be converted to a bytecount at setup.
Time(Duration),
}
impl From<usize> for LengthHint {
fn from(size: usize) -> Self {
LengthHint::Bytes(size)
}
}
impl From<Duration> for LengthHint {
fn from(size: Duration) -> Self {
LengthHint::Time(size)
}
}
/// Modify the given cache configuration to initially allocate
/// enough bytes to store a length of audio at the given bitrate.
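///
/// # Examples
///
/// A short sketch reserving room for three minutes of raw stereo audio up front
/// (`default_config` and `raw_cost_per_sec` live alongside this helper in the
/// `cached` module):
///
/// ```rust
/// use songbird::input::cached::{apply_length_hint, default_config, raw_cost_per_sec};
/// use std::time::Duration;
///
/// let cost_per_sec = raw_cost_per_sec(true);
/// let mut config = default_config(cost_per_sec);
/// apply_length_hint(&mut config, Duration::from_secs(180), cost_per_sec);
/// ```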
pub fn apply_length_hint<H>(config: &mut Config, hint: H, cost_per_sec: usize)
where
H: Into<LengthHint>,
{
config.length_hint = Some(match hint.into() {
LengthHint::Bytes(a) => a,
LengthHint::Time(t) => {
let s = t.as_secs() + if t.subsec_millis() > 0 { 1 } else { 0 };
(s as usize) * cost_per_sec
},
});
}

src/input/cached/memory.rs (new file, 116 lines)

@@ -0,0 +1,116 @@
use super::{apply_length_hint, default_config, raw_cost_per_sec};
use crate::input::{
error::{Error, Result},
CodecType,
Container,
Input,
Metadata,
Reader,
};
use std::convert::{TryFrom, TryInto};
use streamcatcher::{Catcher, Config};
/// A wrapper around an existing [`Input`] which caches
/// the decoded and converted audio data locally in memory.
///
/// The main purpose of this wrapper is to enable seeking on
/// incompatible sources (i.e., ffmpeg output) and to ease resource
/// consumption for commonly reused/shared tracks. [`Restartable`]
/// and [`Compressed`] offer the same functionality with different
/// tradeoffs.
///
/// This is intended for use with small, repeatedly used audio
/// tracks shared between sources, and stores the sound data
/// retrieved in **uncompressed floating point** form to minimise the
/// cost of audio processing. This comes at a significant memory cost: *3 Mbps (375 kiB/s)*,
/// or roughly 131 MiB of RAM for a 6-minute song.
///
/// [`Input`]: ../struct.Input.html
/// [`Compressed`]: struct.Compressed.html
/// [`Restartable`]: ../struct.Restartable.html
#[derive(Clone, Debug)]
pub struct Memory {
/// Inner shared bytestore.
pub raw: Catcher<Box<Reader>>,
/// Metadata moved out of the captured source.
pub metadata: Metadata,
/// Codec used to read the inner bytestore.
pub kind: CodecType,
/// Stereo-ness of the captured source.
pub stereo: bool,
/// Framing mechanism for the inner bytestore.
pub container: Container,
}
impl Memory {
/// Wrap an existing [`Input`] with an in-memory store with the same codec and framing.
///
/// [`Input`]: ../struct.Input.html
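///
/// # Examples
///
/// A minimal usage sketch (the file path is purely illustrative):
///
/// ```rust,no_run
/// use songbird::input::{self, cached::Memory};
///
/// let source = futures::executor::block_on(input::ffmpeg("my_song.mp3"))
///     .expect("File should be readable by ffmpeg.");
/// let in_memory = Memory::new(source)
///     .expect("Source should cache cleanly.");
/// ```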
pub fn new(source: Input) -> Result<Self> {
Self::with_config(source, None)
}
/// Wrap an existing [`Input`] with an in-memory store with the same codec and framing.
///
/// `config.length_hint` may be used to control the size of the initial chunk, preventing
/// needless allocations and copies. If this is not present, the value specified in
/// `source`'s [`Metadata.duration`] will be used, assuming that the source is uncompressed.
///
/// [`Input`]: ../struct.Input.html
/// [`Metadata.duration`]: ../struct.Metadata.html#structfield.duration
pub fn with_config(mut source: Input, config: Option<Config>) -> Result<Self> {
let stereo = source.stereo;
let kind = (&source.kind).into();
let container = source.container;
let metadata = source.metadata.take();
let cost_per_sec = raw_cost_per_sec(stereo);
let mut config = config.unwrap_or_else(|| default_config(cost_per_sec));
// apply length hint.
if config.length_hint.is_none() {
if let Some(dur) = metadata.duration {
apply_length_hint(&mut config, dur, cost_per_sec);
}
}
let raw = config
.build(Box::new(source.reader))
.map_err(Error::Streamcatcher)?;
Ok(Self {
raw,
metadata,
kind,
stereo,
container,
})
}
/// Acquire a new handle to this object, creating a new
/// view of the existing cached data from the beginning.
pub fn new_handle(&self) -> Self {
Self {
raw: self.raw.new_handle(),
metadata: self.metadata.clone(),
kind: self.kind,
stereo: self.stereo,
container: self.container,
}
}
}
impl TryFrom<Memory> for Input {
type Error = Error;
fn try_from(src: Memory) -> Result<Self> {
Ok(Input::new(
src.stereo,
Reader::Memory(src.raw),
src.kind.try_into()?,
src.container,
Some(src.metadata),
))
}
}

src/input/cached/mod.rs (new file, 44 lines)

@@ -0,0 +1,44 @@
//! In-memory, shared input sources for reuse between calls, fast seeking, and
//! direct Opus frame passthrough.
mod compressed;
mod hint;
mod memory;
#[cfg(test)]
mod tests;
pub use self::{compressed::*, hint::*, memory::*};
use crate::constants::*;
use crate::input::utils;
use audiopus::Bitrate;
use std::{mem, time::Duration};
use streamcatcher::{Config, GrowthStrategy};
/// Estimates the cost, in B/s, of audio data compressed at the given bitrate.
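///
/// A worked example of the estimate; the 20ms frame length (an `AUDIO_FRAME_RATE`
/// of 50) and the 2-byte length header per frame are assumptions about the crate's
/// constants:
///
/// ```rust
/// use audiopus::Bitrate;
/// use songbird::input::cached::compressed_cost_per_sec;
///
/// // (128_000 / 8) bytes of Opus data + 50 frames * 2 header bytes = 16_100 B/s.
/// let cost = compressed_cost_per_sec(Bitrate::BitsPerSecond(128_000));
/// assert!(cost > 0);
/// ```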
pub fn compressed_cost_per_sec(bitrate: Bitrate) -> usize {
let framing_cost_per_sec = AUDIO_FRAME_RATE * mem::size_of::<u16>();
let bitrate_raw = match bitrate {
Bitrate::BitsPerSecond(i) => i,
Bitrate::Auto => 64_000,
Bitrate::Max => 512_000,
} as usize;
(bitrate_raw / 8) + framing_cost_per_sec
}
/// Calculates the cost, in B/s, of raw floating-point audio data.
pub fn raw_cost_per_sec(stereo: bool) -> usize {
utils::timestamp_to_byte_count(Duration::from_secs(1), stereo)
}
/// Provides the default config used by a cached source.
///
/// This maps to the default configuration in [`streamcatcher`], using
/// a constant chunk size of 5s worth of audio at the given bitrate estimate.
///
/// [`streamcatcher`]: https://docs.rs/streamcatcher/0.1.0/streamcatcher/struct.Config.html
pub fn default_config(cost_per_sec: usize) -> Config {
Config::new().chunk_size(GrowthStrategy::Constant(5 * cost_per_sec))
}

src/input/cached/tests.rs (new file, 79 lines)

@@ -0,0 +1,79 @@
use super::*;
use crate::{
constants::*,
input::{error::Error, ffmpeg, Codec, Container, Input, Reader},
test_utils::*,
};
use audiopus::{coder::Decoder, Bitrate, Channels, SampleRate};
use byteorder::{LittleEndian, ReadBytesExt};
use std::io::{Cursor, Read};
#[tokio::test]
async fn streamcatcher_preserves_file() {
let input = make_sine(50 * MONO_FRAME_SIZE, true);
let input_len = input.len();
let mut raw = default_config(raw_cost_per_sec(true))
.build(Cursor::new(input.clone()))
.map_err(Error::Streamcatcher)
.unwrap();
let mut out_buf = vec![];
let read = raw.read_to_end(&mut out_buf).unwrap();
assert_eq!(input_len, read);
assert_eq!(input, out_buf);
}
#[test]
fn compressed_scans_frames_decodes_mono() {
let data = one_s_compressed_sine(false);
run_through_dca(data.raw);
}
#[test]
fn compressed_scans_frames_decodes_stereo() {
let data = one_s_compressed_sine(true);
run_through_dca(data.raw);
}
#[test]
fn compressed_triggers_valid_passthrough() {
let mut input = Input::from(one_s_compressed_sine(true));
assert!(input.supports_passthrough());
let mut opus_buf = [0u8; 10_000];
let mut signal_buf = [0i16; 1920];
let opus_len = input.read_opus_frame(&mut opus_buf[..]).unwrap();
let mut decoder = Decoder::new(SampleRate::Hz48000, Channels::Stereo).unwrap();
decoder
.decode(Some(&opus_buf[..opus_len]), &mut signal_buf[..], false)
.unwrap();
}
fn one_s_compressed_sine(stereo: bool) -> Compressed {
let data = make_sine(50 * MONO_FRAME_SIZE, stereo);
let input = Input::new(stereo, data.into(), Codec::FloatPcm, Container::Raw, None);
Compressed::new(input, Bitrate::BitsPerSecond(128_000)).unwrap()
}
fn run_through_dca(mut src: impl Read) {
let mut decoder = Decoder::new(SampleRate::Hz48000, Channels::Stereo).unwrap();
let mut pkt_space = [0u8; 10_000];
let mut signals = [0i16; 1920];
while let Ok(frame_len) = src.read_i16::<LittleEndian>() {
let pkt_len = src.read(&mut pkt_space[..frame_len as usize]).unwrap();
decoder
.decode(Some(&pkt_space[..pkt_len]), &mut signals[..], false)
.unwrap();
}
}

src/input/child.rs (new file, 38 lines)

@@ -0,0 +1,38 @@
use super::*;
use std::{
io::{BufReader, Read},
process::Child,
};
use tracing::debug;
/// Handle for a child process which ensures that any subprocesses are properly closed
/// on drop.
#[derive(Debug)]
pub struct ChildContainer(Child);
pub(crate) fn child_to_reader<T>(child: Child) -> Reader {
Reader::Pipe(BufReader::with_capacity(
STEREO_FRAME_SIZE * mem::size_of::<T>() * CHILD_BUFFER_LEN,
ChildContainer(child),
))
}
impl From<Child> for Reader {
fn from(container: Child) -> Self {
child_to_reader::<f32>(container)
}
}
impl Read for ChildContainer {
fn read(&mut self, buffer: &mut [u8]) -> IoResult<usize> {
self.0.stdout.as_mut().unwrap().read(buffer)
}
}
impl Drop for ChildContainer {
fn drop(&mut self) {
if let Err(e) = self.0.kill() {
debug!("Error awaiting child process: {:?}", e);
}
}
}

src/input/codec/mod.rs (new file, 99 lines)

@@ -0,0 +1,99 @@
//! Decoding schemes for input audio bytestreams.
mod opus;
pub use self::opus::OpusDecoderState;
use super::*;
use std::{fmt::Debug, mem};
/// State used to decode input bytes of an [`Input`].
///
/// [`Input`]: ../struct.Input.html
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum Codec {
/// The inner bytestream is encoded using the Opus codec, to be decoded
/// using the given state.
///
/// Must be combined with a non-[`Raw`] container.
///
/// [`Raw`]: ../enum.Container.html#variant.Raw
Opus(OpusDecoderState),
/// The inner bytestream is encoded using raw `i16` samples.
///
/// Must be combined with a [`Raw`] container.
///
/// [`Raw`]: ../enum.Container.html#variant.Raw
Pcm,
/// The inner bytestream is encoded using raw `f32` samples.
///
/// Must be combined with a [`Raw`] container.
///
/// [`Raw`]: ../enum.Container.html#variant.Raw
FloatPcm,
}
impl From<&Codec> for CodecType {
fn from(f: &Codec) -> Self {
use Codec::*;
match f {
Opus(_) => Self::Opus,
Pcm => Self::Pcm,
FloatPcm => Self::FloatPcm,
}
}
}
/// Type of data being passed into an [`Input`].
///
/// [`Input`]: ../struct.Input.html
#[non_exhaustive]
#[derive(Copy, Clone, Debug)]
pub enum CodecType {
/// The inner bytestream is encoded using the Opus codec.
///
/// Must be combined with a non-[`Raw`] container.
///
/// [`Raw`]: ../enum.Container.html#variant.Raw
Opus,
/// The inner bytestream is encoded using raw `i16` samples.
///
/// Must be combined with a [`Raw`] container.
///
/// [`Raw`]: ../enum.Container.html#variant.Raw
Pcm,
/// The inner bytestream is encoded using raw `f32` samples.
///
/// Must be combined with a [`Raw`] container.
///
/// [`Raw`]: ../enum.Container.html#variant.Raw
FloatPcm,
}
impl CodecType {
/// Returns the length of a single output sample, in bytes.
pub fn sample_len(&self) -> usize {
use CodecType::*;
match self {
Opus | FloatPcm => mem::size_of::<f32>(),
Pcm => mem::size_of::<i16>(),
}
}
}
impl TryFrom<CodecType> for Codec {
type Error = Error;
fn try_from(f: CodecType) -> Result<Self> {
use CodecType::*;
match f {
Opus => Ok(Codec::Opus(OpusDecoderState::new()?)),
Pcm => Ok(Codec::Pcm),
FloatPcm => Ok(Codec::FloatPcm),
}
}
}

src/input/codec/opus.rs (new file, 43 lines)

@@ -0,0 +1,43 @@
use crate::constants::*;
use audiopus::{coder::Decoder as OpusDecoder, Channels, Error as OpusError};
use parking_lot::Mutex;
use std::sync::Arc;
#[derive(Clone, Debug)]
/// Inner state used to decode Opus input bytestreams.
pub struct OpusDecoderState {
/// Inner decoder used to convert opus frames into a stream of samples.
pub decoder: Arc<Mutex<OpusDecoder>>,
/// Controls whether this source allows direct Opus frame passthrough.
/// Defaults to `true`.
///
/// Enabling this flag is a promise from the programmer to the audio core
/// that the source has been encoded at 48kHz, using 20ms long frames.
/// If you cannot guarantee this, disable this flag, or else risk nasal demons
/// and bizarre audio behaviour.
pub allow_passthrough: bool,
pub(crate) current_frame: Vec<f32>,
pub(crate) frame_pos: usize,
pub(crate) should_reset: bool,
}
impl OpusDecoderState {
/// Creates a new decoder, having stereo output at 48kHz.
pub fn new() -> Result<Self, OpusError> {
Ok(Self::from_decoder(OpusDecoder::new(
SAMPLE_RATE,
Channels::Stereo,
)?))
}
/// Creates a new decoder pre-configured by the user.
pub fn from_decoder(decoder: OpusDecoder) -> Self {
Self {
decoder: Arc::new(Mutex::new(decoder)),
allow_passthrough: true,
current_frame: Vec::with_capacity(STEREO_FRAME_SIZE),
frame_pos: 0,
should_reset: false,
}
}
}


@@ -0,0 +1,8 @@
/// Information used in audio frame detection.
#[derive(Clone, Copy, Debug)]
pub struct Frame {
/// Length of this frame's header, in bytes.
pub header_len: usize,
/// Payload length, in bytes.
pub frame_len: usize,
}


@@ -0,0 +1,69 @@
mod frame;
pub use frame::*;
use super::CodecType;
use byteorder::{LittleEndian, ReadBytesExt};
use std::{
fmt::Debug,
io::{Read, Result as IoResult},
mem,
};
/// Marker and state for decoding framed input files.
#[non_exhaustive]
#[derive(Clone, Copy, Debug)]
pub enum Container {
/// Raw, unframed input.
Raw,
/// Framed input, beginning with a JSON header.
///
/// Frames have the form `{ len: i16, payload: [u8; len]}`.
Dca {
/// Byte index of the first frame after the JSON header.
first_frame: usize,
},
}
impl Container {
/// Tries to read the header of the next frame from an input stream.
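///
/// # Examples
///
/// A small sketch reading a hand-built DCA frame header (a 2-byte little-endian
/// length followed by the payload):
///
/// ```rust
/// use songbird::input::{CodecType, Container};
/// use std::io::Cursor;
///
/// // Length header of 3, followed by a 3-byte payload.
/// let bytes = [3u8, 0, 0xAA, 0xBB, 0xCC];
/// let mut container = Container::Dca { first_frame: 0 };
///
/// let frame = container
///     .next_frame_length(Cursor::new(&bytes[..]), CodecType::Opus)
///     .unwrap();
/// assert_eq!(frame.header_len, 2);
/// assert_eq!(frame.frame_len, 3);
/// ```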
pub fn next_frame_length(
&mut self,
mut reader: impl Read,
input: CodecType,
) -> IoResult<Frame> {
use Container::*;
match self {
Raw => Ok(Frame {
header_len: 0,
frame_len: input.sample_len(),
}),
Dca { .. } => reader.read_i16::<LittleEndian>().map(|frame_len| Frame {
header_len: mem::size_of::<i16>(),
frame_len: frame_len.max(0) as usize,
}),
}
}
/// Returns the byte length of a single sample, allowing a seek to act directly
/// on the underlying byte stream, if the input is unframed (`None` otherwise).
pub fn try_seek_trivial(&self, input: CodecType) -> Option<usize> {
use Container::*;
match self {
Raw => Some(input.sample_len()),
_ => None,
}
}
/// Returns the byte index of the first frame containing audio payload data.
pub fn input_start(&self) -> usize {
use Container::*;
match self {
Raw => 0,
Dca { first_frame } => *first_frame,
}
}
}

src/input/dca.rs (new file, 137 lines)

@@ -0,0 +1,137 @@
use super::{codec::OpusDecoderState, error::DcaError, Codec, Container, Input, Metadata, Reader};
use serde::Deserialize;
use std::{ffi::OsStr, io::BufReader, mem};
use tokio::{fs::File as TokioFile, io::AsyncReadExt};
/// Creates a streamed audio source from a DCA file.
/// Currently only accepts the [DCA1 format](https://github.com/bwmarrin/dca).
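///
/// # Examples
///
/// A minimal usage sketch (the file path is purely illustrative):
///
/// ```rust,no_run
/// use songbird::input::dca;
///
/// let source = futures::executor::block_on(dca("audio.dca"))
///     .expect("File should be a valid DCA1 file.");
/// ```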
pub async fn dca<P: AsRef<OsStr>>(path: P) -> Result<Input, DcaError> {
_dca(path.as_ref()).await
}
async fn _dca(path: &OsStr) -> Result<Input, DcaError> {
let mut reader = TokioFile::open(path).await.map_err(DcaError::IoError)?;
let mut header = [0u8; 4];
// Read in the magic number to verify it's a DCA file.
reader
.read_exact(&mut header)
.await
.map_err(DcaError::IoError)?;
if header != b"DCA1"[..] {
return Err(DcaError::InvalidHeader);
}
let size = reader
.read_i32_le()
.await
.map_err(|_| DcaError::InvalidHeader)?;
// Sanity check
if size < 2 {
return Err(DcaError::InvalidSize(size));
}
let mut raw_json = Vec::with_capacity(size as usize);
let mut json_reader = reader.take(size as u64);
json_reader
.read_to_end(&mut raw_json)
.await
.map_err(DcaError::IoError)?;
let reader = BufReader::new(json_reader.into_inner().into_std().await);
let metadata: Metadata = serde_json::from_slice::<DcaMetadata>(raw_json.as_slice())
.map_err(DcaError::InvalidMetadata)?
.into();
let stereo = metadata.channels == Some(2);
Ok(Input::new(
stereo,
Reader::File(reader),
Codec::Opus(OpusDecoderState::new().map_err(DcaError::Opus)?),
Container::Dca {
first_frame: (size as usize) + mem::size_of::<i32>() + header.len(),
},
Some(metadata),
))
}
#[derive(Debug, Deserialize)]
pub(crate) struct DcaMetadata {
pub(crate) dca: Dca,
pub(crate) opus: Opus,
pub(crate) info: Option<Info>,
pub(crate) origin: Option<Origin>,
pub(crate) extra: Option<serde_json::Value>,
}
#[derive(Debug, Deserialize)]
pub(crate) struct Dca {
pub(crate) version: u64,
pub(crate) tool: Tool,
}
#[derive(Debug, Deserialize)]
pub(crate) struct Tool {
pub(crate) name: String,
pub(crate) version: String,
pub(crate) url: String,
pub(crate) author: String,
}
#[derive(Debug, Deserialize)]
pub(crate) struct Opus {
pub(crate) mode: String,
pub(crate) sample_rate: u32,
pub(crate) frame_size: u64,
pub(crate) abr: u64,
pub(crate) vbr: u64,
pub(crate) channels: u8,
}
#[derive(Debug, Deserialize)]
pub(crate) struct Info {
pub(crate) title: Option<String>,
pub(crate) artist: Option<String>,
pub(crate) album: Option<String>,
pub(crate) genre: Option<String>,
pub(crate) cover: Option<String>,
}
#[derive(Debug, Deserialize)]
pub(crate) struct Origin {
pub(crate) source: Option<String>,
pub(crate) abr: Option<u64>,
pub(crate) channels: Option<u8>,
pub(crate) encoding: Option<String>,
pub(crate) url: Option<String>,
}
impl From<DcaMetadata> for Metadata {
fn from(mut d: DcaMetadata) -> Self {
let (title, artist) = d
.info
.take()
.map(|mut m| (m.title.take(), m.artist.take()))
.unwrap_or_else(|| (None, None));
let channels = Some(d.opus.channels);
let sample_rate = Some(d.opus.sample_rate);
Self {
title,
artist,
channels,
sample_rate,
..Default::default()
}
}
}

src/input/error.rs (new file, 93 lines)

@@ -0,0 +1,93 @@
//! Errors caused by input creation.
use audiopus::Error as OpusError;
use serde_json::{Error as JsonError, Value};
use std::{io::Error as IoError, process::Output};
use streamcatcher::CatcherError;
/// An error returned when creating a new [`Input`].
///
/// [`Input`]: ../struct.Input.html
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
/// An error occurred while opening a new DCA source.
Dca(DcaError),
/// An error occurred while reading, or opening a file.
Io(IoError),
/// An error occurred while parsing JSON (i.e., during metadata/stereo detection).
Json(JsonError),
/// An error occurred within the Opus codec.
Opus(OpusError),
/// Failed to extract metadata from alternate pipe.
Metadata,
/// Apparently failed to create stdout.
Stdout,
/// An error occurred while checking if a path is stereo.
Streams,
/// Configuration error for a cached Input.
Streamcatcher(CatcherError),
/// An error occurred while processing the JSON output from `youtube-dl`.
///
/// The JSON output is given.
YouTubeDLProcessing(Value),
/// An error occurred while running `youtube-dl`.
YouTubeDLRun(Output),
/// The `url` field of the `youtube-dl` JSON output was not present.
///
/// The JSON output is given.
YouTubeDLUrl(Value),
}
impl From<CatcherError> for Error {
fn from(e: CatcherError) -> Self {
Error::Streamcatcher(e)
}
}
impl From<DcaError> for Error {
fn from(e: DcaError) -> Self {
Error::Dca(e)
}
}
impl From<IoError> for Error {
fn from(e: IoError) -> Error {
Error::Io(e)
}
}
impl From<JsonError> for Error {
fn from(e: JsonError) -> Self {
Error::Json(e)
}
}
impl From<OpusError> for Error {
fn from(e: OpusError) -> Error {
Error::Opus(e)
}
}
/// An error returned from the [`dca`] method.
///
/// [`dca`]: ../fn.dca.html
#[derive(Debug)]
#[non_exhaustive]
pub enum DcaError {
/// An error occurred while reading, or opening a file.
IoError(IoError),
/// The file opened did not have a valid DCA JSON header.
InvalidHeader,
/// The file's metadata block was invalid, or could not be parsed.
InvalidMetadata(JsonError),
/// The file's header reported an invalid metadata block size.
InvalidSize(i32),
/// An error was encountered while creating a new Opus decoder.
Opus(OpusError),
}
/// Convenience type for fallible return of [`Input`]s.
///
/// [`Input`]: ../struct.Input.html
pub type Result<T> = std::result::Result<T, Error>;

src/input/ffmpeg_src.rs (new file, 146 lines)

@@ -0,0 +1,146 @@
use super::{
child_to_reader,
error::{Error, Result},
Codec,
Container,
Input,
Metadata,
};
use serde_json::Value;
use std::{
ffi::OsStr,
process::{Command, Stdio},
};
use tokio::process::Command as TokioCommand;
use tracing::debug;
/// Opens an audio file through `ffmpeg` and creates an audio source.
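///
/// # Examples
///
/// A minimal usage sketch (the file path is purely illustrative):
///
/// ```rust,no_run
/// use songbird::input;
///
/// let source = futures::executor::block_on(input::ffmpeg("./some_file.mp3"))
///     .expect("File should be readable by ffmpeg.");
/// ```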
pub async fn ffmpeg<P: AsRef<OsStr>>(path: P) -> Result<Input> {
_ffmpeg(path.as_ref()).await
}
pub(crate) async fn _ffmpeg(path: &OsStr) -> Result<Input> {
// Will fail if the path is not to a file on the fs. Likely a YouTube URI.
let is_stereo = is_stereo(path)
.await
.unwrap_or_else(|_e| (false, Default::default()));
let stereo_val = if is_stereo.0 { "2" } else { "1" };
_ffmpeg_optioned(
path,
&[],
&[
"-f",
"s16le",
"-ac",
stereo_val,
"-ar",
"48000",
"-acodec",
"pcm_f32le",
"-",
],
Some(is_stereo),
)
.await
}
/// Opens an audio file through `ffmpeg` and creates an audio source, with
/// user-specified arguments to pass to ffmpeg.
///
/// Note that this does _not_ build on the arguments passed by the [`ffmpeg`]
/// function.
///
/// # Examples
///
/// Pass options to create a custom ffmpeg streamer:
///
/// ```rust,no_run
/// use songbird::input;
///
/// let stereo_val = "2";
///
/// let streamer = futures::executor::block_on(input::ffmpeg_optioned("./some_file.mp3", &[], &[
/// "-f",
/// "s16le",
/// "-ac",
/// stereo_val,
/// "-ar",
/// "48000",
/// "-acodec",
/// "pcm_s16le",
/// "-",
/// ]));
///```
pub async fn ffmpeg_optioned<P: AsRef<OsStr>>(
path: P,
pre_input_args: &[&str],
args: &[&str],
) -> Result<Input> {
_ffmpeg_optioned(path.as_ref(), pre_input_args, args, None).await
}
pub(crate) async fn _ffmpeg_optioned(
path: &OsStr,
pre_input_args: &[&str],
args: &[&str],
is_stereo_known: Option<(bool, Metadata)>,
) -> Result<Input> {
let (is_stereo, metadata) = if let Some(vals) = is_stereo_known {
vals
} else {
is_stereo(path)
.await
.ok()
.unwrap_or_else(|| (false, Default::default()))
};
let command = Command::new("ffmpeg")
.args(pre_input_args)
.arg("-i")
.arg(path)
.args(args)
.stderr(Stdio::null())
.stdin(Stdio::null())
.stdout(Stdio::piped())
.spawn()?;
Ok(Input::new(
is_stereo,
child_to_reader::<f32>(command),
Codec::FloatPcm,
Container::Raw,
Some(metadata),
))
}
pub(crate) async fn is_stereo(path: &OsStr) -> Result<(bool, Metadata)> {
let args = [
"-v",
"quiet",
"-of",
"json",
"-show_format",
"-show_streams",
"-i",
];
let out = TokioCommand::new("ffprobe")
.args(&args)
.arg(path)
.stdin(Stdio::null())
.output()
.await?;
let value: Value = serde_json::from_reader(&out.stdout[..])?;
let metadata = Metadata::from_ffprobe_json(&value);
debug!("FFprobe metadata {:?}", metadata);
if let Some(count) = metadata.channels {
Ok((count == 2, metadata))
} else {
Err(Error::Streams)
}
}

src/input/metadata.rs (new file, 166 lines)

@@ -0,0 +1,166 @@
use crate::constants::*;
use serde_json::Value;
use std::time::Duration;
/// Information about an [`Input`] source.
///
/// [`Input`]: struct.Input.html
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct Metadata {
/// The title of this stream.
pub title: Option<String>,
/// The main artist of this stream.
pub artist: Option<String>,
/// The date of creation of this stream.
pub date: Option<String>,
/// The number of audio channels in this stream.
///
/// Any number `>= 2` is treated as stereo.
pub channels: Option<u8>,
/// The time at which the first true sample is played back.
///
/// This occurs as an artefact of coder delay.
pub start_time: Option<Duration>,
/// The reported duration of this stream.
pub duration: Option<Duration>,
/// The sample rate of this stream.
pub sample_rate: Option<u32>,
}
impl Metadata {
/// Extract metadata and details from the output of
/// `ffprobe`.
pub fn from_ffprobe_json(value: &Value) -> Self {
let format = value.as_object().and_then(|m| m.get("format"));
let duration = format
.and_then(|m| m.get("duration"))
.and_then(Value::as_str)
.and_then(|v| v.parse::<f64>().ok())
.map(Duration::from_secs_f64);
let start_time = format
.and_then(|m| m.get("start_time"))
.and_then(Value::as_str)
.and_then(|v| v.parse::<f64>().ok())
.map(Duration::from_secs_f64);
let tags = format.and_then(|m| m.get("tags"));
let title = tags
.and_then(|m| m.get("title"))
.and_then(Value::as_str)
.map(str::to_string);
let artist = tags
.and_then(|m| m.get("artist"))
.and_then(Value::as_str)
.map(str::to_string);
let date = tags
.and_then(|m| m.get("date"))
.and_then(Value::as_str)
.map(str::to_string);
let stream = value
.as_object()
.and_then(|m| m.get("streams"))
.and_then(|v| v.as_array())
.and_then(|v| {
v.iter()
.find(|line| line.get("codec_type").and_then(Value::as_str) == Some("audio"))
});
let channels = stream
.and_then(|m| m.get("channels"))
.and_then(Value::as_u64)
.map(|v| v as u8);
let sample_rate = stream
.and_then(|m| m.get("sample_rate"))
.and_then(Value::as_str)
.and_then(|v| v.parse::<u64>().ok())
.map(|v| v as u32);
Self {
title,
artist,
date,
channels,
start_time,
duration,
sample_rate,
}
}
/// Extract metadata and details from the output of `youtube-dl` for an online resource.
pub fn from_ytdl_output(value: Value) -> Self {
let obj = value.as_object();
let track = obj
.and_then(|m| m.get("track"))
.and_then(Value::as_str)
.map(str::to_string);
let title = track.or_else(|| {
obj.and_then(|m| m.get("title"))
.and_then(Value::as_str)
.map(str::to_string)
});
let true_artist = obj
.and_then(|m| m.get("artist"))
.and_then(Value::as_str)
.map(str::to_string);
let artist = true_artist.or_else(|| {
obj.and_then(|m| m.get("uploader"))
.and_then(Value::as_str)
.map(str::to_string)
});
let r_date = obj
.and_then(|m| m.get("release_date"))
.and_then(Value::as_str)
.map(str::to_string);
let date = r_date.or_else(|| {
obj.and_then(|m| m.get("upload_date"))
.and_then(Value::as_str)
.map(str::to_string)
});
let duration = obj
.and_then(|m| m.get("duration"))
.and_then(Value::as_f64)
.map(Duration::from_secs_f64);
Self {
title,
artist,
date,
channels: Some(2),
duration,
sample_rate: Some(SAMPLE_RATE_RAW as u32),
..Default::default()
}
}
/// Move all fields from a `Metadata` object into a new one.
pub fn take(&mut self) -> Self {
Self {
title: self.title.take(),
artist: self.artist.take(),
date: self.date.take(),
channels: self.channels.take(),
start_time: self.start_time.take(),
duration: self.duration.take(),
sample_rate: self.sample_rate.take(),
}
}
}

src/input/mod.rs (new file, 596 lines)

@@ -0,0 +1,596 @@
//! Raw audio input data streams and sources.
//!
//! [`Input`] is handled in Songbird by combining metadata with:
//! * A 48kHz audio bytestream, via [`Reader`],
//! * A [`Container`] describing the framing mechanism of the bytestream,
//! * A [`Codec`], defining the format of audio frames.
//!
//! When used as a [`Read`], the output bytestream will be a floating-point
//! PCM stream at 48kHz, matching the channel count of the input source.
//!
//! ## Opus frame passthrough.
//! Some sources, such as [`Compressed`] or the output of [`dca`], support
//! direct frame passthrough to the driver. This lets you directly send the
//! audio data you have *without decoding, re-encoding, or mixing*. In many
//! cases, this can greatly reduce the processing/compute cost of the driver.
//!
//! This functionality requires that:
//! * only one track is active (including paused tracks),
//! * that track's input supports direct Opus frame reads,
//! * its [`Input`] [meets the promises described herein](codec/struct.OpusDecoderState.html#structfield.allow_passthrough),
//! * and that track's volume is set to `1.0`.
//!
//! [`Input`]: struct.Input.html
//! [`Reader`]: reader/enum.Reader.html
//! [`Container`]: enum.Container.html
//! [`Codec`]: codec/enum.Codec.html
//! [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
//! [`Compressed`]: cached/struct.Compressed.html
//! [`dca`]: fn.dca.html
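//!
//! ## Example
//!
//! A small sketch assembling an [`Input`] by hand from raw floating-point samples;
//! most users will instead reach for the `ffmpeg`, `ytdl`, or `dca` helpers:
//!
//! ```rust
//! use songbird::input::{Codec, Container, Input, Reader};
//!
//! // 20ms of stereo silence as little-endian `f32` bytes.
//! let bytes = vec![0u8; 960 * 2 * 4];
//! let input = Input::new(true, Reader::from(bytes), Codec::FloatPcm, Container::Raw, None);
//! assert!(input.is_stereo());
//! ```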
pub mod cached;
mod child;
pub mod codec;
mod container;
mod dca;
pub mod error;
mod ffmpeg_src;
mod metadata;
pub mod reader;
pub mod restartable;
pub mod utils;
mod ytdl_src;
pub use self::{
child::*,
codec::{Codec, CodecType},
container::{Container, Frame},
dca::dca,
ffmpeg_src::*,
metadata::Metadata,
reader::Reader,
restartable::Restartable,
ytdl_src::*,
};
use crate::constants::*;
use audiopus::coder::GenericCtl;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use cached::OpusCompressor;
use error::{Error, Result};
use tokio::runtime::Handle;
use std::{
convert::TryFrom,
io::{
self,
Error as IoError,
ErrorKind as IoErrorKind,
Read,
Result as IoResult,
Seek,
SeekFrom,
},
mem,
time::Duration,
};
use tracing::{debug, error};
/// Data and metadata needed to correctly parse a [`Reader`]'s audio bytestream.
///
/// See the [module root] for more information.
///
/// [`Reader`]: enum.Reader.html
/// [module root]: index.html
#[derive(Debug)]
pub struct Input {
/// Information about the played source.
pub metadata: Metadata,
/// Indicates whether `source` is stereo or mono.
pub stereo: bool,
/// Underlying audio data bytestream.
pub reader: Reader,
/// Decoder used to parse the output of `reader`.
pub kind: Codec,
/// Framing strategy needed to identify frames of compressed audio.
pub container: Container,
pos: usize,
}
impl Input {
/// Creates a floating-point PCM Input from a given reader.
pub fn float_pcm(is_stereo: bool, reader: Reader) -> Input {
Input {
metadata: Default::default(),
stereo: is_stereo,
reader,
kind: Codec::FloatPcm,
container: Container::Raw,
pos: 0,
}
}
/// Creates a new Input using (at least) the given reader, codec, and container.
pub fn new(
stereo: bool,
reader: Reader,
kind: Codec,
container: Container,
metadata: Option<Metadata>,
) -> Self {
Input {
metadata: metadata.unwrap_or_default(),
stereo,
reader,
kind,
container,
pos: 0,
}
}
/// Returns whether the inner [`Reader`] implements [`Seek`].
///
/// [`Reader`]: reader/enum.Reader.html
/// [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
pub fn is_seekable(&self) -> bool {
self.reader.is_seekable()
}
/// Returns whether the read audio signal is stereo (or mono).
pub fn is_stereo(&self) -> bool {
self.stereo
}
/// Returns the type of the inner [`Codec`].
///
/// [`Codec`]: codec/enum.Codec.html
pub fn get_type(&self) -> CodecType {
(&self.kind).into()
}
/// Mixes the output of this stream into a 20ms stereo audio buffer.
#[inline]
pub fn mix(&mut self, float_buffer: &mut [f32; STEREO_FRAME_SIZE], volume: f32) -> usize {
match self.add_float_pcm_frame(float_buffer, self.stereo, volume) {
Some(len) => len,
None => 0,
}
}
/// Seeks the stream to the given time, if possible.
///
/// Returns the actual time reached.
pub fn seek_time(&mut self, time: Duration) -> Option<Duration> {
let future_pos = utils::timestamp_to_byte_count(time, self.stereo);
Seek::seek(self, SeekFrom::Start(future_pos as u64))
.ok()
.map(|a| utils::byte_count_to_timestamp(a as usize, self.stereo))
}
fn read_inner(&mut self, buffer: &mut [u8], ignore_decode: bool) -> IoResult<usize> {
// This implementation of Read converts the input stream
// to floating point output.
let sample_len = mem::size_of::<f32>();
let float_space = buffer.len() / sample_len;
let mut written_floats = 0;
// TODO: better decouple codec and container here.
// this is a little bit backwards, and assumes the bottom cases are always raw...
let out = match &mut self.kind {
Codec::Opus(decoder_state) => {
if matches!(self.container, Container::Raw) {
return Err(IoError::new(
IoErrorKind::InvalidInput,
"Raw container cannot demarcate Opus frames.",
));
}
if ignore_decode {
// If we're less than one frame away from the end of cheap seeking,
// then we must decode to make sure the next starting offset is correct.
// Step one: use up the remainder of the frame.
let mut aud_skipped =
decoder_state.current_frame.len() - decoder_state.frame_pos;
decoder_state.frame_pos = 0;
decoder_state.current_frame.truncate(0);
// Step two: take frames if we can.
while buffer.len() - aud_skipped >= STEREO_FRAME_BYTE_SIZE {
decoder_state.should_reset = true;
let frame = self
.container
.next_frame_length(&mut self.reader, CodecType::Opus)?;
self.reader.consume(frame.frame_len);
aud_skipped += STEREO_FRAME_BYTE_SIZE;
}
Ok(aud_skipped)
} else {
// get new frame *if needed*
if decoder_state.frame_pos == decoder_state.current_frame.len() {
let mut decoder = decoder_state.decoder.lock();
if decoder_state.should_reset {
decoder
.reset_state()
.expect("Critical failure resetting decoder.");
decoder_state.should_reset = false;
}
let frame = self
.container
.next_frame_length(&mut self.reader, CodecType::Opus)?;
let mut opus_data_buffer = [0u8; 4000];
decoder_state
.current_frame
.resize(decoder_state.current_frame.capacity(), 0.0);
let seen =
Read::read(&mut self.reader, &mut opus_data_buffer[..frame.frame_len])?;
let samples = decoder
.decode_float(
Some(&opus_data_buffer[..seen]),
&mut decoder_state.current_frame[..],
false,
)
.unwrap_or(0);
decoder_state.current_frame.truncate(2 * samples);
decoder_state.frame_pos = 0;
}
// read from frame which is present.
let mut buffer = &mut buffer[..];
let start = decoder_state.frame_pos;
let to_write = float_space.min(decoder_state.current_frame.len() - start);
for val in &decoder_state.current_frame[start..start + to_write] {
buffer.write_f32::<LittleEndian>(*val)?;
}
decoder_state.frame_pos += to_write;
written_floats = to_write;
Ok(written_floats * mem::size_of::<f32>())
}
},
Codec::Pcm => {
let mut buffer = &mut buffer[..];
while written_floats < float_space {
if let Ok(signal) = self.reader.read_i16::<LittleEndian>() {
buffer.write_f32::<LittleEndian>(f32::from(signal) / 32768.0)?;
written_floats += 1;
} else {
break;
}
}
Ok(written_floats * mem::size_of::<f32>())
},
Codec::FloatPcm => Read::read(&mut self.reader, buffer),
};
out.map(|v| {
self.pos += v;
v
})
}
fn cheap_consume(&mut self, count: usize) -> IoResult<usize> {
let mut scratch = [0u8; STEREO_FRAME_BYTE_SIZE * 4];
let len = scratch.len();
let mut done = 0;
loop {
let read = self.read_inner(&mut scratch[..len.min(count - done)], true)?;
if read == 0 {
break;
}
done += read;
}
Ok(done)
}
pub(crate) fn supports_passthrough(&self) -> bool {
match &self.kind {
Codec::Opus(state) => state.allow_passthrough,
_ => false,
}
}
pub(crate) fn read_opus_frame(&mut self, buffer: &mut [u8]) -> IoResult<usize> {
// Called in event of opus passthrough.
if let Codec::Opus(state) = &mut self.kind {
// step 1: align to frame.
self.pos += state.current_frame.len() - state.frame_pos;
state.frame_pos = 0;
state.current_frame.truncate(0);
// step 2: read new header.
let frame = self
.container
.next_frame_length(&mut self.reader, CodecType::Opus)?;
// step 3: read in bytes.
self.reader
.read_exact(&mut buffer[..frame.frame_len])
.map(|_| {
self.pos += STEREO_FRAME_BYTE_SIZE;
frame.frame_len
})
} else {
Err(IoError::new(
IoErrorKind::InvalidInput,
"Frame passthrough not supported for this file.",
))
}
}
pub(crate) fn prep_with_handle(&mut self, handle: Handle) {
self.reader.prep_with_handle(handle);
}
}
impl Read for Input {
fn read(&mut self, buffer: &mut [u8]) -> IoResult<usize> {
self.read_inner(buffer, false)
}
}
impl Seek for Input {
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
let mut target = self.pos;
match pos {
SeekFrom::Start(pos) => {
target = pos as usize;
},
SeekFrom::Current(rel) => {
target = target.wrapping_add(rel as usize);
},
SeekFrom::End(_pos) => unimplemented!(),
}
debug!("Seeking to {:?}", pos);
(if target == self.pos {
Ok(0)
} else if let Some(conversion) = self.container.try_seek_trivial(self.get_type()) {
let inside_target = (target * conversion) / mem::size_of::<f32>();
Seek::seek(&mut self.reader, SeekFrom::Start(inside_target as u64)).map(|inner_dest| {
let outer_dest = ((inner_dest as usize) * mem::size_of::<f32>()) / conversion;
self.pos = outer_dest;
outer_dest
})
} else if target > self.pos {
// seek in the next amount, disabling decoding if need be.
let shift = target - self.pos;
self.cheap_consume(shift)
} else {
// start from scratch, then seek in...
Seek::seek(
&mut self.reader,
SeekFrom::Start(self.container.input_start() as u64),
)?;
self.cheap_consume(target)
})
.map(|_| self.pos as u64)
}
}
/// Extension trait to pull frames of audio from a byte source.
pub(crate) trait ReadAudioExt {
fn add_float_pcm_frame(
&mut self,
float_buffer: &mut [f32; STEREO_FRAME_SIZE],
true_stereo: bool,
volume: f32,
) -> Option<usize>;
fn consume(&mut self, amt: usize) -> usize
where
Self: Sized;
}
impl<R: Read + Sized> ReadAudioExt for R {
fn add_float_pcm_frame(
&mut self,
float_buffer: &mut [f32; STEREO_FRAME_SIZE],
stereo: bool,
volume: f32,
) -> Option<usize> {
// IDEA: Read in 8 floats at a time, then use iterator code
// to gently nudge the compiler into vectorising for us.
// Max SIMD float32 lanes is 8 on AVX, older archs use a divisor of this
// e.g., 4.
const SAMPLE_LEN: usize = mem::size_of::<f32>();
const FLOAT_COUNT: usize = 512;
let mut simd_float_bytes = [0u8; FLOAT_COUNT * SAMPLE_LEN];
let mut simd_float_buf = [0f32; FLOAT_COUNT];
let mut frame_pos = 0;
// Code duplication here is because unifying these codepaths
// with a dynamic chunk size is not zero-cost.
if stereo {
let mut max_bytes = STEREO_FRAME_BYTE_SIZE;
while frame_pos < float_buffer.len() {
let progress = self
.read(&mut simd_float_bytes[..max_bytes.min(FLOAT_COUNT * SAMPLE_LEN)])
.and_then(|byte_len| {
let target = byte_len / SAMPLE_LEN;
(&simd_float_bytes[..byte_len])
.read_f32_into::<LittleEndian>(&mut simd_float_buf[..target])
.map(|_| target)
})
.map(|f32_len| {
let new_pos = frame_pos + f32_len;
for (el, new_el) in float_buffer[frame_pos..new_pos]
.iter_mut()
.zip(&simd_float_buf[..f32_len])
{
*el += volume * new_el;
}
(new_pos, f32_len)
});
match progress {
Ok((new_pos, delta)) => {
frame_pos = new_pos;
max_bytes -= delta * SAMPLE_LEN;
if delta == 0 {
break;
}
},
Err(ref e) =>
return if e.kind() == IoErrorKind::UnexpectedEof {
error!("EOF unexpectedly: {:?}", e);
Some(frame_pos)
} else {
error!("Input died unexpectedly: {:?}", e);
None
},
}
}
} else {
let mut max_bytes = MONO_FRAME_BYTE_SIZE;
while frame_pos < float_buffer.len() {
let progress = self
.read(&mut simd_float_bytes[..max_bytes.min(FLOAT_COUNT * SAMPLE_LEN)])
.and_then(|byte_len| {
let target = byte_len / SAMPLE_LEN;
(&simd_float_bytes[..byte_len])
.read_f32_into::<LittleEndian>(&mut simd_float_buf[..target])
.map(|_| target)
})
.map(|f32_len| {
let new_pos = frame_pos + (2 * f32_len);
for (els, new_el) in float_buffer[frame_pos..new_pos]
.chunks_exact_mut(2)
.zip(&simd_float_buf[..f32_len])
{
let sample = volume * new_el;
els[0] += sample;
els[1] += sample;
}
(new_pos, f32_len)
});
match progress {
Ok((new_pos, delta)) => {
frame_pos = new_pos;
max_bytes -= delta * SAMPLE_LEN;
if delta == 0 {
break;
}
},
Err(ref e) =>
return if e.kind() == IoErrorKind::UnexpectedEof {
Some(frame_pos)
} else {
error!("Input died unexpectedly: {:?}", e);
None
},
}
}
}
Some(frame_pos * SAMPLE_LEN)
}
fn consume(&mut self, amt: usize) -> usize {
io::copy(&mut self.by_ref().take(amt as u64), &mut io::sink()).unwrap_or(0) as usize
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::*;
#[test]
fn float_pcm_input_unchanged_mono() {
let data = make_sine(50 * MONO_FRAME_SIZE, false);
let mut input = Input::new(
false,
data.clone().into(),
Codec::FloatPcm,
Container::Raw,
None,
);
let mut out_vec = vec![];
let len = input.read_to_end(&mut out_vec).unwrap();
assert_eq!(out_vec[..len], data[..]);
}
#[test]
fn float_pcm_input_unchanged_stereo() {
let data = make_sine(50 * MONO_FRAME_SIZE, true);
let mut input = Input::new(
true,
data.clone().into(),
Codec::FloatPcm,
Container::Raw,
None,
);
let mut out_vec = vec![];
let len = input.read_to_end(&mut out_vec).unwrap();
assert_eq!(out_vec[..len], data[..]);
}
#[test]
fn pcm_input_becomes_float_mono() {
let data = make_pcm_sine(50 * MONO_FRAME_SIZE, false);
let mut input = Input::new(false, data.clone().into(), Codec::Pcm, Container::Raw, None);
let mut out_vec = vec![];
let len = input.read_to_end(&mut out_vec).unwrap();
let mut i16_window = &data[..];
let mut float_window = &out_vec[..];
while i16_window.len() != 0 {
let before = i16_window.read_i16::<LittleEndian>().unwrap() as f32;
let after = float_window.read_f32::<LittleEndian>().unwrap();
let diff = (before / 32768.0) - after;
assert!(diff.abs() < f32::EPSILON);
}
}
#[test]
fn pcm_input_becomes_float_stereo() {
let data = make_pcm_sine(50 * MONO_FRAME_SIZE, true);
let mut input = Input::new(true, data.clone().into(), Codec::Pcm, Container::Raw, None);
let mut out_vec = vec![];
let len = input.read_to_end(&mut out_vec).unwrap();
let mut i16_window = &data[..];
let mut float_window = &out_vec[..];
while i16_window.len() != 0 {
let before = i16_window.read_i16::<LittleEndian>().unwrap() as f32;
let after = float_window.read_f32::<LittleEndian>().unwrap();
let diff = (before / 32768.0) - after;
assert!(diff.abs() < f32::EPSILON);
}
}
}

src/input/reader.rs (new file, 180 lines)

@@ -0,0 +1,180 @@
//! Raw handlers for input bytestreams.
use super::*;
use std::{
fmt::{Debug, Error as FormatError, Formatter},
fs::File,
io::{
BufReader,
Cursor,
Error as IoError,
ErrorKind as IoErrorKind,
Read,
Result as IoResult,
Seek,
SeekFrom,
},
result::Result as StdResult,
};
use streamcatcher::{Catcher, TxCatcher};
/// Usable data/byte sources for an audio stream.
///
/// Users may define their own data sources using [`Extension`]
/// and [`ExtensionSeek`].
///
/// [`Extension`]: #variant.Extension
/// [`ExtensionSeek`]: #variant.ExtensionSeek
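///
/// # Examples
///
/// A short sketch wrapping a user-supplied byte source; the cursor here simply
/// stands in for any `Read + Send` type:
///
/// ```rust
/// use songbird::input::reader::Reader;
/// use std::io::Cursor;
///
/// let custom = Reader::Extension(Box::new(Cursor::new(vec![0u8; 7680])));
/// assert!(!custom.is_seekable());
/// ```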
pub enum Reader {
/// Piped output of another program (i.e., [`ffmpeg`]).
///
/// Does not support seeking.
///
/// [`ffmpeg`]: ../fn.ffmpeg.html
Pipe(BufReader<ChildContainer>),
/// A cached, raw in-memory store, provided by Songbird.
///
/// Supports seeking.
Memory(Catcher<Box<Reader>>),
/// A cached, Opus-compressed in-memory store, provided by Songbird.
///
/// Supports seeking.
Compressed(TxCatcher<Box<Input>, OpusCompressor>),
/// A source which supports seeking by recreating its input stream.
///
/// Supports seeking.
Restartable(Restartable),
/// A source contained in a local file.
///
/// Supports seeking.
File(BufReader<File>),
/// A source contained as an array in memory.
///
/// Supports seeking.
Vec(Cursor<Vec<u8>>),
/// A basic user-provided source.
///
/// Does not support seeking.
Extension(Box<dyn Read + Send>),
/// A user-provided source which also implements [`Seek`].
///
/// Supports seeking.
///
/// [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
ExtensionSeek(Box<dyn ReadSeek + Send>),
}
impl Reader {
/// Returns whether the given source implements [`Seek`].
///
/// [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
pub fn is_seekable(&self) -> bool {
use Reader::*;
match self {
Restartable(_) | Compressed(_) | Memory(_) => true,
Extension(_) => false,
ExtensionSeek(_) => true,
_ => false,
}
}
#[allow(clippy::single_match)]
pub(crate) fn prep_with_handle(&mut self, handle: Handle) {
use Reader::*;
match self {
Restartable(r) => r.prep_with_handle(handle),
_ => {},
}
}
}
impl Read for Reader {
fn read(&mut self, buffer: &mut [u8]) -> IoResult<usize> {
use Reader::*;
match self {
Pipe(a) => Read::read(a, buffer),
Memory(a) => Read::read(a, buffer),
Compressed(a) => Read::read(a, buffer),
Restartable(a) => Read::read(a, buffer),
File(a) => Read::read(a, buffer),
Vec(a) => Read::read(a, buffer),
Extension(a) => a.read(buffer),
ExtensionSeek(a) => a.read(buffer),
}
}
}
impl Seek for Reader {
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
use Reader::*;
match self {
Pipe(_) | Extension(_) => Err(IoError::new(
IoErrorKind::InvalidInput,
"Seeking not supported on Reader of this type.",
)),
Memory(a) => Seek::seek(a, pos),
Compressed(a) => Seek::seek(a, pos),
File(a) => Seek::seek(a, pos),
Restartable(a) => Seek::seek(a, pos),
Vec(a) => Seek::seek(a, pos),
ExtensionSeek(a) => a.seek(pos),
}
}
}
impl Debug for Reader {
fn fmt(&self, f: &mut Formatter<'_>) -> StdResult<(), FormatError> {
use Reader::*;
let field = match self {
Pipe(a) => format!("{:?}", a),
Memory(a) => format!("{:?}", a),
Compressed(a) => format!("{:?}", a),
Restartable(a) => format!("{:?}", a),
File(a) => format!("{:?}", a),
Vec(a) => format!("{:?}", a),
Extension(_) => "Extension".to_string(),
ExtensionSeek(_) => "ExtensionSeek".to_string(),
};
f.debug_tuple("Reader").field(&field).finish()
}
}
impl From<Vec<u8>> for Reader {
fn from(val: Vec<u8>) -> Reader {
Reader::Vec(Cursor::new(val))
}
}
/// Fusion trait for custom input sources which allow seeking.
pub trait ReadSeek {
/// See [`Read::read`].
///
/// [`Read::read`]: https://doc.rust-lang.org/nightly/std/io/trait.Read.html#tymethod.read
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize>;
/// See [`Seek::seek`].
///
/// [`Seek::seek`]: https://doc.rust-lang.org/nightly/std/io/trait.Seek.html#tymethod.seek
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64>;
}
impl Read for dyn ReadSeek {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
ReadSeek::read(self, buf)
}
}
impl Seek for dyn ReadSeek {
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
ReadSeek::seek(self, pos)
}
}
impl<R: Read + Seek> ReadSeek for R {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
Read::read(self, buf)
}
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
Seek::seek(self, pos)
}
}

src/input/restartable.rs (new file, 294 lines)

@@ -0,0 +1,294 @@
//! A source which supports seeking by recreating its input stream.
//!
//! This is intended for use with single-use audio tracks which
//! may require looping or seeking, but where additional memory
//! cannot be spared. Forward seeks will drain the track until reaching
//! the desired timestamp.
//!
//! Restarting occurs by temporarily pausing the track, running the restart
//! mechanism, and then passing the handle back to the mixer thread. Until
//! success/failure is confirmed, the track produces silence.
use super::*;
use flume::{Receiver, TryRecvError};
use futures::executor;
use std::{
ffi::OsStr,
fmt::{Debug, Error as FormatError, Formatter},
io::{Error as IoError, ErrorKind as IoErrorKind, Read, Result as IoResult, Seek, SeekFrom},
result::Result as StdResult,
time::Duration,
};
type Recreator = Box<dyn Restart + Send + 'static>;
type RecreateChannel = Receiver<Result<(Box<Input>, Recreator)>>;
/// A wrapper around a method to create a new [`Input`] which
/// seeks backward by recreating the source.
///
/// The main purpose of this wrapper is to enable seeking on
/// incompatible sources (i.e., ffmpeg output) and to ease resource
/// consumption for commonly reused/shared tracks. [`Compressed`]
/// and [`Memory`] offer the same functionality with different
/// tradeoffs.
///
/// This is intended for use with single-use audio tracks which
/// may require looping or seeking, but where additional memory
/// cannot be spared. Forward seeks will drain the track until reaching
/// the desired timestamp.
///
/// [`Input`]: struct.Input.html
/// [`Memory`]: cached/struct.Memory.html
/// [`Compressed`]: cached/struct.Compressed.html
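///
/// # Examples
///
/// A minimal usage sketch (the file path is purely illustrative):
///
/// ```rust,no_run
/// use songbird::input::{restartable::Restartable, Input};
///
/// let source = Restartable::ffmpeg("./some_file.mp3")
///     .expect("File should be readable by ffmpeg.");
/// let input = Input::from(source);
/// ```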
pub struct Restartable {
async_handle: Option<Handle>,
awaiting_source: Option<RecreateChannel>,
position: usize,
recreator: Option<Recreator>,
source: Box<Input>,
}
impl Restartable {
/// Create a new source, which can be restarted using a `recreator` function.
pub fn new(mut recreator: impl Restart + Send + 'static) -> Result<Self> {
recreator.call_restart(None).map(move |source| Self {
async_handle: None,
awaiting_source: None,
position: 0,
recreator: Some(Box::new(recreator)),
source: Box::new(source),
})
}
/// Create a new restartable ffmpeg source for a local file.
pub fn ffmpeg<P: AsRef<OsStr> + Send + Clone + 'static>(path: P) -> Result<Self> {
Self::new(FfmpegRestarter { path })
}
/// Create a new restartable ytdl source.
///
/// The cost of restarting and seeking will probably be *very* high:
/// expect a pause if you seek backwards.
pub fn ytdl<P: AsRef<str> + Send + Clone + 'static>(uri: P) -> Result<Self> {
Self::new(move |time: Option<Duration>| {
if let Some(time) = time {
let ts = format!("{}.{}", time.as_secs(), time.subsec_millis());
executor::block_on(_ytdl(uri.as_ref(), &["-ss", &ts]))
} else {
executor::block_on(ytdl(uri.as_ref()))
}
})
}
/// Create a new restartable ytdl source, using the first result of a youtube search.
///
/// The cost of restarting and seeking will probably be *very* high:
/// expect a pause if you seek backwards.
pub fn ytdl_search(name: &str) -> Result<Self> {
Self::ytdl(format!("ytsearch1:{}", name))
}
pub(crate) fn prep_with_handle(&mut self, handle: Handle) {
self.async_handle = Some(handle);
}
}
/// Trait used to create an instance of a [`Reader`] at instantiation and when
/// a backwards seek is needed.
///
/// Closures with a matching signature
/// (`FnMut(Option<Duration>) -> Result<Input> + Send + 'static`) implement this automatically.
///
/// [`Reader`]: ../reader/enum.Reader.html
pub trait Restart {
/// Tries to create a replacement source.
fn call_restart(&mut self, time: Option<Duration>) -> Result<Input>;
}
struct FfmpegRestarter<P>
where
P: AsRef<OsStr> + Send,
{
path: P,
}
impl<P> Restart for FfmpegRestarter<P>
where
P: AsRef<OsStr> + Send,
{
fn call_restart(&mut self, time: Option<Duration>) -> Result<Input> {
executor::block_on(async {
if let Some(time) = time {
let is_stereo = is_stereo(self.path.as_ref())
.await
.unwrap_or_else(|_e| (false, Default::default()));
let stereo_val = if is_stereo.0 { "2" } else { "1" };
let ts = format!("{}.{}", time.as_secs(), time.subsec_millis());
_ffmpeg_optioned(
self.path.as_ref(),
&["-ss", &ts],
&[
"-f",
"s16le",
"-ac",
stereo_val,
"-ar",
"48000",
"-acodec",
"pcm_f32le",
"-",
],
Some(is_stereo),
)
.await
} else {
ffmpeg(self.path.as_ref()).await
}
})
}
}
impl<P> Restart for P
where
P: FnMut(Option<Duration>) -> Result<Input> + Send + 'static,
{
fn call_restart(&mut self, time: Option<Duration>) -> Result<Input> {
(self)(time)
}
}
impl Debug for Restartable {
fn fmt(&self, f: &mut Formatter<'_>) -> StdResult<(), FormatError> {
f.debug_struct("Restartable")
.field("async_handle", &self.async_handle)
.field("awaiting_source", &self.awaiting_source)
.field("position", &self.position)
.field("recreator", &"<fn>")
.field("source", &self.source)
.finish()
}
}
impl From<Restartable> for Input {
fn from(mut src: Restartable) -> Self {
let kind = src.source.kind.clone();
let meta = Some(src.source.metadata.take());
let stereo = src.source.stereo;
let container = src.source.container;
Input::new(stereo, Reader::Restartable(src), kind, container, meta)
}
}
// How do these impls work at a high level?
// If a restart is needed, a request is sent to the async context;
// while that request is pending, the reader simply outputs all zeroes (silence).
impl Read for Restartable {
fn read(&mut self, buffer: &mut [u8]) -> IoResult<usize> {
let (out_val, march_pos, remove_async) = if let Some(chan) = &self.awaiting_source {
match chan.try_recv() {
Ok(Ok((new_source, recreator))) => {
self.source = new_source;
self.recreator = Some(recreator);
(Read::read(&mut self.source, buffer), true, true)
},
Ok(Err(source_error)) => {
let e = Err(IoError::new(
IoErrorKind::UnexpectedEof,
format!("Failed to create new reader: {:?}.", source_error),
));
(e, false, true)
},
Err(TryRecvError::Empty) => {
// Output all zeroes.
for el in buffer.iter_mut() {
*el = 0;
}
(Ok(buffer.len()), false, false)
},
Err(_) => {
let e = Err(IoError::new(
IoErrorKind::UnexpectedEof,
"Failed to create new reader: dropped.",
));
(e, false, true)
},
}
} else {
// We already have a good, valid source.
(Read::read(&mut self.source, buffer), true, false)
};
if remove_async {
self.awaiting_source = None;
}
if march_pos {
out_val.map(|a| {
self.position += a;
a
})
} else {
out_val
}
}
}
impl Seek for Restartable {
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
let _local_pos = self.position as u64;
use SeekFrom::*;
match pos {
Start(offset) => {
let stereo = self.source.stereo;
let _current_ts = utils::byte_count_to_timestamp(self.position, stereo);
let offset = offset as usize;
if offset < self.position {
// We're going back in time.
if let Some(handle) = self.async_handle.as_ref() {
let (tx, rx) = flume::bounded(1);
self.awaiting_source = Some(rx);
let recreator = self.recreator.take();
if let Some(mut rec) = recreator {
handle.spawn(async move {
let ret_val = rec.call_restart(Some(
utils::byte_count_to_timestamp(offset, stereo),
));
let _ = tx.send(ret_val.map(Box::new).map(|v| (v, rec)));
});
} else {
return Err(IoError::new(
IoErrorKind::Interrupted,
"Previous seek in progress.",
));
}
self.position = offset;
} else {
return Err(IoError::new(
IoErrorKind::Interrupted,
"Cannot safely call seek until provided an async context handle.",
));
}
} else {
self.position += self.source.consume(offset - self.position);
}
Ok(offset as u64)
},
End(_offset) => Err(IoError::new(
IoErrorKind::InvalidInput,
"End point for Restartables is not known.",
)),
Current(_offset) => unimplemented!(),
}
}
}
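
As a usage sketch, a `Restartable` can be built either from the bundled ffmpeg/ytdl constructors or from any closure which recreates the underlying `Input`. The file path and URL below are illustrative, and it is assumed that the module's async `ffmpeg` helper accepts any `AsRef<OsStr>` path and that the listed items are imported from this module tree.

    // Usage sketch; assumes `Restartable`, `Input`, `Result`, and the async
    // `ffmpeg` helper from this module tree are in scope.
    use std::time::Duration;

    fn build_sources() -> Result<Input> {
        // Built-in restarters for local files and youtube-dl streams.
        let local = Restartable::ffmpeg("music.mp3")?;
        let _remote = Restartable::ytdl("https://www.youtube.com/watch?v=example")?;

        // Any `FnMut(Option<Duration>) -> Result<Input> + Send + 'static` closure
        // also satisfies `Restart`; `time` is `Some(..)` when seeking backwards.
        let _custom = Restartable::new(|_time: Option<Duration>| {
            futures::executor::block_on(ffmpeg("music.mp3"))
        })?;

        // A Restartable is then consumed as an ordinary Input.
        Ok(Input::from(local))
    }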

41
src/input/utils.rs Normal file

@@ -0,0 +1,41 @@
//! Utility methods for seeking or decoding.
use crate::constants::*;
use audiopus::{coder::Decoder, Channels, Result as OpusResult, SampleRate};
use std::{mem, time::Duration};
/// Calculates the sample position in a FloatPCM stream from a timestamp.
pub fn timestamp_to_sample_count(timestamp: Duration, stereo: bool) -> usize {
((timestamp.as_millis() as usize) * (MONO_FRAME_SIZE / FRAME_LEN_MS)) << stereo as usize
}
/// Calculates the time position in a FloatPCM stream from a sample index.
pub fn sample_count_to_timestamp(amt: usize, stereo: bool) -> Duration {
Duration::from_millis((((amt * FRAME_LEN_MS) / MONO_FRAME_SIZE) as u64) >> stereo as u64)
}
/// Calculates the byte position in a FloatPCM stream from a timestamp.
///
/// Each sample occupies `mem::size_of::<f32>() == 4usize` bytes.
pub fn timestamp_to_byte_count(timestamp: Duration, stereo: bool) -> usize {
timestamp_to_sample_count(timestamp, stereo) * mem::size_of::<f32>()
}
/// Calculates the time position in a FloatPCM stream from a byte index.
///
/// Each sample occupies `mem::size_of::<f32>() == 4usize` bytes.
pub fn byte_count_to_timestamp(amt: usize, stereo: bool) -> Duration {
sample_count_to_timestamp(amt / mem::size_of::<f32>(), stereo)
}
/// Creates an Opus decoder outputting at a sample rate of 48 kHz.
pub fn decoder(stereo: bool) -> OpusResult<Decoder> {
Decoder::new(
SampleRate::Hz48000,
if stereo {
Channels::Stereo
} else {
Channels::Mono
},
)
}
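
A short worked example of these conversions, assuming the crate constants take their usual values of `FRAME_LEN_MS == 20` and `MONO_FRAME_SIZE == 960` (48 kHz audio, i.e. 48 samples per millisecond):

    use std::time::Duration;

    // Assumed values of the crate constants referenced above.
    const FRAME_LEN_MS: usize = 20;
    const MONO_FRAME_SIZE: usize = 960; // 48_000 Hz * 20 ms

    fn timestamp_to_sample_count(timestamp: Duration, stereo: bool) -> usize {
        ((timestamp.as_millis() as usize) * (MONO_FRAME_SIZE / FRAME_LEN_MS)) << stereo as usize
    }

    fn main() {
        let ts = Duration::from_millis(500);

        // 500 ms * 48 samples/ms = 24_000 samples, doubled to 48_000 for stereo.
        assert_eq!(timestamp_to_sample_count(ts, false), 24_000);
        assert_eq!(timestamp_to_sample_count(ts, true), 48_000);

        // Each f32 sample is 4 bytes, so the stereo byte offset is 192_000.
        assert_eq!(
            timestamp_to_sample_count(ts, true) * std::mem::size_of::<f32>(),
            192_000
        );
    }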

107
src/input/ytdl_src.rs Normal file

@@ -0,0 +1,107 @@
use super::{
child_to_reader,
error::{Error, Result},
Codec,
Container,
Input,
Metadata,
};
use serde_json::Value;
use std::{
io::{BufRead, BufReader, Read},
process::{Command, Stdio},
};
use tokio::task;
use tracing::trace;
/// Creates a streamed audio source with `youtube-dl` and `ffmpeg`.
pub async fn ytdl(uri: &str) -> Result<Input> {
_ytdl(uri, &[]).await
}
pub(crate) async fn _ytdl(uri: &str, pre_args: &[&str]) -> Result<Input> {
let ytdl_args = [
"--print-json",
"-f",
"webm[abr>0]/bestaudio/best",
"-R",
"infinite",
"--no-playlist",
"--ignore-config",
uri,
"-o",
"-",
];
let ffmpeg_args = [
"-f",
"s16le",
"-ac",
"2",
"-ar",
"48000",
"-acodec",
"pcm_f32le",
"-",
];
let mut youtube_dl = Command::new("youtube-dl")
.args(&ytdl_args)
.stdin(Stdio::null())
.stderr(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?;
let stderr = youtube_dl.stderr.take();
let (returned_stderr, value) = task::spawn_blocking(move || {
if let Some(mut s) = stderr {
let out: Option<Value> = {
let mut o_vec = vec![];
let mut serde_read = BufReader::new(s.by_ref());
// Read one newline-terminated line from stderr: the JSON metadata block.
if let Ok(len) = serde_read.read_until(0xA, &mut o_vec) {
serde_json::from_slice(&o_vec[..len]).ok()
} else {
None
}
};
(Some(s), out)
} else {
(None, None)
}
})
.await
.map_err(|_| Error::Metadata)?;
youtube_dl.stderr = returned_stderr;
let ffmpeg = Command::new("ffmpeg")
.args(pre_args)
.arg("-i")
.arg("-")
.args(&ffmpeg_args)
.stdin(youtube_dl.stdout.ok_or(Error::Stdout)?)
.stderr(Stdio::null())
.stdout(Stdio::piped())
.spawn()?;
let metadata = Metadata::from_ytdl_output(value.unwrap_or_default());
trace!("ytdl metadata {:?}", metadata);
Ok(Input::new(
true,
child_to_reader::<f32>(ffmpeg),
Codec::FloatPcm,
Container::Raw,
Some(metadata),
))
}
/// Creates a streamed audio source from a YouTube search, using `youtube-dl`, `ffmpeg`, and the `ytsearch1:` prefix.
/// Takes the first video listed in the search results.
pub async fn ytdl_search(name: &str) -> Result<Input> {
ytdl(&format!("ytsearch1:{}", name)).await
}
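
A usage sketch for these entry points, assuming an async runtime is already available and that the `youtube-dl` and `ffmpeg` binaries are on the PATH; the URL, the search term, and the `metadata.title` field access are illustrative assumptions about the `Input`/`Metadata` layout.

    // Usage sketch; assumes `ytdl`, `ytdl_search`, `Input`, and `Result` from
    // this module are in scope.
    async fn youtube_inputs() -> Result<()> {
        // Stream a specific video...
        let direct: Input = ytdl("https://www.youtube.com/watch?v=example").await?;

        // ...or take the first hit of a search (internally `ytsearch1:<name>`).
        let searched: Input = ytdl_search("lofi hip hop").await?;

        // Both carry whatever metadata youtube-dl reported on stderr.
        println!("direct title:   {:?}", direct.metadata.title);
        println!("searched title: {:?}", searched.metadata.title);
        Ok(())
    }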