feat(gst): Added gst, a high-level wrapper over gstreamer

chore(example): Added hdr-gstreamer-wgpu example
chore(license): Added MIT license to all crates
This commit is contained in:
uttarayan21
2025-12-16 02:23:30 +05:30
parent 6cc83ba655
commit 7f9152e8fd
26 changed files with 2815 additions and 53 deletions

View File

View File

@@ -0,0 +1,466 @@
use std::sync::Arc;
use gstreamer as gst;
use gstreamer_app as gst_app;
use anyhow::{Context, Result};
use winit::{
application::ApplicationHandler,
event::*,
event_loop::{ActiveEventLoop, EventLoop},
keyboard::*,
window::Window,
};
/// Winit application handler for the example.
/// `state` stays `None` until `ApplicationHandler::resumed` creates the
/// window and builds the render `State` for it.
pub struct App {
    state: Option<State>,
}
impl App {
pub fn new() -> Self {
Self { state: None }
}
}
/// Everything needed to pull video frames from gstreamer and draw them.
pub struct State {
    // Kept alive for as long as the surface exists; also used to request redraws.
    window: Arc<Window>,
    // gstreamer playback pipeline supplying decoded frames via its appsink.
    gst: Video,
    surface: wgpu::Surface<'static>,
    // Destination texture the decoded video frames are uploaded into; bound
    // at @binding(0) of the texture bind group.
    surface_texture: wgpu::Texture,
    device: wgpu::Device,
    queue: wgpu::Queue,
    // Current surface configuration; updated by `resize`.
    config: wgpu::SurfaceConfiguration,
    pipeline: wgpu::RenderPipeline,
    bind_group: wgpu::BindGroup,
    // Set once the surface has been configured; `render` no-ops until then.
    is_surface_initialized: bool,
}
impl State {
    /// Builds the whole render stack for `window`: wgpu surface, device and
    /// queue, a fullscreen-triangle pipeline that samples a video texture,
    /// and the gstreamer `Video` pipeline that supplies the frames.
    ///
    /// # Errors
    /// Fails when any wgpu object cannot be created or when the gstreamer
    /// pipeline fails to start.
    async fn new(window: Arc<Window>) -> Result<State> {
        let instance = wgpu::Instance::default();
        let surface = instance
            .create_surface(window.clone())
            .context("Failed to create wgpu surface")?;
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::LowPower,
                compatible_surface: Some(&surface),
                force_fallback_adapter: false,
            })
            .await
            .context("Failed to request wgpu adapter")?;
        let (device, queue) = adapter
            .request_device(&wgpu::DeviceDescriptor {
                label: None,
                required_features: wgpu::Features::empty(),
                required_limits: wgpu::Limits::default(),
                memory_hints: wgpu::MemoryHints::default(),
                ..Default::default()
            })
            .await
            .context("Failed to request wgpu device")?;
        let surface_caps = surface.get_capabilities(&adapter);
        // `TextureFormat` is `Copy`, so copy instead of `.clone()` (clippy
        // `clone_on_copy`). NOTE(review): relying on `last()` to pick an
        // HDR-capable format is driver-dependent — confirm, or select an
        // explicit format instead.
        let surface_format = *surface_caps.formats.last().unwrap();
        let size = window.inner_size();
        let config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: size.width,
            height: size.height,
            present_mode: surface_caps.present_modes[0],
            alpha_mode: surface_caps.alpha_modes[0],
            view_formats: vec![],
            desired_maximum_frame_latency: 3, // allow up to 3 frames in flight
        };
        surface.configure(&device, &config);
        let shader = device.create_shader_module(wgpu::include_wgsl!("shader.wgsl"));
        // Bind group layout: binding 0 = the video texture, binding 1 = its sampler.
        let texture_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("texture_bind_group_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            multisampled: false,
                            view_dimension: wgpu::TextureViewDimension::D2,
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });
        let render_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Jello Render Pipeline Layout"),
                bind_group_layouts: &[&texture_bind_group_layout],
                push_constant_ranges: &[],
            });
        // Fullscreen pipeline: the vertex shader synthesizes its own triangle
        // from the vertex index, so no vertex buffers are bound.
        let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Jello Render Pipeline"),
            layout: Some(&render_pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                buffers: &[],
                compilation_options: wgpu::PipelineCompilationOptions::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                compilation_options: wgpu::PipelineCompilationOptions::default(),
                targets: &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(wgpu::BlendState::REPLACE),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
            }),
            primitive: wgpu::PrimitiveState::default(),
            depth_stencil: None,
            multisample: wgpu::MultisampleState {
                count: 1,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            multiview: None,
            cache: None,
        });
        // Upload texture sized to the window at startup.
        // NOTE(review): it is never resized on window resize nor matched to
        // the actual video dimensions — confirm decoded frames have exactly
        // this size, otherwise `copy_next_frame_to_texture` will mis-copy.
        let texture_size = wgpu::Extent3d {
            width: size.width,
            height: size.height,
            depth_or_array_layers: 1,
        };
        // Rgb10a2Unorm matches the RGB10A2_LE caps requested from gstreamer.
        let video_texture = device.create_texture(&wgpu::TextureDescriptor {
            size: texture_size,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgb10a2Unorm,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            label: Some("Jello Video Texture"),
            view_formats: &[],
        });
        // TODO: Use a better sampler.
        // (This was a `///` doc comment, which is invalid on a statement and
        // only fires the `unused_doc_comments` lint.)
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("texture_sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &texture_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(
                        &video_texture.create_view(&wgpu::TextureViewDescriptor::default()),
                    ),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
            ],
            label: Some("Jello Texture Bind Group"),
        });
        let gst = Video::new().context("Failed to create Video")?;
        Ok(Self {
            window,
            gst,
            surface,
            surface_texture: video_texture,
            device,
            queue,
            config,
            is_surface_initialized: true,
            bind_group,
            pipeline: render_pipeline,
        })
    }

    /// Reconfigures the surface for a new window size; zero dimensions
    /// (e.g. a minimized window) are ignored.
    fn resize(&mut self, width: u32, height: u32) {
        if width > 0 && height > 0 {
            self.config.width = width;
            self.config.height = height;
            self.surface.configure(&self.device, &self.config);
            self.is_surface_initialized = true;
        }
    }

    /// Uploads the next video frame into the video texture and draws one
    /// fullscreen triangle sampling it, then schedules the next redraw.
    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
        if !self.is_surface_initialized {
            return Ok(());
        }
        // A failed frame copy is reported as `Lost` so the caller reconfigures
        // the surface. NOTE(review): end-of-stream also lands here — confirm
        // that is the intended behavior.
        self.copy_next_frame_to_texture(&self.surface_texture)
            .inspect_err(|e| {
                tracing::error!("Failed to copy video frame to texture: {e:?}");
            })
            .map_err(|_| wgpu::SurfaceError::Lost)?;
        let output = self.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Jello Render Encoder"),
            });
        let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("Jello Render Pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: &view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(wgpu::Color {
                        r: 0.1,
                        g: 0.2,
                        b: 0.3,
                        a: 1.0,
                    }),
                    store: wgpu::StoreOp::Store,
                },
                depth_slice: None,
            })],
            depth_stencil_attachment: None,
            occlusion_query_set: None,
            timestamp_writes: None,
        });
        render_pass.set_pipeline(&self.pipeline);
        render_pass.set_bind_group(0, &self.bind_group, &[]);
        // Three indices, no vertex buffer: the shader builds the triangle.
        render_pass.draw(0..3, 0..1);
        // End the pass so the encoder can be finished.
        drop(render_pass);
        self.queue.submit(std::iter::once(encoder.finish()));
        output.present();
        self.window.request_redraw();
        Ok(())
    }

    /// Pulls the next sample from the appsink (blocking until one is
    /// available) and writes its pixels into `texture`.
    ///
    /// # Errors
    /// Fails when no sample can be pulled (including end-of-stream) or the
    /// sample's caps/buffer cannot be read.
    pub fn copy_next_frame_to_texture(&self, texture: &wgpu::Texture) -> Result<()> {
        let frame = self
            .gst
            .appsink
            .try_pull_sample(gst::ClockTime::NONE)
            .context("Failed to pull sample from appsink")?;
        let caps = frame.caps().context("Failed to get caps from sample")?;
        let size = caps
            .structure(0)
            .context("Failed to get structure from caps")?;
        let width = size
            .get::<i32>("width")
            .context("Failed to get width from caps")?;
        let height = size
            .get::<i32>("height")
            .context("Failed to get height from caps")?;
        let buffer = frame.buffer().context("Failed to get buffer from sample")?;
        let map = buffer
            .map_readable()
            .context("Failed to map buffer readable")?;
        self.queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                // Fix: `&texture` was a needless double borrow (clippy
                // `needless_borrow`); `texture` is already `&wgpu::Texture`.
                texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            &map,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // RGB10A2_LE packs 4 bytes per pixel.
                bytes_per_row: Some(4 * width as u32),
                rows_per_image: Some(height as u32),
            },
            texture.size(),
        );
        Ok(())
    }
}
impl ApplicationHandler<State> for App {
    /// Creates the window and synchronously builds the render `State`.
    fn resumed(&mut self, event_loop: &ActiveEventLoop) {
        #[allow(unused_mut)]
        let mut window_attributes = Window::default_attributes();
        let window = Arc::new(event_loop.create_window(window_attributes).unwrap());
        // Fix: the previous message ("Failed to block") misattributed the
        // failure — `block_on` cannot fail, `State::new` can.
        self.state = Some(pollster::block_on(State::new(window)).expect("Failed to create State"));
    }

    /// Replaces the current render state with one delivered as a user event.
    fn user_event(&mut self, _event_loop: &ActiveEventLoop, event: State) {
        // Fix: `event` was declared `mut` but never mutated (unused_mut).
        self.state = Some(event);
    }

    /// Keeps the redraw loop alive between events.
    fn about_to_wait(&mut self, _event_loop: &ActiveEventLoop) {
        let state = match &mut self.state {
            Some(state) => state,
            None => return,
        };
        state.window.request_redraw();
    }

    fn window_event(
        &mut self,
        event_loop: &ActiveEventLoop,
        _window_id: winit::window::WindowId,
        event: WindowEvent,
    ) {
        // Ignore events that arrive before `resumed` built the state.
        let state = match &mut self.state {
            Some(state) => state,
            None => return,
        };
        match event {
            WindowEvent::CloseRequested => event_loop.exit(),
            WindowEvent::Resized(size) => state.resize(size.width, size.height),
            WindowEvent::RedrawRequested => {
                // TODO: hook up `state.gst.poll()` here to exit on EOS/error.
                match state.render() {
                    Ok(_) => {}
                    // Reconfigure the surface if lost or outdated
                    Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                        let size = state.window.inner_size();
                        tracing::info!("Reconfiguring surface to {size:?}");
                        state.resize(size.width, size.height);
                    }
                    // The system is out of memory, we should probably quit
                    Err(wgpu::SurfaceError::OutOfMemory) => event_loop.exit(),
                    // All other errors (e.g. Timeout) should resolve by the next frame
                    Err(e) => {
                        tracing::error!("Failed to render frame: {e:?}");
                    }
                }
            }
            WindowEvent::KeyboardInput {
                event:
                    KeyEvent {
                        physical_key: PhysicalKey::Code(code),
                        state,
                        ..
                    },
                ..
            } => match (code, state.is_pressed()) {
                // Escape or Q quits the example.
                (KeyCode::Escape, true) => event_loop.exit(),
                (KeyCode::KeyQ, true) => event_loop.exit(),
                _ => {}
            },
            _ => {}
        }
    }
}
/// Entry point: installs the tracing subscriber, then hands control to the
/// winit event loop with a fresh `App` handler.
pub fn main() -> anyhow::Result<()> {
    tracing_subscriber::fmt::init();
    let mut app = App::new();
    EventLoop::with_user_event().build()?.run_app(&mut app)?;
    Ok(())
}
/// Handle to a running gstreamer playback pipeline.
pub struct Video {
    // The playbin3 pipeline built in `Video::new`.
    pipeline: gst::Pipeline,
    // Pipeline message bus, drained by `poll`.
    bus: gst::Bus,
    // The appsink at the end of the video-sink bin; frames are pulled from it.
    appsink: gst_app::AppSink,
}
impl Video {
    /// Initializes gstreamer, builds a `playbin3` pipeline that converts
    /// decoded video into RGB10A2_LE frames delivered to an appsink, starts
    /// playback, and waits (up to 5 s) for the pipeline to settle.
    ///
    /// # Errors
    /// Fails when gstreamer cannot initialize, the pipeline string does not
    /// parse, the appsink cannot be located, or the pipeline does not reach
    /// its target state within the timeout.
    pub fn new() -> Result<Self> {
        gst::init()?;
        use gst::prelude::*;
        // SECURITY(review): the URI embeds an `api_key` credential in source.
        // Load the URI/key from configuration or the environment instead of
        // committing it. (Left byte-identical here to preserve behavior.)
        let pipeline = gst::parse::launch(
            r##"playbin3 uri=https://jellyfin.tsuba.darksailor.dev/Items/6010382cf25273e624d305907010d773/Download?api_key=036c140222464878862231ef66a2bc9c video-sink="videoconvert ! video/x-raw,format=RGB10A2_LE ! appsink name=appsink""##,
        ).context("Failed to parse gst pipeline")?;
        let pipeline = pipeline
            .downcast::<gst::Pipeline>()
            .map_err(|_| anyhow::anyhow!("Failed to downcast gst element to Pipeline"))?;
        // The video-sink property holds the bin we described in the launch
        // string; dig the named appsink out of it.
        let video_sink = pipeline.property::<gst::Bin>("video-sink");
        let appsink = video_sink
            .by_name("appsink")
            .context("Failed to get appsink from video-sink")?
            .downcast::<gst_app::AppSink>()
            .map_err(|_| {
                anyhow::anyhow!("Failed to downcast video-sink appsink to gst_app::AppSink")
            })?;
        // No-op callback: samples are pulled on demand via `try_pull_sample`.
        appsink.set_callbacks(
            gst_app::AppSinkCallbacks::builder()
                .new_sample(|_appsink| Ok(gst::FlowSuccess::Ok))
                .build(),
        );
        let bus = pipeline.bus().context("Failed to get gst pipeline bus")?;
        pipeline.set_state(gst::State::Playing)?;
        // Block until the state change completes (or 5 s elapse).
        pipeline
            .state(gst::ClockTime::from_seconds(5))
            .0
            .context("Failed to wait for pipeline")?;
        Ok(Self {
            pipeline,
            bus,
            appsink,
        })
    }

    /// Drains any pending bus messages without blocking.
    ///
    /// Returns `true` when playback has finished (end-of-stream) or errored —
    /// in both cases the pipeline is shut down — and `false` otherwise.
    pub fn poll(&mut self) -> bool {
        use gst::prelude::*;
        // Fix: `iter_timed(ClockTime::NONE)` blocks indefinitely when the bus
        // is empty; a per-frame poll must return immediately, so use a zero
        // timeout to only drain messages that are already queued.
        for msg in self.bus.iter_timed(gst::ClockTime::ZERO) {
            use gst::MessageView;
            match msg.view() {
                MessageView::Eos(..) => {
                    tracing::info!("End of stream");
                    self.pipeline.set_state(gst::State::Null).ok();
                    return true;
                }
                MessageView::Error(err) => {
                    tracing::error!(
                        "Error from {:?}: {} ({:?})",
                        err.src().map(|s| s.path_string()),
                        err.error(),
                        err.debug()
                    );
                    self.pipeline.set_state(gst::State::Null).ok();
                    return true;
                }
                _ => {}
            }
        }
        false
    }
}

View File

@@ -0,0 +1,31 @@
// Vertex shader
struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) tex_coords: vec2<f32>,
};
// Fullscreen-triangle vertex shader: generates a single oversized triangle
// from the vertex index alone, so no vertex buffer is needed.
@vertex
fn vs_main(
    @builtin(vertex_index) in_vertex_index: u32,
) -> VertexOutput {
    var out: VertexOutput;
    // Indices 0,1,2 map to UVs (0,0), (2,0), (0,2); the triangle covers the
    // whole viewport and the GPU clips the overhang.
    let uv = vec2<f32>(f32((in_vertex_index << 1u) & 2u), f32(in_vertex_index & 2u));
    // Map UV [0,2] to clip space [-1,3].
    out.clip_position = vec4<f32>(uv * 2.0 - 1.0, 0.0, 1.0);
    // Flip Y: UV origin is top-left, clip-space origin is bottom-left.
    out.clip_position.y = -out.clip_position.y;
    out.tex_coords = uv;
    return out;
}
// Fragment shader
// Video texture and sampler, bound at group 0 (see the Rust bind group layout).
@group(0) @binding(0)
var t_diffuse: texture_2d<f32>;
@group(0) @binding(1)
var s_diffuse: sampler;
// Samples the video texture at the interpolated UV coordinate.
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    return textureSample(t_diffuse, s_diffuse, in.tex_coords);
}