feat: Modify gst crate to add a lot more granularity

uttarayan21
2025-12-22 13:27:30 +05:30
parent d42ef3b550
commit 043d1e99f0
23 changed files with 947 additions and 392 deletions

View File

@@ -0,0 +1,12 @@
[package]
name = "iced-video"
version = "0.1.0"
edition = "2024"
[dependencies]
error-stack = "0.6.0"
gst.workspace = true
iced_core = "0.14.0"
iced_wgpu = "0.14.0"
thiserror = "2.0.17"
tracing = "0.1.43"

View File

@@ -0,0 +1,123 @@
pub mod primitive;
pub mod source;
use error_stack::{Report, ResultExt};
use gst::*;
use iced_core::Length;
use std::marker::PhantomData;
use gst::plugins::app::AppSink;
use gst::plugins::playback::Playbin3;
use gst::plugins::videoconvertscale::VideoConvert;
#[derive(Debug, thiserror::Error)]
#[error("Iced Video Error")]
pub struct Error;
pub type Result<T, E = Report<Error>> = core::result::Result<T, E>;
use std::sync::{Arc, Mutex, atomic::AtomicBool};
pub struct Video {
id: iced_core::Id,
source: source::VideoSource,
is_playing: Arc<AtomicBool>,
is_eos: Arc<AtomicBool>,
texture: Mutex<Option<iced_wgpu::wgpu::TextureView>>,
}
impl Video {
pub fn id(&self) -> &iced_core::Id {
&self.id
}
pub fn source(&self) -> &source::VideoSource {
&self.source
}
pub async fn new(url: impl AsRef<str>) -> Result<Self> {
Ok(Self {
id: iced_core::Id::unique(),
source: source::VideoSource::new(url).await?,
is_playing: Arc::new(AtomicBool::new(false)),
is_eos: Arc::new(AtomicBool::new(false)),
texture: Mutex::new(None),
})
}
}
pub struct VideoPlayer<'a, Message, Theme = iced_core::Theme, Renderer = iced_wgpu::Renderer>
where
Renderer: PrimitiveRenderer,
{
video: &'a Video,
content_fit: iced_core::ContentFit,
width: iced_core::Length,
height: iced_core::Length,
on_end_of_stream: Option<Message>,
on_new_frame: Option<Message>,
looping: bool,
// on_subtitle_text: Option<Box<dyn Fn(Option<String>) -> Message + 'a>>,
// on_error: Option<Box<dyn Fn(&glib::Error) -> Message + 'a>>,
theme: Theme,
__marker: PhantomData<Renderer>,
}
impl<'a, Message, Theme, Renderer> VideoPlayer<'a, Message, Theme, Renderer>
where
    Theme: Default,
    Renderer: PrimitiveRenderer,
{
    /// Wraps an existing [`Video`]; the widget borrows it for its lifetime.
    pub fn new(video: &'a Video) -> Self {
        Self {
            video,
            content_fit: iced_core::ContentFit::Contain,
            width: Length::Shrink,
            height: Length::Shrink,
            on_end_of_stream: None,
            on_new_frame: None,
            looping: false,
            theme: Theme::default(),
            __marker: PhantomData,
        }
    }
}
impl<Message, Theme, Renderer> iced_core::Widget<Message, Theme, Renderer>
for VideoPlayer<'_, Message, Theme, Renderer>
where
Message: Clone,
Renderer: PrimitiveRenderer,
{
fn size(&self) -> iced_core::Size<Length> {
iced_core::Size {
width: self.width,
height: self.height,
}
}
fn layout(
    &mut self,
    tree: &mut iced_core::widget::Tree,
    renderer: &Renderer,
    limits: &iced_core::layout::Limits,
) -> iced_core::layout::Node {
    todo!()
}
fn draw(
    &self,
    tree: &iced_core::widget::Tree,
    renderer: &mut Renderer,
    theme: &Theme,
    style: &iced_core::renderer::Style,
    layout: iced_core::Layout<'_>,
    cursor: iced_core::mouse::Cursor,
    viewport: &iced_core::Rectangle,
) {
    todo!()
}
}
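
A minimal usage sketch (not part of this commit) of wiring the widget into an iced view. It assumes `iced_wgpu::Renderer` satisfies the `PrimitiveRenderer` bound and that `layout`/`draw` get real implementations; the `App` type and `Message` enum here are hypothetical:

use iced_video::{Video, VideoPlayer};

// Hypothetical application state; the `Video` would be created up front,
// e.g. via `Video::new(url).await` during startup.
struct App {
    video: Video,
}

#[derive(Clone)]
enum Message {
    Tick,
}

impl App {
    fn view(&self) -> VideoPlayer<'_, Message> {
        // The widget only borrows the video, so it is cheap to rebuild
        // on every view call.
        VideoPlayer::new(&self.video)
    }
}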

View File

@@ -0,0 +1,177 @@
use iced_wgpu::primitive::Pipeline;
use iced_wgpu::wgpu;
use std::collections::BTreeMap;
use std::sync::{Arc, atomic::AtomicBool};
#[derive(Debug)]
pub struct VideoPrimitive {
    texture: wgpu::TextureView,
    // Bind group pairing `texture` with the shared sampler; `render` below
    // binds it at group 0.
    bind_group: wgpu::BindGroup,
    ready: Arc<AtomicBool>,
}
impl iced_wgpu::Primitive for VideoPrimitive {
type Pipeline = VideoPipeline;
fn prepare(
&self,
pipeline: &mut Self::Pipeline,
device: &wgpu::Device,
queue: &wgpu::Queue,
bounds: &iced_wgpu::core::Rectangle,
viewport: &iced_wgpu::graphics::Viewport,
) {
todo!()
}
fn draw(&self, _pipeline: &Self::Pipeline, _render_pass: &mut wgpu::RenderPass<'_>) -> bool {
false
}
fn render(
&self,
pipeline: &Self::Pipeline,
encoder: &mut wgpu::CommandEncoder,
target: &wgpu::TextureView,
clip_bounds: &iced_wgpu::core::Rectangle<u32>,
) {
if self.ready.load(std::sync::atomic::Ordering::SeqCst) {
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("iced-video-render-pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: target,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: wgpu::StoreOp::Store,
},
depth_slice: None,
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
});
// Restrict drawing to the widget's bounds.
render_pass.set_scissor_rect(
clip_bounds.x,
clip_bounds.y,
clip_bounds.width,
clip_bounds.height,
);
render_pass.set_pipeline(&pipeline.pipeline);
render_pass.set_bind_group(0, &self.bind_group, &[]);
// Fullscreen triangle generated in the vertex shader; no vertex buffer.
render_pass.draw(0..3, 0..1);
self.ready
.store(false, std::sync::atomic::Ordering::Relaxed);
}
}
}
#[derive(Debug)]
pub struct VideoTextures {
id: u64,
texture: wgpu::Texture,
bind_group: wgpu::BindGroup,
ready: Arc<AtomicBool>,
}
#[derive(Debug)]
pub struct VideoPipeline {
pipeline: wgpu::RenderPipeline,
bind_group_layout: wgpu::BindGroupLayout,
sampler: wgpu::Sampler,
videos: BTreeMap<u64, VideoTextures>,
}
pub trait HdrTextureFormatExt {
fn is_hdr(&self) -> bool;
}
impl HdrTextureFormatExt for wgpu::TextureFormat {
fn is_hdr(&self) -> bool {
matches!(
self,
wgpu::TextureFormat::Rgba16Float
| wgpu::TextureFormat::Rgba32Float
| wgpu::TextureFormat::Rgb10a2Unorm
| wgpu::TextureFormat::Rgb10a2Uint
)
}
}
impl Pipeline for VideoPipeline {
fn new(device: &wgpu::Device, queue: &wgpu::Queue, format: wgpu::TextureFormat) -> Self
where
Self: Sized,
{
if format.is_hdr() {
tracing::info!("HDR texture format detected: {:?}", format);
}
let shader_passthrough =
device.create_shader_module(wgpu::include_wgsl!("shaders/passthrough.wgsl"));
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("iced-video-texture-bind-group-layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("iced-video-render-pipeline-layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("iced-video-render-pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader_passthrough,
entry_point: Some("vs_main"),
buffers: &[],
compilation_options: wgpu::PipelineCompilationOptions::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader_passthrough,
entry_point: Some("fs_main"),
targets: &[Some(wgpu::ColorTargetState {
format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: wgpu::PipelineCompilationOptions::default(),
}),
primitive: wgpu::PrimitiveState::default(),
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("iced-video-sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Self {
pipeline,
bind_group_layout,
sampler,
videos: BTreeMap::new(),
}
}
}
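
A sketch (not in this commit) of how a per-video entry might be allocated and tracked in the `videos` map; the `register_video` helper, its signature, and the `Rgba8UnormSrgb` frame format are assumptions:

impl VideoPipeline {
    /// Hypothetical helper: allocate a frame texture plus bind group for one
    /// video and track it under its id. Frame uploads would then write into
    /// `texture` (e.g. via `queue.write_texture`) and flip `ready`.
    fn register_video(&mut self, device: &wgpu::Device, id: u64, size: wgpu::Extent3d) {
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("iced-video-frame-texture"),
            size,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            // Assumed frame format; videoconvert would need to match it.
            format: wgpu::TextureFormat::Rgba8UnormSrgb,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("iced-video-texture-bind-group"),
            layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&self.sampler),
                },
            ],
        });
        self.videos.insert(
            id,
            VideoTextures {
                id,
                texture,
                bind_group,
                ready: Arc::new(AtomicBool::new(false)),
            },
        );
    }
}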

View File

@@ -0,0 +1,31 @@
// Vertex shader
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
};
@vertex
fn vs_main(
    @builtin(vertex_index) in_vertex_index: u32,
) -> VertexOutput {
    // Fullscreen-triangle trick: vertex indices 0..3 map to UVs (0,0), (2,0),
    // and (0,2), producing a single clip-space triangle that covers the whole
    // target without any vertex buffer.
    var out: VertexOutput;
    let uv = vec2<f32>(f32((in_vertex_index << 1u) & 2u), f32(in_vertex_index & 2u));
    out.clip_position = vec4<f32>(uv * 2.0 - 1.0, 0.0, 1.0);
    // Flip Y so row 0 of the texture lands at the top of the screen.
    out.clip_position.y = -out.clip_position.y;
    out.tex_coords = uv;
    return out;
}
// Fragment shader
@group(0) @binding(0)
var t_diffuse: texture_2d<f32>;
@group(0) @binding(1)
var s_diffuse: sampler;
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return textureSample(t_diffuse, s_diffuse, in.tex_coords);
}

View File

@@ -0,0 +1,64 @@
use crate::{Error, Result};
use error_stack::ResultExt;
use gst::*;
use gst::plugins::app::AppSink;
use gst::plugins::playback::Playbin3;
use gst::plugins::videoconvertscale::VideoConvert;
#[derive(Debug, Clone)]
pub struct VideoSource {
playbin: Playbin3,
videoconvert: VideoConvert,
appsink: AppSink,
bus: Bus,
}
impl VideoSource {
/// Creates a new video source from the given URL.
/// Since we don't parse a pipeline description manually, the URL isn't
/// sanitized for now.
pub async fn new(url: impl AsRef<str>) -> Result<Self> {
Gst::new();
let videoconvert = VideoConvert::new("iced-video-convert").change_context(Error)?;
let appsink = AppSink::new("iced-video-sink").change_context(Error)?;
let video_sink = videoconvert.link(&appsink).change_context(Error)?;
let playbin = gst::plugins::playback::Playbin3::new("iced-video")
.change_context(Error)?
.with_uri(url.as_ref())
.with_video_sink(&video_sink);
let bus = playbin.bus().change_context(Error)?;
playbin.wait_ready().change_context(Error)?;
// let bus_stream = bus.stream();
// bus_stream.find(|message| {
// let view = message.view();
// if let gst::MessageView::StateChanged(change) = view {
// change.current() == gst::State::Ready
// } else {
// false
// }
// });
Ok(Self {
playbin,
videoconvert,
appsink,
bus,
})
}
pub fn play(&self) -> Result<()> {
self.playbin
.play()
.change_context(Error)
.attach("Failed to play video")
}
pub fn pause(&self) -> Result<()> {
self.playbin
.pause()
.change_context(Error)
.attach("Failed to pause video")
}
pub fn bus(&self) -> &Bus {
    &self.bus
}
// pub fn copy_frame_to_texture(&self, texture: wgpu::TextureView) -> Result<()> {
// let frame = self
// .appsink
// .try_pull_sample(core::time::Duration::from_millis(1))?
// .ok_or(Error)
// .attach("No video frame available")?;
// }
}
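
A short end-to-end sketch (not part of the commit) of driving playback from an async context; the URL is a placeholder:

// Hypothetical smoke test: open a URL, start playback, and expose the bus
// so a caller can poll it for EOS, errors, and state changes.
async fn play_demo() -> iced_video::Result<()> {
    let video = iced_video::Video::new("https://example.com/clip.mp4").await?;
    video.source().play()?;
    let _bus = video.source().bus();
    Ok(())
}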