fix(iced-video): Write the conversion matrix buffer so the video actually shows up

2026-01-15 17:01:41 +05:30
parent 335e8fdbef
commit 429371002b
4 changed files with 89 additions and 75 deletions
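What the diff shows: `prepare` used to create the uniform buffer for the YUV-to-RGB conversion matrix with `mapped_at_creation: false` and, as far as the diff shows, never wrote to it, so `rgb_primaries` in the fragment shader stayed all zeros and every frame rendered black. The buffer now lives on `VideoTexture`, which exposes a `write_conversion_matrix` method, and the shader uniform is narrowed from `mat4x4<f32>` to a `mat3x3<f32>` backed by 16-byte-aligned rows.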


@@ -108,3 +108,5 @@ planar formats have each of the channels in a contiguous array one after another
 in semi-planar formats the y channel is separate and uv channels are interleaved
+
+## Chroma Subsampling
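As a concrete illustration of what 4:2:0 subsampling means for buffer sizes (a hypothetical helper, not part of this commit): every 2x2 block of Y samples shares one U and one V sample, so the interleaved UV plane carries half as many bytes as the Y plane.

/// Byte sizes of the Y and interleaved-UV planes of a semi-planar 4:2:0 frame
/// (bytes_per_sample would be 1 for NV12, 2 for P010-style 16-bit formats).
fn plane_sizes_420(width: usize, height: usize, bytes_per_sample: usize) -> (usize, usize) {
    let y = width * height * bytes_per_sample;
    // Width and height are halved by the 2x2 subsampling; U and V interleave
    // into a single plane, so multiply by 2 channels.
    let uv = (width / 2) * (height / 2) * 2 * bytes_per_sample;
    (y, uv) // uv is always y / 2
}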


@@ -3,6 +3,7 @@ pub mod primitive;
 pub mod source;
 pub mod widget;
 pub use widget::Video;
+pub mod yuv;
 use error_stack::{Report, ResultExt};


@@ -8,7 +8,38 @@ use std::sync::{Arc, Mutex, atomic::AtomicBool};
 #[derive(Clone, Copy, Debug, bytemuck::Zeroable, bytemuck::Pod)]
 #[repr(transparent)]
 pub struct ConversionMatrix {
-    matrix: [[f32; 4]; 4],
+    matrix: [Vec3f; 3],
+}
+
+#[derive(Clone, Copy, Debug, bytemuck::Zeroable, bytemuck::Pod)]
+#[repr(C, align(16))]
+pub struct Vec3f {
+    data: [f32; 3],
+    __padding: u32,
+}
+
+impl From<[f32; 3]> for Vec3f {
+    fn from(value: [f32; 3]) -> Self {
+        Vec3f {
+            data: [value[0], value[1], value[2]],
+            __padding: 0,
+        }
+    }
+}
+
+impl Vec3f {
+    pub fn new(x: f32, y: f32, z: f32) -> Self {
+        Vec3f {
+            data: [x, y, z],
+            __padding: 0,
+        }
+    }
+
+    pub const fn from(data: [f32; 3]) -> Self {
+        Vec3f {
+            data: [data[0], data[1], data[2]],
+            __padding: 0,
+        }
+    }
 }
 // impl ConversionMatrix {
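On the `__padding` field: WGSL's uniform (std140) layout aligns a `vec3<f32>` to 16 bytes, so each matrix row carries 4 explicit padding bytes and the whole `mat3x3<f32>` uniform occupies 48 bytes. Compile-time checks one could add alongside (a sketch, not in the commit):

// Layout assumptions behind `bytemuck::Pod` and the buffer size used later.
const _: () = assert!(core::mem::size_of::<Vec3f>() == 16);
const _: () = assert!(core::mem::align_of::<Vec3f>() == 16);
const _: () = assert!(core::mem::size_of::<ConversionMatrix>() == 48);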
@@ -44,19 +75,17 @@ pub struct ConversionMatrix {
 pub const BT2020_TO_RGB: ConversionMatrix = ConversionMatrix {
     matrix: [
-        [1.1684, 0.0000, 1.6836, -0.9122],
-        [1.1684, -0.1873, -0.6520, 0.3015],
-        [1.1684, 2.1482, 0.0000, -1.1322],
-        [0.0, 0.0, 0.0, 1.0],
+        Vec3f::from([1.0, 0.0, 1.13983]),
+        Vec3f::from([1.0, -0.39465, -0.58060]),
+        Vec3f::from([1.0, 2.03211, 0.0]),
     ],
 };

 pub const BT709_TO_RGB: ConversionMatrix = ConversionMatrix {
     matrix: [
-        [1.1644, 0.0000, 1.7927, -0.9729],
-        [1.1644, -0.2132, -0.5329, 0.3015],
-        [1.1644, 2.1124, 0.0000, -1.1334],
-        [0.0, 0.0, 0.0, 1.0],
+        Vec3f::from([1.0, 0.0, 1.13983]),
+        Vec3f::from([1.0, -0.39465, -0.58060]),
+        Vec3f::from([1.0, 2.03211, 0.0]),
     ],
 };
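Worth flagging: as committed, `BT2020_TO_RGB` and `BT709_TO_RGB` hold identical values, and the coefficients (1.13983, -0.39465, -0.58060, 2.03211) are the classic full-range BT.601 YUV-to-RGB set rather than per-standard ones. A tiny CPU-side mirror of the shader math (hypothetical, handy for spot-checking values):

/// Apply the three matrix rows to one YUV sample, with u and v centered on 0.
/// A neutral sample y = 0.5, u = v = 0.0 maps to rgb = (0.5, 0.5, 0.5).
fn yuv_to_rgb(rows: [[f32; 3]; 3], y: f32, u: f32, v: f32) -> [f32; 3] {
    let dot = |r: [f32; 3]| r[0] * y + r[1] * u + r[2] * v;
    [dot(rows[0]), dot(rows[1]), dot(rows[2])]
}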
@@ -69,13 +98,6 @@ pub struct VideoFrame {
     pub format: VideoFormat,
 }

-#[derive(Debug, Clone, Copy)]
-pub enum ToneMapping {
-    None,
-    InverseOETF,
-    Reinhard,
-}
-
 impl iced_wgpu::Primitive for VideoFrame {
     type Pipeline = VideoPipeline;
@@ -95,17 +117,6 @@ impl iced_wgpu::Primitive for VideoFrame {
                 pipeline.format,
                 self.format,
             );
-            let conversion_matrix = if texture.format().is_wide() {
-                BT2020_TO_RGB
-            } else {
-                BT709_TO_RGB
-            };
-            let buffer = device.create_buffer(&wgpu::BufferDescriptor {
-                label: Some("iced-video-conversion-matrix-buffer"),
-                usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-                size: core::mem::size_of::<ConversionMatrix>() as wgpu::BufferAddress,
-                mapped_at_creation: false,
-            });

             let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                 label: Some("iced-video-texture-bind-group"),
@@ -130,11 +141,13 @@ impl iced_wgpu::Primitive for VideoFrame {
                 ],
             });

+            // texture.write_conversion_matrix(&BT709_TO_RGB, queue);
             VideoFrameData {
                 id: self.id.clone(),
                 texture,
-                conversion_matrix: buffer,
                 bind_group,
+                conversion_matrix: BT709_TO_RGB,
                 ready: Arc::clone(&self.ready),
             }
         });
@@ -179,24 +192,8 @@ impl iced_wgpu::Primitive for VideoFrame {
             let data = buffer
                 .map_readable()
                 .expect("BUG: Failed to map gst::Buffer readable");
-            // queue.write_buffer(&video.buffer, 0, &data);
             video.texture.write_texture(&data, queue);
-            // queue.write_texture(
-            //     wgpu::TexelCopyTextureInfo {
-            //         texture: &video.texture,
-            //         mip_level: 0,
-            //         origin: wgpu::Origin3d::ZERO,
-            //         aspect: wgpu::TextureAspect::All,
-            //     },
-            //     &data,
-            //     wgpu::TexelCopyBufferLayout {
-            //         offset: 0,
-            //         bytes_per_row: Some(4 * self.size.width),
-            //         rows_per_image: Some(self.size.height),
-            //     },
-            //     self.size,
-            // );
             drop(data);
             video
@@ -250,6 +247,10 @@ impl iced_wgpu::Primitive for VideoFrame {
 /// While we can use vulkan with moltenvk on macos, I'd much rather use metal directly
 /// Right now only supports interleaved UV formats.
 /// For planar formats we would need 3 textures.
+/// Also, NV12 and P010 textures are not COPY_DST capable.
+/// This assumes 4:2:0 chroma subsampling (for now),
+/// so for every 4 Y samples there is 1 U and 1 V sample.
+/// This means the UV texture is half the width and half the height of the Y texture.
 #[derive(Debug)]
 pub struct VideoTexture {
     y: wgpu::Texture,
@@ -257,7 +258,7 @@ pub struct VideoTexture {
     size: wgpu::Extent3d,
     video_format: VideoFormat,
     surface_format: wgpu::TextureFormat,
-    tone_mapping: ToneMapping,
+    conversion_matrix_buffer: wgpu::Buffer,
 }

 impl VideoTexture {
@@ -281,16 +282,6 @@ impl VideoTexture {
             tracing::warn!("Video format is HDR but surface does not support HDR");
         }

-        let tone_mapping = if surface_hdr && video_hdr {
-            ToneMapping::None
-        } else if surface_hdr && !video_hdr {
-            ToneMapping::InverseOETF
-        } else if !surface_hdr && video_hdr {
-            ToneMapping::Reinhard
-        } else {
-            ToneMapping::None
-        };
-
         let y_texture = device.create_texture(&wgpu::TextureDescriptor {
             label: Some(&format!("{}-y", label)),
             size: wgpu::Extent3d {
@@ -319,13 +310,21 @@ impl VideoTexture {
             usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
             view_formats: &[],
         });

+        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
+            label: Some("iced-video-conversion-matrix-buffer"),
+            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
+            size: core::mem::size_of::<ConversionMatrix>() as wgpu::BufferAddress,
+            mapped_at_creation: false,
+        });
+
         VideoTexture {
             y: y_texture,
             uv: uv_texture,
             size,
             surface_format,
             video_format,
-            tone_mapping,
+            conversion_matrix_buffer: buffer,
         }
     }
@@ -343,23 +342,20 @@
     }

     pub fn resize(&self, name: &str, new_size: wgpu::Extent3d, device: &wgpu::Device) -> Self {
-        VideoTexture::new(name, new_size, device, self.format(), self.video_format)
+        VideoTexture::new(name, new_size, device, self.format(), self.pixel_format())
     }

     pub fn pixel_format(&self) -> VideoFormat {
         self.video_format
     }

-    pub fn set_pixel_format(&mut self, format: VideoFormat) {
-        self.video_format = format;
-    }
-
     /// This assumes that the data is laid out correctly for the texture format.
     pub fn write_texture(&self, data: &[u8], queue: &wgpu::Queue) {
         // let (y, u, v) = match self.video_format {
         //     VideoFormat::Nv12 | VideoFormat::P01010le | VideoFormat::P016Le => (4, 1, 1),
         //     _ => (1, 1),
         // };
         let Self { y, uv, .. } = self;
         let y_size = y.size();
         let uv_size = uv.size();
@@ -367,6 +363,17 @@ impl VideoTexture {
         let y_data_size = (y_size.width * y_size.height * 2) as usize;
         let uv_data_size = (y_data_size / 2) as usize; // UV is interleaved

+        // debug_assert_eq!(y_data_size, data.len() / 3 * 2);
+        // debug_assert_eq!(uv_data_size, data.len() / 3);
+        let y_data = &data[0..y_data_size];
+        let uv_data = &data[y_data_size..];
+        // dbg!(y_data.len());
+        // dbg!(uv_data.len());
+        // dbg!(y_data.len() + uv_data.len());
+        // dbg!(data.len());
+        // dbg!(y.size());
+        // dbg!(uv.size());
         queue.write_texture(
             wgpu::TexelCopyTextureInfo {
                 texture: y,
@@ -374,11 +381,11 @@ impl VideoTexture {
                 origin: wgpu::Origin3d::ZERO,
                 aspect: wgpu::TextureAspect::All,
             },
-            &data[0..y_data_size],
+            y_data,
             wgpu::TexelCopyBufferLayout {
                 offset: 0,
-                bytes_per_row: Some(y_size.width),
-                rows_per_image: Some(y_size.height),
+                bytes_per_row: Some(y_size.width * 2),
+                rows_per_image: None,
             },
             y_size,
         );
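The stride fix follows from the texel sizes: with 16-bit samples the Y plane (presumably a one-channel 16-bit texture, matching `y_data_size = width * height * 2` above) needs `width * 2` bytes per row, and the half-width UV plane packs two 16-bit channels per texel, hence `width * 4` in the copy below. `rows_per_image` may be `None` when copying a single layer.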
@@ -390,15 +397,23 @@ impl VideoTexture {
                 origin: wgpu::Origin3d::ZERO,
                 aspect: wgpu::TextureAspect::All,
             },
-            &data[y_data_size..(y_data_size + uv_data_size)],
+            uv_data,
             wgpu::TexelCopyBufferLayout {
                 offset: 0,
-                bytes_per_row: Some(uv_size.width),
-                rows_per_image: Some(uv_size.height),
+                bytes_per_row: Some(uv_size.width * 4),
+                rows_per_image: None,
             },
             uv_size,
         );
     }
+
+    pub fn write_conversion_matrix(&self, matrix: &ConversionMatrix, queue: &wgpu::Queue) {
+        queue.write_buffer(
+            &self.conversion_matrix_buffer,
+            0,
+            bytemuck::bytes_of(matrix),
+        );
+    }
 }
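A sketch of the intended call site (hypothetical; it mirrors the matrix selection that was removed from `prepare` above):

// Hypothetical helper, not in the commit: pick the matrix from the source
// format and upload it through the new method.
fn upload_conversion_matrix(texture: &VideoTexture, queue: &wgpu::Queue) {
    // `is_wide()` is the same predicate the removed `prepare` code used to
    // detect 10-bit/wide formats.
    let matrix = if texture.format().is_wide() {
        BT2020_TO_RGB
    } else {
        BT709_TO_RGB
    };
    texture.write_conversion_matrix(&matrix, queue);
}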
 #[derive(Debug)]
@@ -406,7 +421,7 @@ pub struct VideoFrameData {
     id: id::Id,
     texture: VideoTexture,
     bind_group: wgpu::BindGroup,
-    conversion_matrix: wgpu::Buffer,
+    conversion_matrix: ConversionMatrix,
     ready: Arc<AtomicBool>,
 }


@@ -19,16 +19,12 @@ fn vs_main(
 @group(0) @binding(0) var y_texture: texture_2d<f32>;
 @group(0) @binding(1) var uv_texture: texture_2d<f32>;
 @group(0) @binding(2) var texture_sampler: sampler;
-@group(0) @binding(3) var<uniform> rgb_primaries: mat4x4<f32>;
+@group(0) @binding(3) var<uniform> rgb_primaries: mat3x3<f32>;

 @fragment
 fn fs_main(input: VertexOutput) -> @location(0) vec4<f32> {
     let y = textureSample(y_texture, texture_sampler, input.tex_coords).r;
     let uv = textureSample(uv_texture, texture_sampler, input.tex_coords).rg;
-    let yuv = vec4f(y, uv, 0);
-    let rgb = rgb_primaries * yuv;
-    return vec4f(rgb.r, rgb.g, rgb.b, 1.0);
-    // let rgb = rgb_primaries * yuv;
-    // return vec4f(rgb, 1.0);
+    let yuv = vec3f(y, uv);
+    return vec4f(yuv * rgb_primaries, 1.0);
 }