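//! YUV video frame rendering for iced: decoded GStreamer samples are uploaded
//! into per-plane wgpu textures and drawn with a YCbCr -> RGB conversion in
//! the fragment shader.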
use crate::id;
use gst::videoconvertscale::VideoFormat;
use iced_wgpu::primitive::Pipeline;
use iced_wgpu::wgpu;
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex, atomic::AtomicBool};

#[derive(Clone, Copy, Debug, bytemuck::Zeroable, bytemuck::Pod)]
#[repr(transparent)]
pub struct ConversionMatrix {
    matrix: [[f32; 4]; 4],
}

// impl ConversionMatrix {
//     pub fn desc() -> wgpu::VertexBufferLayout<'static> {
//         wgpu::VertexBufferLayout {
//             array_stride: core::mem::size_of::<ConversionMatrix>() as wgpu::BufferAddress,
//             step_mode: wgpu::VertexStepMode::Vertex,
//             attributes: &[
//                 wgpu::VertexAttribute {
//                     offset: 0,
//                     shader_location: 0,
//                     format: wgpu::VertexFormat::Float32x4,
//                 },
//                 wgpu::VertexAttribute {
//                     offset: 16,
//                     shader_location: 1,
//                     format: wgpu::VertexFormat::Float32x4,
//                 },
//                 wgpu::VertexAttribute {
//                     offset: 32,
//                     shader_location: 2,
//                     format: wgpu::VertexFormat::Float32x4,
//                 },
//                 wgpu::VertexAttribute {
//                     offset: 48,
//                     shader_location: 3,
//                     format: wgpu::VertexFormat::Float32x4,
//                 },
//             ],
//         }
//     }
// }
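
// Limited-range (video levels) YCbCr -> RGB conversion matrices. The
// upper-left 3x3 block holds the Y/Cb/Cr coefficients and the fourth column
// carries the per-channel offsets, so the shader can apply the whole
// conversion as a single `matrix * vec4(y, cb, cr, 1.0)`.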

pub const BT2020_TO_RGB: ConversionMatrix = ConversionMatrix {
    matrix: [
        [1.1684, 0.0000, 1.6836, -0.9122],
        [1.1684, -0.1873, -0.6520, 0.3015],
        [1.1684, 2.1482, 0.0000, -1.1322],
        [0.0, 0.0, 0.0, 1.0],
    ],
};

pub const BT709_TO_RGB: ConversionMatrix = ConversionMatrix {
    matrix: [
        [1.1644, 0.0000, 1.7927, -0.9729],
        [1.1644, -0.2132, -0.5329, 0.3015],
        [1.1644, 2.1124, 0.0000, -1.1334],
        [0.0, 0.0, 0.0, 1.0],
    ],
};

#[derive(Debug)]
pub struct VideoFrame {
    pub id: id::Id,
    pub size: wgpu::Extent3d,
    pub ready: Arc<AtomicBool>,
    pub frame: Arc<Mutex<gst::Sample>>,
    pub format: VideoFormat,
}
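
// `ready` is a simple producer/consumer handshake: the decoding side is
// expected to store `true` after swapping a fresh sample into `frame`, and
// `prepare` stores `false` again once that sample has been uploaded to the GPU.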

#[derive(Debug, Clone, Copy)]
pub enum ToneMapping {
    None,
    InverseOETF,
    Reinhard,
}
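
// A rough sketch of what each variant is presumably meant to do in the
// fragment shader (the actual mapping lives in shaders/passthrough.wgsl),
// for a linear color value `c`:
//
//   None        -> c                // source and surface ranges already match
//   InverseOETF -> pow(c, 2.2)      // expand SDR gamma onto an HDR surface
//   Reinhard    -> c / (1.0 + c)    // compress HDR range onto an SDR surface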

impl iced_wgpu::Primitive for VideoFrame {
    type Pipeline = VideoPipeline;

    fn prepare(
        &self,
        pipeline: &mut Self::Pipeline,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        _bounds: &iced_wgpu::core::Rectangle,
        _viewport: &iced_wgpu::graphics::Viewport,
    ) {
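        // Per-video GPU state is created lazily the first time an `Id` shows up,
        // rebuilt when the frame size changes, and refreshed from the latest
        // sample whenever the producer flags `ready`.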
        let video = pipeline.videos.entry(self.id.clone()).or_insert_with(|| {
            let texture = VideoTexture::new(
                "iced-video-texture",
                self.size,
                device,
                pipeline.format,
                self.format,
            );
            // Key the matrix off the *video* pixel format rather than the
            // surface format: the 16-bit biplanar formats are assumed to carry
            // BT.2020 colorimetry, everything else is treated as BT.709.
            let conversion_matrix =
                if matches!(self.format, VideoFormat::P01010le | VideoFormat::P016Le) {
                    BT2020_TO_RGB
                } else {
                    BT709_TO_RGB
                };
            let buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some("iced-video-conversion-matrix-buffer"),
                usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
                size: core::mem::size_of::<ConversionMatrix>() as wgpu::BufferAddress,
                mapped_at_creation: false,
            });
            // Upload the matrix once at creation; without this write the
            // uniform stays zeroed and the shader output would be black.
            queue.write_buffer(&buffer, 0, bytemuck::bytes_of(&conversion_matrix));

            let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("iced-video-texture-bind-group"),
                layout: &pipeline.bind_group_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(&texture.y_texture()),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::TextureView(&texture.uv_texture()),
                    },
                    wgpu::BindGroupEntry {
                        binding: 2,
                        resource: wgpu::BindingResource::Sampler(&pipeline.sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 3,
                        resource: wgpu::BindingResource::Buffer(buffer.as_entire_buffer_binding()),
                    },
                ],
            });

            VideoFrameData {
                id: self.id.clone(),
                texture,
                conversion_matrix: buffer,
                bind_group,
                ready: Arc::clone(&self.ready),
            }
        });
        if self.size != video.texture.size() {
            let new_texture = video
                .texture
                .resize("iced-video-texture-resized", self.size, device);

            let new_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("iced-video-texture-bind-group"),
                layout: &pipeline.bind_group_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(&new_texture.y_texture()),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::TextureView(&new_texture.uv_texture()),
                    },
                    wgpu::BindGroupEntry {
                        binding: 2,
                        resource: wgpu::BindingResource::Sampler(&pipeline.sampler),
                    },
                    wgpu::BindGroupEntry {
                        binding: 3,
                        resource: wgpu::BindingResource::Buffer(
                            video.conversion_matrix.as_entire_buffer_binding(),
                        ),
                    },
                ],
            });
            video.texture = new_texture;
            video.bind_group = new_bind_group;
        }
        if video.ready.load(std::sync::atomic::Ordering::SeqCst) {
            let frame = self.frame.lock().expect("BUG: Mutex poisoned");
            let buffer = frame
                .buffer()
                .expect("BUG: Failed to get frame data from gst::Sample");

            let data = buffer
                .map_readable()
                .expect("BUG: Failed to map gst::Buffer readable");
            // queue.write_buffer(&video.buffer, 0, &data);

            video.texture.write_texture(&data, queue);
            // queue.write_texture(
            //     wgpu::TexelCopyTextureInfo {
            //         texture: &video.texture,
            //         mip_level: 0,
            //         origin: wgpu::Origin3d::ZERO,
            //         aspect: wgpu::TextureAspect::All,
            //     },
            //     &data,
            //     wgpu::TexelCopyBufferLayout {
            //         offset: 0,
            //         bytes_per_row: Some(4 * self.size.width),
            //         rows_per_image: Some(self.size.height),
            //     },
            //     self.size,
            // );

            drop(data);
            video
                .ready
                .store(false, std::sync::atomic::Ordering::SeqCst);
        }
    }

    fn render(
        &self,
        pipeline: &Self::Pipeline,
        encoder: &mut wgpu::CommandEncoder,
        target: &wgpu::TextureView,
        bounds: &iced_wgpu::core::Rectangle<u32>,
    ) {
        let Some(video) = pipeline.videos.get(&self.id) else {
            return;
        };

        let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("iced-video-render-pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: target,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
                depth_slice: None,
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });

        render_pass.set_pipeline(&pipeline.pipeline);
        render_pass.set_bind_group(0, &video.bind_group, &[]);
        render_pass.set_scissor_rect(
            bounds.x as _,
            bounds.y as _,
            bounds.width as _,
            bounds.height as _,
        );
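        // No vertex buffer is bound: the vertex shader is expected to
        // synthesize a full-screen triangle from the vertex index, and the
        // scissor rect above clips it to the widget's bounds.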
        render_pass.draw(0..3, 0..1);
        // self.ready
        //     .store(false, std::sync::atomic::Ordering::Relaxed);
    }
}

/// Native NV12/P010 texture formats are only supported by the DX12 and Vulkan
/// backends, and while we could run Vulkan through MoltenVK on macOS, I'd much
/// rather use Metal directly, so each frame is split across two ordinary
/// textures here. Right now only interleaved (biplanar) UV formats are
/// supported; fully planar formats would need three textures.
#[derive(Debug)]
pub struct VideoTexture {
    y: wgpu::Texture,
    uv: wgpu::Texture,
    size: wgpu::Extent3d,
    video_format: VideoFormat,
    surface_format: wgpu::TextureFormat,
    tone_mapping: ToneMapping,
}
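
// Plane layout this struct maps onto, for a w x h frame:
//
//   Y  plane: w x h        -> R16Unorm  (one 16-bit luma sample per pixel)
//   UV plane: w/2 x h/2    -> Rg16Unorm (interleaved Cb/Cr pairs)
//
// The upload math in `write_texture` assumes 16-bit samples, i.e. P010/P016
// style data; 8-bit NV12 buffers would need widening before upload.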

impl VideoTexture {
    pub fn size(&self) -> wgpu::Extent3d {
        self.size
    }

    pub fn new(
        label: &str,
        size: wgpu::Extent3d,
        device: &wgpu::Device,
        surface_format: wgpu::TextureFormat,
        video_format: VideoFormat,
    ) -> Self {
        let surface_hdr = surface_format.is_wide();
        let video_hdr = matches!(video_format, VideoFormat::P01010le | VideoFormat::P016Le);

        if surface_hdr && !video_hdr {
            tracing::warn!("Surface texture is HDR but video format is SDR");
        } else if !surface_hdr && video_hdr {
            tracing::warn!("Video format is HDR but surface does not support HDR");
        }

        let tone_mapping = match (surface_hdr, video_hdr) {
            (true, false) => ToneMapping::InverseOETF,
            (false, true) => ToneMapping::Reinhard,
            _ => ToneMapping::None,
        };

        let y_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some(&format!("{}-y", label)),
            size: wgpu::Extent3d {
                width: size.width,
                height: size.height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::R16Unorm,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });
        let uv_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some(&format!("{}-uv", label)),
            size: wgpu::Extent3d {
                width: size.width / 2,
                height: size.height / 2,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rg16Unorm,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });
        VideoTexture {
            y: y_texture,
            uv: uv_texture,
            size,
            surface_format,
            video_format,
            tone_mapping,
        }
    }

    /// Returns the surface texture format, not the video pixel format.
    pub fn format(&self) -> wgpu::TextureFormat {
        self.surface_format
    }

    pub fn y_texture(&self) -> wgpu::TextureView {
        self.y.create_view(&wgpu::TextureViewDescriptor::default())
    }

    pub fn uv_texture(&self) -> wgpu::TextureView {
        self.uv.create_view(&wgpu::TextureViewDescriptor::default())
    }

    pub fn resize(&self, name: &str, new_size: wgpu::Extent3d, device: &wgpu::Device) -> Self {
        VideoTexture::new(name, new_size, device, self.format(), self.video_format)
    }

    pub fn pixel_format(&self) -> VideoFormat {
        self.video_format
    }

    pub fn set_pixel_format(&mut self, format: VideoFormat) {
        self.video_format = format;
    }

    /// Uploads one decoded frame. Assumes `data` is tightly packed 16-bit
    /// samples laid out as the full-resolution Y plane immediately followed
    /// by the half-resolution interleaved UV plane.
    pub fn write_texture(&self, data: &[u8], queue: &wgpu::Queue) {
        // let (y, u, v) = match self.video_format {
        //     VideoFormat::Nv12 | VideoFormat::P01010le | VideoFormat::P016Le => (4, 1, 1),
        //     _ => (1, 1),
        // };
        let Self { y, uv, .. } = self;
        let y_size = y.size();
        let uv_size = uv.size();

        // R16Unorm: two bytes per luma sample.
        let y_data_size = (y_size.width * y_size.height * 2) as usize;
        // Rg16Unorm at half width/height: four bytes per UV texel, which
        // works out to half the Y plane's byte size.
        let uv_data_size = y_data_size / 2;

        queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture: y,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            &data[0..y_data_size],
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // Two bytes per R16Unorm texel; plain `width` under-counts the row.
                bytes_per_row: Some(y_size.width * 2),
                rows_per_image: Some(y_size.height),
            },
            y_size,
        );

        queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture: uv,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            &data[y_data_size..(y_data_size + uv_data_size)],
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // Four bytes per Rg16Unorm texel (two u16 components).
                bytes_per_row: Some(uv_size.width * 4),
                rows_per_image: Some(uv_size.height),
            },
            uv_size,
        );
    }
}

#[derive(Debug)]
pub struct VideoFrameData {
    id: id::Id,
    texture: VideoTexture,
    bind_group: wgpu::BindGroup,
    conversion_matrix: wgpu::Buffer,
    ready: Arc<AtomicBool>,
}

impl VideoFrameData {
    pub fn is_hdr(&self) -> bool {
        self.texture.format().is_wide()
    }
}

#[derive(Debug)]
pub struct VideoPipeline {
    pipeline: wgpu::RenderPipeline,
    bind_group_layout: wgpu::BindGroupLayout,
    sampler: wgpu::Sampler,
    format: wgpu::TextureFormat,
    videos: BTreeMap<id::Id, VideoFrameData>,
}
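
// Per-video GPU state is keyed by `id::Id` and created lazily in `prepare`.
// Note that nothing in this module ever removes an entry, so state for videos
// that no longer exist stays alive for the lifetime of the pipeline.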

pub trait WideTextureFormatExt {
    fn is_wide(&self) -> bool;
}

impl WideTextureFormatExt for wgpu::TextureFormat {
    fn is_wide(&self) -> bool {
        matches!(
            self,
            wgpu::TextureFormat::Rgba16Float
                | wgpu::TextureFormat::Rgba32Float
                | wgpu::TextureFormat::Rgb10a2Unorm
                | wgpu::TextureFormat::Rgb10a2Uint
                | wgpu::TextureFormat::P010
        )
    }
}
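
// `wgpu::TextureFormat::P010` above is the wgpu texture format, not to be
// confused with the GStreamer pixel format `VideoFormat::P01010le` used
// elsewhere in this module; both describe 10-bit-in-16-bit video data.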

impl Pipeline for VideoPipeline {
    fn new(device: &wgpu::Device, _queue: &wgpu::Queue, format: wgpu::TextureFormat) -> Self
    where
        Self: Sized,
    {
        if format.is_wide() {
            tracing::info!("HDR texture format detected: {:?}", format);
        }

        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("iced-video-texture-bind-group-layout"),
            entries: &[
                // y
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: None,
                },
                // uv
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: None,
                },
                // sampler
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
                // conversion matrix
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                },
            ],
        });

        let shader_passthrough =
            device.create_shader_module(wgpu::include_wgsl!("shaders/passthrough.wgsl"));
        let render_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("iced-video-render-pipeline-layout"),
                bind_group_layouts: &[&bind_group_layout],
                push_constant_ranges: &[],
            });
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("iced-video-render-pipeline"),
            layout: Some(&render_pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader_passthrough,
                entry_point: Some("vs_main"),
                buffers: &[],
                compilation_options: wgpu::PipelineCompilationOptions::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader_passthrough,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    format,
                    blend: Some(wgpu::BlendState::REPLACE),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: wgpu::PipelineCompilationOptions::default(),
            }),
            primitive: wgpu::PrimitiveState::default(),
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
            multiview: None,
            cache: None,
        });

        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("iced-video-sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });

        Self {
            pipeline,
            bind_group_layout,
            sampler,
            format,
            videos: BTreeMap::new(),
        }
    }
}