// face-detector/src/gui/app.rs
// feat(detector): add CUDA support for ONNX face detection
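//! iced-based GUI for the face detector.
//!
//! The app follows iced's Elm-style architecture: `FaceDetectorApp` holds all
//! state, `Message` enumerates every event, `update` mutates state and
//! schedules background work as `Task`s, and `view` renders the current state
//! into widgets. Long-running detection and comparison runs are delegated to
//! `FaceDetectionBridge` so the UI thread stays responsive.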
use iced::{
    Alignment, Element, Length, Settings, Task,
widget::{
Space, button, column, container, image, pick_list, progress_bar, row, scrollable, slider,
text,
},
};
use rfd::FileDialog;
use std::path::PathBuf;
use std::sync::Arc;
use crate::gui::bridge::FaceDetectionBridge;
use ::image::{DynamicImage, ImageFormat, RgbImage};
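/// Every event the application reacts to, from direct user input (button
/// presses, slider changes) to completion callbacks delivered by background
/// tasks.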
#[derive(Debug, Clone)]
pub enum Message {
// File operations
OpenImageDialog,
ImageSelected(Option<PathBuf>),
OpenSecondImageDialog,
SecondImageSelected(Option<PathBuf>),
SaveOutputDialog,
OutputPathSelected(Option<PathBuf>),
// Detection parameters
ThresholdChanged(f32),
NmsThresholdChanged(f32),
ExecutorChanged(ExecutorType),
// Actions
DetectFaces,
CompareFaces,
ClearResults,
// Results
DetectionComplete(DetectionResult),
ComparisonComplete(ComparisonResult),
// UI state
TabChanged(Tab),
ProgressUpdate(f32),
// Image loading
ImageLoaded(Option<Arc<Vec<u8>>>),
SecondImageLoaded(Option<Arc<Vec<u8>>>),
ProcessedImageUpdated(Option<Vec<u8>>),
FaceRoisLoaded(Vec<image::Handle>, Vec<image::Handle>),
}
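/// Top-level tabs of the application window.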
#[derive(Debug, Clone, PartialEq)]
pub enum Tab {
Detection,
Comparison,
Settings,
}
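/// Inference backend used by the detector. Accelerated variants are only
/// compiled in when the corresponding Cargo feature is enabled.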
#[derive(Debug, Clone, PartialEq)]
pub enum ExecutorType {
MnnCpu,
#[cfg(feature = "mnn-metal")]
MnnMetal,
#[cfg(feature = "mnn-coreml")]
MnnCoreML,
OnnxCpu,
#[cfg(feature = "ort-cuda")]
OrtCuda,
}
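/// Outcome of a single-image detection run, delivered back to `update` as
/// `Message::DetectionComplete`.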
#[derive(Debug, Clone)]
pub enum DetectionResult {
Success {
image_path: PathBuf,
faces_count: usize,
processed_image: Option<Vec<u8>>,
processing_time: f64,
},
Error(String),
}
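/// Outcome of a two-image comparison run, including the cropped face ROIs
/// (HxWx3 RGB arrays) used for the thumbnail display.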
#[derive(Debug, Clone)]
pub enum ComparisonResult {
Success {
image1_faces: usize,
image2_faces: usize,
image1_face_rois: Vec<ndarray::Array3<u8>>,
image2_face_rois: Vec<ndarray::Array3<u8>>,
best_similarity: f32,
processing_time: f64,
},
Error(String),
}
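/// All application state; iced re-renders the view from this struct after
/// every call to `update`.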
#[derive(Debug)]
pub struct FaceDetectorApp {
// Current tab
current_tab: Tab,
// File paths
input_image: Option<PathBuf>,
second_image: Option<PathBuf>,
output_path: Option<PathBuf>,
// Detection parameters
threshold: f32,
nms_threshold: f32,
executor_type: ExecutorType,
// UI state
is_processing: bool,
progress: f32,
status_message: String,
// Results
detection_result: Option<DetectionResult>,
comparison_result: Option<ComparisonResult>,
// Image data for display
current_image_handle: Option<image::Handle>,
processed_image_handle: Option<image::Handle>,
second_image_handle: Option<image::Handle>,
// Face ROI handles for comparison display
image1_face_roi_handles: Vec<image::Handle>,
image2_face_roi_handles: Vec<image::Handle>,
}
impl Default for FaceDetectorApp {
fn default() -> Self {
Self {
current_tab: Tab::Detection,
input_image: None,
second_image: None,
output_path: None,
threshold: 0.8,
nms_threshold: 0.3,
            // Prefer an accelerated backend when one is compiled in; the
            // cfgs are arranged so that exactly one initializer is compiled
            // for every feature combination.
            #[cfg(not(any(feature = "mnn-metal", feature = "ort-cuda")))]
            executor_type: ExecutorType::MnnCpu,
            #[cfg(all(feature = "mnn-metal", not(feature = "ort-cuda")))]
            executor_type: ExecutorType::MnnMetal,
            #[cfg(feature = "ort-cuda")]
            executor_type: ExecutorType::OrtCuda,
is_processing: false,
progress: 0.0,
status_message: "Ready".to_string(),
detection_result: None,
comparison_result: None,
current_image_handle: None,
processed_image_handle: None,
second_image_handle: None,
image1_face_roi_handles: Vec::new(),
image2_face_roi_handles: Vec::new(),
}
}
}
impl FaceDetectorApp {
fn new() -> (Self, Task<Message>) {
(Self::default(), Task::none())
}
fn title(&self) -> String {
"Face Detector - Rust GUI".to_string()
}
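    /// Applies a message to the state and returns any follow-up work (file
    /// dialogs, image loading, detection runs) as a `Task`.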
fn update(&mut self, message: Message) -> Task<Message> {
match message {
Message::TabChanged(tab) => {
self.current_tab = tab;
Task::none()
}
Message::OpenImageDialog => {
self.status_message = "Opening file dialog...".to_string();
Task::perform(
async {
FileDialog::new()
.add_filter("Images", &["jpg", "jpeg", "png", "bmp", "tiff", "webp"])
.pick_file()
},
Message::ImageSelected,
)
}
Message::ImageSelected(path) => {
if let Some(path) = path {
self.input_image = Some(path.clone());
self.status_message = format!("Selected: {}", path.display());
// Load image data for display
Task::perform(
async move {
match std::fs::read(&path) {
Ok(data) => Some(Arc::new(data)),
Err(_) => None,
}
},
Message::ImageLoaded,
)
} else {
self.status_message = "No file selected".to_string();
Task::none()
}
}
Message::OpenSecondImageDialog => Task::perform(
async {
FileDialog::new()
.add_filter("Images", &["jpg", "jpeg", "png", "bmp", "tiff", "webp"])
.pick_file()
},
Message::SecondImageSelected,
),
Message::SecondImageSelected(path) => {
if let Some(path) = path {
self.second_image = Some(path.clone());
self.status_message = format!("Second image selected: {}", path.display());
// Load second image data for display
Task::perform(
async move {
match std::fs::read(&path) {
Ok(data) => Some(Arc::new(data)),
Err(_) => None,
}
},
Message::SecondImageLoaded,
)
} else {
self.status_message = "No second image selected".to_string();
Task::none()
}
}
Message::SaveOutputDialog => Task::perform(
async {
FileDialog::new()
.add_filter("Images", &["jpg", "jpeg", "png"])
.save_file()
},
Message::OutputPathSelected,
),
Message::OutputPathSelected(path) => {
if let Some(path) = path {
self.output_path = Some(path.clone());
self.status_message = format!("Output will be saved to: {}", path.display());
} else {
self.status_message = "No output path selected".to_string();
}
Task::none()
}
Message::ThresholdChanged(value) => {
self.threshold = value;
Task::none()
}
Message::NmsThresholdChanged(value) => {
self.nms_threshold = value;
Task::none()
}
Message::ExecutorChanged(executor_type) => {
self.executor_type = executor_type;
Task::none()
}
Message::DetectFaces => {
if let Some(input_path) = &self.input_image {
self.is_processing = true;
self.progress = 0.0;
self.status_message = "Detecting faces...".to_string();
let input_path = input_path.clone();
let output_path = self.output_path.clone();
let threshold = self.threshold;
let nms_threshold = self.nms_threshold;
let executor_type = self.executor_type.clone();
Task::perform(
async move {
FaceDetectionBridge::detect_faces(
input_path,
output_path,
threshold,
nms_threshold,
executor_type,
)
.await
},
Message::DetectionComplete,
)
} else {
self.status_message = "Please select an image first".to_string();
Task::none()
}
}
Message::CompareFaces => {
if let (Some(image1), Some(image2)) = (&self.input_image, &self.second_image) {
self.is_processing = true;
self.progress = 0.0;
self.status_message = "Comparing faces...".to_string();
let image1 = image1.clone();
let image2 = image2.clone();
let threshold = self.threshold;
let nms_threshold = self.nms_threshold;
let executor_type = self.executor_type.clone();
Task::perform(
async move {
FaceDetectionBridge::compare_faces(
image1,
image2,
threshold,
nms_threshold,
executor_type,
)
.await
},
Message::ComparisonComplete,
)
} else {
self.status_message = "Please select both images for comparison".to_string();
Task::none()
}
}
Message::ClearResults => {
self.detection_result = None;
self.comparison_result = None;
self.processed_image_handle = None;
self.image1_face_roi_handles.clear();
self.image2_face_roi_handles.clear();
self.status_message = "Results cleared".to_string();
Task::none()
}
Message::DetectionComplete(result) => {
self.is_processing = false;
self.progress = 100.0;
match &result {
DetectionResult::Success {
faces_count,
processing_time,
processed_image,
..
} => {
self.status_message = format!(
"Detection complete! Found {} faces in {:.2}s",
faces_count, processing_time
);
// Update processed image if available
if let Some(image_data) = processed_image {
self.processed_image_handle =
Some(image::Handle::from_bytes(image_data.clone()));
}
}
DetectionResult::Error(error) => {
self.status_message = format!("Detection failed: {}", error);
}
}
self.detection_result = Some(result);
Task::none()
}
Message::ComparisonComplete(result) => {
self.is_processing = false;
self.progress = 100.0;
match &result {
ComparisonResult::Success {
best_similarity,
processing_time,
image1_face_rois,
image2_face_rois,
..
} => {
let interpretation = if *best_similarity > 0.8 {
"Very likely the same person"
} else if *best_similarity > 0.6 {
"Possibly the same person"
} else if *best_similarity > 0.4 {
"Unlikely to be the same person"
} else {
"Very unlikely to be the same person"
};
self.status_message = format!(
"Comparison complete! Similarity: {:.3} - {} (Processing time: {:.2}s)",
best_similarity, interpretation, processing_time
);
// Convert face ROIs to image handles
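                        // The conversion below runs synchronously; the
                        // follow-up task only delivers the finished handles
                        // back to `update` as a `FaceRoisLoaded` message.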
let image1_handles = convert_face_rois_to_handles(image1_face_rois.clone());
let image2_handles = convert_face_rois_to_handles(image2_face_rois.clone());
self.comparison_result = Some(result);
return Task::perform(
async move { (image1_handles, image2_handles) },
|(h1, h2)| Message::FaceRoisLoaded(h1, h2),
);
}
ComparisonResult::Error(error) => {
self.status_message = format!("Comparison failed: {}", error);
}
}
self.comparison_result = Some(result);
Task::none()
}
Message::FaceRoisLoaded(image1_handles, image2_handles) => {
self.image1_face_roi_handles = image1_handles;
self.image2_face_roi_handles = image2_handles;
Task::none()
}
Message::ProgressUpdate(progress) => {
self.progress = progress;
Task::none()
}
Message::ImageLoaded(data) => {
if let Some(image_data) = data {
self.current_image_handle =
Some(image::Handle::from_bytes(image_data.as_ref().clone()));
self.status_message = "Image loaded successfully".to_string();
} else {
self.status_message = "Failed to load image".to_string();
}
Task::none()
}
Message::SecondImageLoaded(data) => {
if let Some(image_data) = data {
self.second_image_handle =
Some(image::Handle::from_bytes(image_data.as_ref().clone()));
self.status_message = "Second image loaded successfully".to_string();
} else {
self.status_message = "Failed to load second image".to_string();
}
Task::none()
}
Message::ProcessedImageUpdated(data) => {
if let Some(image_data) = data {
self.processed_image_handle = Some(image::Handle::from_bytes(image_data));
}
Task::none()
}
}
}
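    /// Renders the tab bar, the active tab's content, and the status bar.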
fn view(&self) -> Element<'_, Message> {
let tabs = row![
button("Detection")
.on_press(Message::TabChanged(Tab::Detection))
.style(if self.current_tab == Tab::Detection {
button::primary
} else {
button::secondary
}),
button("Comparison")
.on_press(Message::TabChanged(Tab::Comparison))
.style(if self.current_tab == Tab::Comparison {
button::primary
} else {
button::secondary
}),
button("Settings")
.on_press(Message::TabChanged(Tab::Settings))
.style(if self.current_tab == Tab::Settings {
button::primary
} else {
button::secondary
}),
]
.spacing(10)
.padding(10);
let content = match self.current_tab {
Tab::Detection => self.detection_view(),
Tab::Comparison => self.comparison_view(),
Tab::Settings => self.settings_view(),
};
let status_bar = container(
row![
text(&self.status_message),
Space::with_width(Length::Fill),
if self.is_processing {
Element::from(progress_bar(0.0..=100.0, self.progress))
} else {
Space::with_width(Length::Shrink).into()
}
]
.align_y(Alignment::Center)
.spacing(10),
)
.padding(10)
.style(container::bordered_box);
column![tabs, content, status_bar].into()
}
}
impl FaceDetectorApp {
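    /// Builds the Detection tab: file pickers, side-by-side original and
    /// processed images, parameter sliders, and the latest results.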
fn detection_view(&self) -> Element<'_, Message> {
let file_section = column![
text("Input Image").size(18),
row![
button("Select Image").on_press(Message::OpenImageDialog),
text(
self.input_image
.as_ref()
.map(|p| p
.file_name()
.unwrap_or_default()
.to_string_lossy()
.to_string())
.unwrap_or_else(|| "No image selected".to_string())
),
]
.spacing(10)
.align_y(Alignment::Center),
row![
button("Output Path").on_press(Message::SaveOutputDialog),
text(
self.output_path
.as_ref()
.map(|p| p
.file_name()
.unwrap_or_default()
.to_string_lossy()
.to_string())
.unwrap_or_else(|| "Auto-generate".to_string())
),
]
.spacing(10)
.align_y(Alignment::Center),
]
.spacing(10);
// Image display section
let image_section = if let Some(ref handle) = self.current_image_handle {
let original_image = column![
text("Original Image").size(16),
container(
image(handle.clone())
.width(400)
.height(300)
.content_fit(iced::ContentFit::ScaleDown)
)
.style(container::bordered_box)
.padding(5),
]
.spacing(5)
.align_x(Alignment::Center);
let processed_section = if let Some(ref processed_handle) = self.processed_image_handle
{
column![
text("Detected Faces").size(16),
container(
image(processed_handle.clone())
.width(400)
.height(300)
.content_fit(iced::ContentFit::ScaleDown)
)
.style(container::bordered_box)
.padding(5),
]
.spacing(5)
.align_x(Alignment::Center)
} else {
column![
text("Detected Faces").size(16),
container(
text("Process image to see results").style(|_theme| text::Style {
color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
})
)
.width(400)
.height(300)
.style(container::bordered_box)
.padding(5)
                .align_x(Alignment::Center)
                .align_y(Alignment::Center),
]
.spacing(5)
.align_x(Alignment::Center)
};
row![original_image, processed_section]
.spacing(20)
.align_y(Alignment::Start)
} else {
row![
container(
text("Select an image to display").style(|_theme| text::Style {
color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
})
)
.width(400)
.height(300)
.style(container::bordered_box)
.padding(5)
            .align_x(Alignment::Center)
            .align_y(Alignment::Center)
]
};
let controls = column![
text("Detection Parameters").size(18),
row![
text("Threshold:"),
slider(0.1..=1.0, self.threshold, Message::ThresholdChanged).step(0.01),
text(format!("{:.2}", self.threshold)),
]
.spacing(10)
.align_y(Alignment::Center),
row![
text("NMS Threshold:"),
slider(0.1..=1.0, self.nms_threshold, Message::NmsThresholdChanged).step(0.01),
text(format!("{:.2}", self.nms_threshold)),
]
.spacing(10)
.align_y(Alignment::Center),
row![
button("Detect Faces")
.on_press(Message::DetectFaces)
.style(button::primary),
button("Clear Results").on_press(Message::ClearResults),
]
.spacing(10),
]
.spacing(10);
let results = if let Some(result) = &self.detection_result {
match result {
DetectionResult::Success {
faces_count,
processing_time,
..
} => column![
text("Detection Results").size(18),
text(format!("Faces detected: {}", faces_count)),
text(format!("Processing time: {:.2}s", processing_time)),
]
.spacing(5),
DetectionResult::Error(error) => column![
text("Detection Results").size(18),
text(format!("Error: {}", error)).style(text::danger),
]
.spacing(5),
}
} else {
column![text("No results yet").style(|_theme| text::Style {
color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
})]
};
column![file_section, image_section, controls, results]
.spacing(20)
.padding(20)
.into()
}
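    /// Builds the Comparison tab: two image pickers, parameter sliders, and
    /// the similarity verdict with per-face ROI thumbnails.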
fn comparison_view(&self) -> Element<'_, Message> {
let file_section = column![
text("Image Comparison").size(18),
row![
button("Select First Image").on_press(Message::OpenImageDialog),
text(
self.input_image
.as_ref()
.map(|p| p
.file_name()
.unwrap_or_default()
.to_string_lossy()
.to_string())
.unwrap_or_else(|| "No image selected".to_string())
),
]
.spacing(10)
.align_y(Alignment::Center),
row![
button("Select Second Image").on_press(Message::OpenSecondImageDialog),
text(
self.second_image
.as_ref()
.map(|p| p
.file_name()
.unwrap_or_default()
.to_string_lossy()
.to_string())
.unwrap_or_else(|| "No image selected".to_string())
),
]
.spacing(10)
.align_y(Alignment::Center),
]
.spacing(10);
// Image comparison display section
let comparison_image_section = {
let first_image = if let Some(ref handle) = self.current_image_handle {
column![
text("First Image").size(16),
container(
image(handle.clone())
.width(350)
.height(250)
.content_fit(iced::ContentFit::ScaleDown)
)
.style(container::bordered_box)
.padding(5),
]
.spacing(5)
.align_x(Alignment::Center)
} else {
column![
text("First Image").size(16),
container(text("Select first image").style(|_theme| text::Style {
color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
}))
.width(350)
.height(250)
.style(container::bordered_box)
.padding(5)
                .align_x(Alignment::Center)
                .align_y(Alignment::Center),
]
.spacing(5)
.align_x(Alignment::Center)
};
let second_image = if let Some(ref handle) = self.second_image_handle {
column![
text("Second Image").size(16),
container(
image(handle.clone())
.width(350)
.height(250)
.content_fit(iced::ContentFit::ScaleDown)
)
.style(container::bordered_box)
.padding(5),
]
.spacing(5)
.align_x(Alignment::Center)
} else {
column![
text("Second Image").size(16),
container(text("Select second image").style(|_theme| text::Style {
color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
}))
.width(350)
.height(250)
.style(container::bordered_box)
.padding(5)
                .align_x(Alignment::Center)
                .align_y(Alignment::Center),
]
.spacing(5)
.align_x(Alignment::Center)
};
row![first_image, second_image]
.spacing(20)
.align_y(Alignment::Start)
};
let controls = column![
text("Comparison Parameters").size(18),
row![
text("Threshold:"),
slider(0.1..=1.0, self.threshold, Message::ThresholdChanged).step(0.01),
text(format!("{:.2}", self.threshold)),
]
.spacing(10)
.align_y(Alignment::Center),
row![
text("NMS Threshold:"),
slider(0.1..=1.0, self.nms_threshold, Message::NmsThresholdChanged).step(0.01),
text(format!("{:.2}", self.nms_threshold)),
]
.spacing(10)
.align_y(Alignment::Center),
button("Compare Faces")
.on_press(Message::CompareFaces)
.style(button::primary),
]
.spacing(10);
let results = if let Some(result) = &self.comparison_result {
match result {
ComparisonResult::Success {
image1_faces,
image2_faces,
image1_face_rois: _,
image2_face_rois: _,
best_similarity,
processing_time,
} => {
let interpretation = if *best_similarity > 0.8 {
(
"Very likely the same person",
iced::Color::from_rgb(0.2, 0.8, 0.2),
)
} else if *best_similarity > 0.6 {
(
"Possibly the same person",
iced::Color::from_rgb(0.8, 0.8, 0.2),
)
} else if *best_similarity > 0.4 {
(
"Unlikely to be the same person",
iced::Color::from_rgb(0.8, 0.6, 0.2),
)
} else {
(
"Very unlikely to be the same person",
iced::Color::from_rgb(0.8, 0.2, 0.2),
)
};
let mut result_column = column![
text("Comparison Results").size(18),
text(format!("First image faces: {}", image1_faces)),
text(format!("Second image faces: {}", image2_faces)),
text(format!("Best similarity: {:.3}", best_similarity)),
text(interpretation.0).style(move |_theme| text::Style {
color: Some(interpretation.1),
}),
text(format!("Processing time: {:.2}s", processing_time)),
]
.spacing(5);
// Add face ROI displays if available
if !self.image1_face_roi_handles.is_empty()
|| !self.image2_face_roi_handles.is_empty()
{
result_column = result_column.push(text("Detected Faces").size(16));
// Create face ROI rows
let image1_faces_row = if !self.image1_face_roi_handles.is_empty() {
let faces: Element<'_, Message> = self
.image1_face_roi_handles
.iter()
.enumerate()
.fold(row![].spacing(5), |row, (i, handle)| {
row.push(
column![
text(format!("Face {}", i + 1)).size(12),
container(
image(handle.clone())
.width(80)
.height(80)
.content_fit(iced::ContentFit::Cover)
)
.style(container::bordered_box)
.padding(2),
]
.spacing(2)
.align_x(Alignment::Center),
)
})
.into();
column![
text("First Image Faces:").size(14),
scrollable(faces).direction(scrollable::Direction::Horizontal(
scrollable::Scrollbar::new()
)),
]
.spacing(5)
} else {
column![text("First Image Faces: None detected").size(14)]
};
let image2_faces_row = if !self.image2_face_roi_handles.is_empty() {
let faces: Element<'_, Message> = self
.image2_face_roi_handles
.iter()
.enumerate()
.fold(row![].spacing(5), |row, (i, handle)| {
row.push(
column![
text(format!("Face {}", i + 1)).size(12),
container(
image(handle.clone())
.width(80)
.height(80)
.content_fit(iced::ContentFit::Cover)
)
.style(container::bordered_box)
.padding(2),
]
.spacing(2)
.align_x(Alignment::Center),
)
})
.into();
column![
text("Second Image Faces:").size(14),
scrollable(faces).direction(scrollable::Direction::Horizontal(
scrollable::Scrollbar::new()
)),
]
.spacing(5)
} else {
column![text("Second Image Faces: None detected").size(14)]
};
result_column = result_column.push(image1_faces_row).push(image2_faces_row);
}
result_column
}
ComparisonResult::Error(error) => column![
text("Comparison Results").size(18),
text(format!("Error: {}", error)).style(text::danger),
]
.spacing(5),
}
} else {
column![
text("No comparison results yet").style(|_theme| text::Style {
color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
})
]
};
scrollable(
column![file_section, comparison_image_section, controls, results]
.spacing(20)
.padding(20),
)
.into()
}
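    /// Builds the Settings tab: backend selection (feature-gated) and the
    /// shared detection thresholds.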
fn settings_view(&self) -> Element<'_, Message> {
#[allow(unused_mut)]
let mut executor_options = vec![ExecutorType::MnnCpu, ExecutorType::OnnxCpu];
#[cfg(feature = "mnn-metal")]
executor_options.push(ExecutorType::MnnMetal);
#[cfg(feature = "mnn-coreml")]
executor_options.push(ExecutorType::MnnCoreML);
#[cfg(feature = "ort-cuda")]
executor_options.push(ExecutorType::OrtCuda);
container(
column![
text("Model Settings").size(18),
row![
text("Execution Backend:"),
pick_list(
executor_options,
Some(self.executor_type.clone()),
Message::ExecutorChanged,
),
]
.spacing(10)
.align_y(Alignment::Center),
text("Detection Thresholds").size(18),
row![
text("Detection Threshold:"),
slider(0.1..=1.0, self.threshold, Message::ThresholdChanged).step(0.01),
text(format!("{:.2}", self.threshold)),
]
.spacing(10)
.align_y(Alignment::Center),
row![
text("NMS Threshold:"),
slider(0.1..=1.0, self.nms_threshold, Message::NmsThresholdChanged).step(0.01),
text(format!("{:.2}", self.nms_threshold)),
]
.spacing(10)
.align_y(Alignment::Center),
text("About").size(18),
text("Face Detection and Embedding - Rust GUI"),
text("Built with iced.rs and your face detection engine"),
]
.spacing(15)
.padding(20),
)
.height(Length::Shrink)
.into()
}
}
impl std::fmt::Display for ExecutorType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ExecutorType::MnnCpu => write!(f, "MNN (CPU)"),
#[cfg(feature = "mnn-metal")]
ExecutorType::MnnMetal => write!(f, "MNN (Metal)"),
#[cfg(feature = "mnn-coreml")]
ExecutorType::MnnCoreML => write!(f, "MNN (CoreML)"),
ExecutorType::OnnxCpu => write!(f, "ONNX (CPU)"),
#[cfg(feature = "ort-cuda")]
ExecutorType::OrtCuda => write!(f, "ONNX (CUDA)"),
}
}
}
// Helper function to convert face ROIs to image handles
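// Each ROI is assumed to be a standard-layout (row-major) HxWx3 RGB array;
// `into_raw_vec_and_offset` would otherwise yield bytes in the wrong order.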
fn convert_face_rois_to_handles(face_rois: Vec<ndarray::Array3<u8>>) -> Vec<image::Handle> {
face_rois
.into_iter()
.filter_map(|roi| {
// Convert ndarray to image::RgbImage
let (height, width, _) = roi.dim();
let (raw_data, _offset) = roi.into_raw_vec_and_offset();
if let Some(img) = RgbImage::from_raw(width as u32, height as u32, raw_data) {
// Convert to PNG bytes
let mut buffer = Vec::new();
let mut cursor = std::io::Cursor::new(&mut buffer);
if DynamicImage::ImageRgb8(img)
.write_to(&mut cursor, ImageFormat::Png)
.is_ok()
{
return Some(image::Handle::from_bytes(buffer));
}
}
None
})
.collect()
}
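// A minimal sanity check for the conversion above, assuming an all-black
// 2x2 ROI is representative of real detector output; the shape and test name
// are illustrative only.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn converts_one_handle_per_roi() {
        // A 2x2 RGB ROI in the HxWxC layout the helper expects.
        let roi = ndarray::Array3::<u8>::zeros((2, 2, 3));
        let handles = convert_face_rois_to_handles(vec![roi]);
        assert_eq!(handles.len(), 1);
    }
}
/// Entry point: builds the iced application with antialiasing enabled and
/// hands control to the runtime.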
pub fn run() -> iced::Result {
let settings = Settings {
antialiasing: true,
..Default::default()
};
iced::application(
"Face Detector",
FaceDetectorApp::update,
FaceDetectorApp::view,
)
.settings(settings)
.run_with(FaceDetectorApp::new)
}