feat(gui): Add iced GUI
src/gui/app.rs (new file, 891 lines)
@@ -0,0 +1,891 @@
use iced::{
    Alignment, Element, Length, Task, Theme,
    widget::{
        Space, button, column, container, image, pick_list, progress_bar, row, scrollable, slider,
        text,
    },
};
use rfd::FileDialog;
use std::path::PathBuf;
use std::sync::Arc;

use crate::gui::bridge::FaceDetectionBridge;
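
/// Messages emitted by the widgets and by background tasks; `update` folds
/// each one into the application state.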
#[derive(Debug, Clone)]
pub enum Message {
    // File operations
    OpenImageDialog,
    ImageSelected(Option<PathBuf>),
    OpenSecondImageDialog,
    SecondImageSelected(Option<PathBuf>),
    SaveOutputDialog,
    OutputPathSelected(Option<PathBuf>),

    // Detection parameters
    ThresholdChanged(f32),
    NmsThresholdChanged(f32),
    ExecutorChanged(ExecutorType),

    // Actions
    DetectFaces,
    CompareFaces,
    ClearResults,

    // Results
    DetectionComplete(DetectionResult),
    ComparisonComplete(ComparisonResult),

    // UI state
    TabChanged(Tab),
    ProgressUpdate(f32),

    // Image loading
    ImageLoaded(Option<Arc<Vec<u8>>>),
    SecondImageLoaded(Option<Arc<Vec<u8>>>),
    ProcessedImageUpdated(Option<Vec<u8>>),
}

#[derive(Debug, Clone, PartialEq)]
pub enum Tab {
    Detection,
    Comparison,
    Settings,
}

#[derive(Debug, Clone, PartialEq)]
pub enum ExecutorType {
    MnnCpu,
    MnnMetal,
    MnnCoreML,
    OnnxCpu,
}

#[derive(Debug, Clone)]
pub enum DetectionResult {
    Success {
        image_path: PathBuf,
        faces_count: usize,
        processed_image: Option<Vec<u8>>,
        processing_time: f64,
    },
    Error(String),
}

#[derive(Debug, Clone)]
pub enum ComparisonResult {
    Success {
        image1_faces: usize,
        image2_faces: usize,
        best_similarity: f32,
        processing_time: f64,
    },
    Error(String),
}
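
/// All GUI state; iced re-renders `view` from this struct after every
/// `update`.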
#[derive(Debug)]
pub struct FaceDetectorApp {
    // Current tab
    current_tab: Tab,

    // File paths
    input_image: Option<PathBuf>,
    second_image: Option<PathBuf>,
    output_path: Option<PathBuf>,

    // Detection parameters
    threshold: f32,
    nms_threshold: f32,
    executor_type: ExecutorType,

    // UI state
    is_processing: bool,
    progress: f32,
    status_message: String,

    // Results
    detection_result: Option<DetectionResult>,
    comparison_result: Option<ComparisonResult>,

    // Image data for display
    current_image_handle: Option<image::Handle>,
    processed_image_handle: Option<image::Handle>,
    second_image_handle: Option<image::Handle>,
}

impl Default for FaceDetectorApp {
    fn default() -> Self {
        Self {
            current_tab: Tab::Detection,
            input_image: None,
            second_image: None,
            output_path: None,
            threshold: 0.8,
            nms_threshold: 0.3,
            executor_type: ExecutorType::MnnCpu,
            is_processing: false,
            progress: 0.0,
            status_message: "Ready".to_string(),
            detection_result: None,
            comparison_result: None,
            current_image_handle: None,
            processed_image_handle: None,
            second_image_handle: None,
        }
    }
}

impl FaceDetectorApp {
    fn new() -> (Self, Task<Message>) {
        (Self::default(), Task::none())
    }

    fn title(&self) -> String {
        "Face Detector - Rust GUI".to_string()
    }

    fn update(&mut self, message: Message) -> Task<Message> {
        match message {
            Message::TabChanged(tab) => {
                self.current_tab = tab;
                Task::none()
            }
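
            // Note: rfd's `FileDialog` is the synchronous API; wrapped in
            // `Task::perform`, it runs on the executor's thread pool rather
            // than blocking the UI thread directly.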
            Message::OpenImageDialog => {
                self.status_message = "Opening file dialog...".to_string();
                Task::perform(
                    async {
                        FileDialog::new()
                            .add_filter("Images", &["jpg", "jpeg", "png", "bmp", "tiff", "webp"])
                            .pick_file()
                    },
                    Message::ImageSelected,
                )
            }

            Message::ImageSelected(path) => {
                if let Some(path) = path {
                    self.input_image = Some(path.clone());
                    self.status_message = format!("Selected: {}", path.display());

                    // Load image data for display
                    Task::perform(
                        async move {
                            match std::fs::read(&path) {
                                Ok(data) => Some(Arc::new(data)),
                                Err(_) => None,
                            }
                        },
                        Message::ImageLoaded,
                    )
                } else {
                    self.status_message = "No file selected".to_string();
                    Task::none()
                }
            }

            Message::OpenSecondImageDialog => Task::perform(
                async {
                    FileDialog::new()
                        .add_filter("Images", &["jpg", "jpeg", "png", "bmp", "tiff", "webp"])
                        .pick_file()
                },
                Message::SecondImageSelected,
            ),

            Message::SecondImageSelected(path) => {
                if let Some(path) = path {
                    self.second_image = Some(path.clone());
                    self.status_message = format!("Second image selected: {}", path.display());

                    // Load second image data for display
                    Task::perform(
                        async move {
                            match std::fs::read(&path) {
                                Ok(data) => Some(Arc::new(data)),
                                Err(_) => None,
                            }
                        },
                        Message::SecondImageLoaded,
                    )
                } else {
                    self.status_message = "No second image selected".to_string();
                    Task::none()
                }
            }

            Message::SaveOutputDialog => Task::perform(
                async {
                    FileDialog::new()
                        .add_filter("Images", &["jpg", "jpeg", "png"])
                        .save_file()
                },
                Message::OutputPathSelected,
            ),

            Message::OutputPathSelected(path) => {
                if let Some(path) = path {
                    self.output_path = Some(path.clone());
                    self.status_message = format!("Output will be saved to: {}", path.display());
                } else {
                    self.status_message = "No output path selected".to_string();
                }
                Task::none()
            }

            Message::ThresholdChanged(value) => {
                self.threshold = value;
                Task::none()
            }

            Message::NmsThresholdChanged(value) => {
                self.nms_threshold = value;
                Task::none()
            }

            Message::ExecutorChanged(executor_type) => {
                self.executor_type = executor_type;
                Task::none()
            }
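
            // The long-running work below clones its inputs into `async move`
            // blocks, since `Task::perform` requires a `'static` future.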
            Message::DetectFaces => {
                if let Some(input_path) = &self.input_image {
                    self.is_processing = true;
                    self.progress = 0.0;
                    self.status_message = "Detecting faces...".to_string();

                    let input_path = input_path.clone();
                    let output_path = self.output_path.clone();
                    let threshold = self.threshold;
                    let nms_threshold = self.nms_threshold;
                    let executor_type = self.executor_type.clone();

                    Task::perform(
                        async move {
                            FaceDetectionBridge::detect_faces(
                                input_path,
                                output_path,
                                threshold,
                                nms_threshold,
                                executor_type,
                            )
                            .await
                        },
                        Message::DetectionComplete,
                    )
                } else {
                    self.status_message = "Please select an image first".to_string();
                    Task::none()
                }
            }

            Message::CompareFaces => {
                if let (Some(image1), Some(image2)) = (&self.input_image, &self.second_image) {
                    self.is_processing = true;
                    self.progress = 0.0;
                    self.status_message = "Comparing faces...".to_string();

                    let image1 = image1.clone();
                    let image2 = image2.clone();
                    let threshold = self.threshold;
                    let nms_threshold = self.nms_threshold;
                    let executor_type = self.executor_type.clone();

                    Task::perform(
                        async move {
                            FaceDetectionBridge::compare_faces(
                                image1,
                                image2,
                                threshold,
                                nms_threshold,
                                executor_type,
                            )
                            .await
                        },
                        Message::ComparisonComplete,
                    )
                } else {
                    self.status_message = "Please select both images for comparison".to_string();
                    Task::none()
                }
            }

            Message::ClearResults => {
                self.detection_result = None;
                self.comparison_result = None;
                self.processed_image_handle = None;
                self.status_message = "Results cleared".to_string();
                Task::none()
            }

            Message::DetectionComplete(result) => {
                self.is_processing = false;
                self.progress = 100.0;

                match &result {
                    DetectionResult::Success {
                        faces_count,
                        processing_time,
                        processed_image,
                        ..
                    } => {
                        self.status_message = format!(
                            "Detection complete! Found {} faces in {:.2}s",
                            faces_count, processing_time
                        );

                        // Update processed image if available
                        if let Some(image_data) = processed_image {
                            self.processed_image_handle =
                                Some(image::Handle::from_bytes(image_data.clone()));
                        }
                    }
                    DetectionResult::Error(error) => {
                        self.status_message = format!("Detection failed: {}", error);
                    }
                }

                self.detection_result = Some(result);
                Task::none()
            }

            Message::ComparisonComplete(result) => {
                self.is_processing = false;
                self.progress = 100.0;

                match &result {
                    ComparisonResult::Success {
                        best_similarity,
                        processing_time,
                        ..
                    } => {
                        let interpretation = if *best_similarity > 0.8 {
                            "Very likely the same person"
                        } else if *best_similarity > 0.6 {
                            "Possibly the same person"
                        } else if *best_similarity > 0.4 {
                            "Unlikely to be the same person"
                        } else {
                            "Very unlikely to be the same person"
                        };

                        self.status_message = format!(
                            "Comparison complete! Similarity: {:.3} - {} (Processing time: {:.2}s)",
                            best_similarity, interpretation, processing_time
                        );
                    }
                    ComparisonResult::Error(error) => {
                        self.status_message = format!("Comparison failed: {}", error);
                    }
                }

                self.comparison_result = Some(result);
                Task::none()
            }

            Message::ProgressUpdate(progress) => {
                self.progress = progress;
                Task::none()
            }

            Message::ImageLoaded(data) => {
                if let Some(image_data) = data {
                    self.current_image_handle =
                        Some(image::Handle::from_bytes(image_data.as_ref().clone()));
                    self.status_message = "Image loaded successfully".to_string();
                } else {
                    self.status_message = "Failed to load image".to_string();
                }
                Task::none()
            }

            Message::SecondImageLoaded(data) => {
                if let Some(image_data) = data {
                    self.second_image_handle =
                        Some(image::Handle::from_bytes(image_data.as_ref().clone()));
                    self.status_message = "Second image loaded successfully".to_string();
                } else {
                    self.status_message = "Failed to load second image".to_string();
                }
                Task::none()
            }

            Message::ProcessedImageUpdated(data) => {
                if let Some(image_data) = data {
                    self.processed_image_handle = Some(image::Handle::from_bytes(image_data));
                }
                Task::none()
            }
        }
    }
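
    // Root layout: tab bar on top, the active tab's content, then a status
    // bar with an optional progress indicator.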
    fn view(&self) -> Element<Message> {
        let tabs = row![
            button("Detection")
                .on_press(Message::TabChanged(Tab::Detection))
                .style(if self.current_tab == Tab::Detection {
                    button::primary
                } else {
                    button::secondary
                }),
            button("Comparison")
                .on_press(Message::TabChanged(Tab::Comparison))
                .style(if self.current_tab == Tab::Comparison {
                    button::primary
                } else {
                    button::secondary
                }),
            button("Settings")
                .on_press(Message::TabChanged(Tab::Settings))
                .style(if self.current_tab == Tab::Settings {
                    button::primary
                } else {
                    button::secondary
                }),
        ]
        .spacing(10)
        .padding(10);

        let content = match self.current_tab {
            Tab::Detection => self.detection_view(),
            Tab::Comparison => self.comparison_view(),
            Tab::Settings => self.settings_view(),
        };

        let status_bar = container(
            row![
                text(&self.status_message),
                Space::with_width(Length::Fill),
                if self.is_processing {
                    Element::from(progress_bar(0.0..=100.0, self.progress))
                } else {
                    Space::with_width(Length::Shrink).into()
                }
            ]
            .align_y(Alignment::Center)
            .spacing(10),
        )
        .padding(10)
        .style(container::bordered_box);

        column![tabs, content, status_bar].into()
    }
}
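
// View helpers: one builder per tab.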
impl FaceDetectorApp {
    fn detection_view(&self) -> Element<Message> {
        let file_section = column![
            text("Input Image").size(18),
            row![
                button("Select Image").on_press(Message::OpenImageDialog),
                text(
                    self.input_image
                        .as_ref()
                        .map(|p| p
                            .file_name()
                            .unwrap_or_default()
                            .to_string_lossy()
                            .to_string())
                        .unwrap_or_else(|| "No image selected".to_string())
                ),
            ]
            .spacing(10)
            .align_y(Alignment::Center),
            row![
                button("Output Path").on_press(Message::SaveOutputDialog),
                text(
                    self.output_path
                        .as_ref()
                        .map(|p| p
                            .file_name()
                            .unwrap_or_default()
                            .to_string_lossy()
                            .to_string())
                        .unwrap_or_else(|| "Auto-generate".to_string())
                ),
            ]
            .spacing(10)
            .align_y(Alignment::Center),
        ]
        .spacing(10);

        // Image display section
        let image_section = if let Some(ref handle) = self.current_image_handle {
            let original_image = column![
                text("Original Image").size(16),
                container(
                    image(handle.clone())
                        .width(400)
                        .height(300)
                        .content_fit(iced::ContentFit::ScaleDown)
                )
                .style(container::bordered_box)
                .padding(5),
            ]
            .spacing(5)
            .align_x(Alignment::Center);

            let processed_section = if let Some(ref processed_handle) = self.processed_image_handle
            {
                column![
                    text("Detected Faces").size(16),
                    container(
                        image(processed_handle.clone())
                            .width(400)
                            .height(300)
                            .content_fit(iced::ContentFit::ScaleDown)
                    )
                    .style(container::bordered_box)
                    .padding(5),
                ]
                .spacing(5)
                .align_x(Alignment::Center)
            } else {
                column![
                    text("Detected Faces").size(16),
                    container(
                        text("Process image to see results").style(|_theme| text::Style {
                            color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
                        })
                    )
                    .width(400)
                    .height(300)
                    .style(container::bordered_box)
                    .padding(5)
                    .center_x(Length::Fill)
                    .center_y(Length::Fill),
                ]
                .spacing(5)
                .align_x(Alignment::Center)
            };

            row![original_image, processed_section]
                .spacing(20)
                .align_y(Alignment::Start)
        } else {
            row![
                container(
                    text("Select an image to display").style(|_theme| text::Style {
                        color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
                    })
                )
                .width(400)
                .height(300)
                .style(container::bordered_box)
                .padding(5)
                .center_x(Length::Fill)
                .center_y(Length::Fill)
            ]
        };

        let controls = column![
            text("Detection Parameters").size(18),
            row![
                text("Threshold:"),
                slider(0.1..=1.0, self.threshold, Message::ThresholdChanged),
                text(format!("{:.2}", self.threshold)),
            ]
            .spacing(10)
            .align_y(Alignment::Center),
            row![
                text("NMS Threshold:"),
                slider(0.1..=1.0, self.nms_threshold, Message::NmsThresholdChanged),
                text(format!("{:.2}", self.nms_threshold)),
            ]
            .spacing(10)
            .align_y(Alignment::Center),
            row![
                button("Detect Faces")
                    .on_press(Message::DetectFaces)
                    .style(button::primary),
                button("Clear Results").on_press(Message::ClearResults),
            ]
            .spacing(10),
        ]
        .spacing(10);

        let results = if let Some(result) = &self.detection_result {
            match result {
                DetectionResult::Success {
                    faces_count,
                    processing_time,
                    ..
                } => column![
                    text("Detection Results").size(18),
                    text(format!("Faces detected: {}", faces_count)),
                    text(format!("Processing time: {:.2}s", processing_time)),
                ]
                .spacing(5),
                DetectionResult::Error(error) => column![
                    text("Detection Results").size(18),
                    text(format!("Error: {}", error)).style(text::danger),
                ]
                .spacing(5),
            }
        } else {
            column![text("No results yet").style(|_theme| text::Style {
                color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
            })]
        };

        column![file_section, image_section, controls, results]
            .spacing(20)
            .padding(20)
            .into()
    }

    fn comparison_view(&self) -> Element<Message> {
        let file_section = column![
            text("Image Comparison").size(18),
            row![
                button("Select First Image").on_press(Message::OpenImageDialog),
                text(
                    self.input_image
                        .as_ref()
                        .map(|p| p
                            .file_name()
                            .unwrap_or_default()
                            .to_string_lossy()
                            .to_string())
                        .unwrap_or_else(|| "No image selected".to_string())
                ),
            ]
            .spacing(10)
            .align_y(Alignment::Center),
            row![
                button("Select Second Image").on_press(Message::OpenSecondImageDialog),
                text(
                    self.second_image
                        .as_ref()
                        .map(|p| p
                            .file_name()
                            .unwrap_or_default()
                            .to_string_lossy()
                            .to_string())
                        .unwrap_or_else(|| "No image selected".to_string())
                ),
            ]
            .spacing(10)
            .align_y(Alignment::Center),
        ]
        .spacing(10);

        // Image comparison display section
        let comparison_image_section = {
            let first_image = if let Some(ref handle) = self.current_image_handle {
                column![
                    text("First Image").size(16),
                    container(
                        image(handle.clone())
                            .width(350)
                            .height(250)
                            .content_fit(iced::ContentFit::ScaleDown)
                    )
                    .style(container::bordered_box)
                    .padding(5),
                ]
                .spacing(5)
                .align_x(Alignment::Center)
            } else {
                column![
                    text("First Image").size(16),
                    container(text("Select first image").style(|_theme| text::Style {
                        color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
                    }))
                    .width(350)
                    .height(250)
                    .style(container::bordered_box)
                    .padding(5)
                    .center_x(Length::Fill)
                    .center_y(Length::Fill),
                ]
                .spacing(5)
                .align_x(Alignment::Center)
            };

            let second_image = if let Some(ref handle) = self.second_image_handle {
                column![
                    text("Second Image").size(16),
                    container(
                        image(handle.clone())
                            .width(350)
                            .height(250)
                            .content_fit(iced::ContentFit::ScaleDown)
                    )
                    .style(container::bordered_box)
                    .padding(5),
                ]
                .spacing(5)
                .align_x(Alignment::Center)
            } else {
                column![
                    text("Second Image").size(16),
                    container(text("Select second image").style(|_theme| text::Style {
                        color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
                    }))
                    .width(350)
                    .height(250)
                    .style(container::bordered_box)
                    .padding(5)
                    .center_x(Length::Fill)
                    .center_y(Length::Fill),
                ]
                .spacing(5)
                .align_x(Alignment::Center)
            };

            row![first_image, second_image]
                .spacing(20)
                .align_y(Alignment::Start)
        };

        let controls = column![
            text("Comparison Parameters").size(18),
            row![
                text("Threshold:"),
                slider(0.1..=1.0, self.threshold, Message::ThresholdChanged),
                text(format!("{:.2}", self.threshold)),
            ]
            .spacing(10)
            .align_y(Alignment::Center),
            row![
                text("NMS Threshold:"),
                slider(0.1..=1.0, self.nms_threshold, Message::NmsThresholdChanged),
                text(format!("{:.2}", self.nms_threshold)),
            ]
            .spacing(10)
            .align_y(Alignment::Center),
            button("Compare Faces")
                .on_press(Message::CompareFaces)
                .style(button::primary),
        ]
        .spacing(10);

        let results = if let Some(result) = &self.comparison_result {
            match result {
                ComparisonResult::Success {
                    image1_faces,
                    image2_faces,
                    best_similarity,
                    processing_time,
                } => {
                    let interpretation = if *best_similarity > 0.8 {
                        (
                            "Very likely the same person",
                            iced::Color::from_rgb(0.2, 0.8, 0.2),
                        )
                    } else if *best_similarity > 0.6 {
                        (
                            "Possibly the same person",
                            iced::Color::from_rgb(0.8, 0.8, 0.2),
                        )
                    } else if *best_similarity > 0.4 {
                        (
                            "Unlikely to be the same person",
                            iced::Color::from_rgb(0.8, 0.6, 0.2),
                        )
                    } else {
                        (
                            "Very unlikely to be the same person",
                            iced::Color::from_rgb(0.8, 0.2, 0.2),
                        )
                    };

                    column![
                        text("Comparison Results").size(18),
                        text(format!("First image faces: {}", image1_faces)),
                        text(format!("Second image faces: {}", image2_faces)),
                        text(format!("Best similarity: {:.3}", best_similarity)),
                        text(interpretation.0).style(move |_theme| text::Style {
                            color: Some(interpretation.1),
                        }),
                        text(format!("Processing time: {:.2}s", processing_time)),
                    ]
                    .spacing(5)
                }
                ComparisonResult::Error(error) => column![
                    text("Comparison Results").size(18),
                    text(format!("Error: {}", error)).style(text::danger),
                ]
                .spacing(5),
            }
        } else {
            column![
                text("No comparison results yet").style(|_theme| text::Style {
                    color: Some(iced::Color::from_rgb(0.6, 0.6, 0.6)),
                })
            ]
        };

        column![file_section, comparison_image_section, controls, results]
            .spacing(20)
            .padding(20)
            .into()
    }

    fn settings_view(&self) -> Element<Message> {
        let executor_options = vec![
            ExecutorType::MnnCpu,
            ExecutorType::MnnMetal,
            ExecutorType::MnnCoreML,
            ExecutorType::OnnxCpu,
        ];

        container(
            column![
                text("Model Settings").size(18),
                row![
                    text("Execution Backend:"),
                    pick_list(
                        executor_options,
                        Some(self.executor_type.clone()),
                        Message::ExecutorChanged,
                    ),
                ]
                .spacing(10)
                .align_y(Alignment::Center),
                text("Detection Thresholds").size(18),
                row![
                    text("Detection Threshold:"),
                    slider(0.1..=1.0, self.threshold, Message::ThresholdChanged),
                    text(format!("{:.2}", self.threshold)),
                ]
                .spacing(10)
                .align_y(Alignment::Center),
                row![
                    text("NMS Threshold:"),
                    slider(0.1..=1.0, self.nms_threshold, Message::NmsThresholdChanged),
                    text(format!("{:.2}", self.nms_threshold)),
                ]
                .spacing(10)
                .align_y(Alignment::Center),
                text("About").size(18),
                text("Face Detection and Embedding - Rust GUI"),
                text("Built with iced.rs and the crate's face detection engine"),
            ]
            .spacing(15)
            .padding(20),
        )
        .height(Length::Shrink)
        .into()
    }
}
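
// `Display` supplies the labels shown by the backend `pick_list` in the
// settings tab.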
impl std::fmt::Display for ExecutorType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ExecutorType::MnnCpu => write!(f, "MNN (CPU)"),
            ExecutorType::MnnMetal => write!(f, "MNN (Metal)"),
            ExecutorType::MnnCoreML => write!(f, "MNN (CoreML)"),
            ExecutorType::OnnxCpu => write!(f, "ONNX (CPU)"),
        }
    }
}

pub fn run() -> iced::Result {
    iced::application(
        "Face Detector",
        FaceDetectorApp::update,
        FaceDetectorApp::view,
    )
    .run_with(FaceDetectorApp::new)
}
src/gui/bridge.rs (new file, 368 lines)
@@ -0,0 +1,368 @@
use std::path::PathBuf;

use crate::facedet::{FaceDetectionConfig, FaceDetector, retinaface};
use crate::faceembed::facenet;
use crate::gui::app::{ComparisonResult, DetectionResult, ExecutorType};
use ndarray_image::ImageToNdarray;
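
// Model weights are embedded into the binary at compile time, so the GUI
// needs no external model files at runtime.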
const RETINAFACE_MODEL_MNN: &[u8] = include_bytes!("../../models/retinaface.mnn");
const FACENET_MODEL_MNN: &[u8] = include_bytes!("../../models/facenet.mnn");
const RETINAFACE_MODEL_ONNX: &[u8] = include_bytes!("../../models/retinaface.onnx");
const FACENET_MODEL_ONNX: &[u8] = include_bytes!("../../models/facenet.onnx");
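
/// Bridges the GUI's async tasks to the synchronous detection and embedding
/// pipelines.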
pub struct FaceDetectionBridge;

impl FaceDetectionBridge {
    pub async fn detect_faces(
        image_path: PathBuf,
        output_path: Option<PathBuf>,
        threshold: f32,
        nms_threshold: f32,
        executor_type: ExecutorType,
    ) -> DetectionResult {
        let start_time = std::time::Instant::now();

        match Self::run_detection_internal(
            image_path.clone(),
            output_path,
            threshold,
            nms_threshold,
            executor_type,
        )
        .await
        {
            Ok((faces_count, processed_image)) => {
                let processing_time = start_time.elapsed().as_secs_f64();
                DetectionResult::Success {
                    image_path,
                    faces_count,
                    processed_image,
                    processing_time,
                }
            }
            Err(error) => DetectionResult::Error(error.to_string()),
        }
    }

    pub async fn compare_faces(
        image1_path: PathBuf,
        image2_path: PathBuf,
        threshold: f32,
        nms_threshold: f32,
        executor_type: ExecutorType,
    ) -> ComparisonResult {
        let start_time = std::time::Instant::now();

        match Self::run_comparison_internal(
            image1_path,
            image2_path,
            threshold,
            nms_threshold,
            executor_type,
        )
        .await
        {
            Ok((image1_faces, image2_faces, best_similarity)) => {
                let processing_time = start_time.elapsed().as_secs_f64();
                ComparisonResult::Success {
                    image1_faces,
                    image2_faces,
                    best_similarity,
                    processing_time,
                }
            }
            Err(error) => ComparisonResult::Error(error.to_string()),
        }
    }

    async fn run_detection_internal(
        image_path: PathBuf,
        output_path: Option<PathBuf>,
        threshold: f32,
        nms_threshold: f32,
        executor_type: ExecutorType,
    ) -> Result<(usize, Option<Vec<u8>>), Box<dyn std::error::Error + Send + Sync>> {
        // Load the image
        let img = image::open(&image_path)?;
        let img_rgb = img.to_rgb8();
        let (width, height) = img_rgb.dimensions();

        // Convert to ndarray format
        let image_array = img_rgb.as_ndarray()?;

        // Create detection configuration
        let config = FaceDetectionConfig::default()
            .with_threshold(threshold)
            .with_nms_threshold(nms_threshold)
            .with_input_width(width as usize)
            .with_input_height(height as usize);

        // Create detector and detect faces
        let faces = match executor_type {
            ExecutorType::MnnCpu | ExecutorType::MnnMetal | ExecutorType::MnnCoreML => {
                let forward_type = match executor_type {
                    ExecutorType::MnnCpu => mnn::ForwardType::CPU,
                    ExecutorType::MnnMetal => mnn::ForwardType::Metal,
                    ExecutorType::MnnCoreML => mnn::ForwardType::CoreML,
                    _ => unreachable!(),
                };

                let mut detector = retinaface::mnn::FaceDetection::builder(RETINAFACE_MODEL_MNN)
                    .map_err(|e| format!("Failed to create MNN detector: {}", e))?
                    .with_forward_type(forward_type)
                    .build()
                    .map_err(|e| format!("Failed to build MNN detector: {}", e))?;

                detector
                    .detect_faces(image_array.view(), &config)
                    .map_err(|e| format!("Detection failed: {}", e))?
            }
            ExecutorType::OnnxCpu => {
                let mut detector = retinaface::ort::FaceDetection::builder(RETINAFACE_MODEL_ONNX)
                    .map_err(|e| format!("Failed to create ONNX detector: {}", e))?
                    .build()
                    .map_err(|e| format!("Failed to build ONNX detector: {}", e))?;

                detector
                    .detect_faces(image_array.view(), &config)
                    .map_err(|e| format!("Detection failed: {}", e))?
            }
        };

        let faces_count = faces.bbox.len();

        // Always generate an annotated image for GUI display; write it to
        // disk only when an output path was chosen.
        let mut output_img = img.to_rgb8();

        for bbox in &faces.bbox {
            let min_point = bbox.min_vertex();
            let size = bbox.size();
            let rect = imageproc::rect::Rect::at(min_point.x as i32, min_point.y as i32)
                .of_size(size.x as u32, size.y as u32);
            imageproc::drawing::draw_hollow_rect_mut(
                &mut output_img,
                rect,
                image::Rgb([255, 0, 0]),
            );
        }

        // Encode to PNG bytes for GUI display
        let mut buffer = Vec::new();
        let mut cursor = std::io::Cursor::new(&mut buffer);
        image::DynamicImage::ImageRgb8(output_img.clone())
            .write_to(&mut cursor, image::ImageFormat::Png)?;

        // Save to file if an output path is specified
        if let Some(ref output_path) = output_path {
            output_img.save(output_path)?;
        }

        let processed_image = Some(buffer);

        Ok((faces_count, processed_image))
    }
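
    // Compares every face found in image 1 against every face found in
    // image 2 and keeps the best cosine similarity, so cost grows with the
    // product of the two face counts.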
    async fn run_comparison_internal(
        image1_path: PathBuf,
        image2_path: PathBuf,
        threshold: f32,
        nms_threshold: f32,
        executor_type: ExecutorType,
    ) -> Result<(usize, usize, f32), Box<dyn std::error::Error + Send + Sync>> {
        // Load both images
        let img1 = image::open(&image1_path)?.to_rgb8();
        let img2 = image::open(&image2_path)?.to_rgb8();

        // Convert to ndarray format
        let image1_array = img1.as_ndarray()?;
        let image2_array = img2.as_ndarray()?;

        // Create detection configurations
        let config1 = FaceDetectionConfig::default()
            .with_threshold(threshold)
            .with_nms_threshold(nms_threshold)
            .with_input_width(img1.width() as usize)
            .with_input_height(img1.height() as usize);

        let config2 = FaceDetectionConfig::default()
            .with_threshold(threshold)
            .with_nms_threshold(nms_threshold)
            .with_input_width(img2.width() as usize)
            .with_input_height(img2.height() as usize);

        // Create detector and embedder, detect faces, and generate embeddings
        let (faces1, faces2, best_similarity) = match executor_type {
            ExecutorType::MnnCpu | ExecutorType::MnnMetal | ExecutorType::MnnCoreML => {
                let forward_type = match executor_type {
                    ExecutorType::MnnCpu => mnn::ForwardType::CPU,
                    ExecutorType::MnnMetal => mnn::ForwardType::Metal,
                    ExecutorType::MnnCoreML => mnn::ForwardType::CoreML,
                    _ => unreachable!(),
                };

                let mut detector = retinaface::mnn::FaceDetection::builder(RETINAFACE_MODEL_MNN)
                    .map_err(|e| format!("Failed to create MNN detector: {}", e))?
                    .with_forward_type(forward_type.clone())
                    .build()
                    .map_err(|e| format!("Failed to build MNN detector: {}", e))?;

                let embedder = facenet::mnn::EmbeddingGenerator::builder(FACENET_MODEL_MNN)
                    .map_err(|e| format!("Failed to create MNN embedder: {}", e))?
                    .with_forward_type(forward_type)
                    .build()
                    .map_err(|e| format!("Failed to build MNN embedder: {}", e))?;

                // Detect faces in both images
                let faces1 = detector
                    .detect_faces(image1_array.view(), &config1)
                    .map_err(|e| format!("Detection failed for image 1: {}", e))?;
                let faces2 = detector
                    .detect_faces(image2_array.view(), &config2)
                    .map_err(|e| format!("Detection failed for image 2: {}", e))?;

                // Extract face crops and generate embeddings
                let mut best_similarity = 0.0f32;

                for bbox1 in &faces1.bbox {
                    let crop1 = Self::crop_face_from_image(&img1, bbox1)?;
                    let crop1_array = ndarray::Array::from_shape_vec(
                        (1, crop1.height() as usize, crop1.width() as usize, 3),
                        crop1
                            .pixels()
                            .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
                            .collect(),
                    )?;

                    let embedding1 = embedder
                        .run_models(crop1_array.view())
                        .map_err(|e| format!("Embedding generation failed: {}", e))?;

                    for bbox2 in &faces2.bbox {
                        let crop2 = Self::crop_face_from_image(&img2, bbox2)?;
                        let crop2_array = ndarray::Array::from_shape_vec(
                            (1, crop2.height() as usize, crop2.width() as usize, 3),
                            crop2
                                .pixels()
                                .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
                                .collect(),
                        )?;

                        let embedding2 = embedder
                            .run_models(crop2_array.view())
                            .map_err(|e| format!("Embedding generation failed: {}", e))?;

                        let similarity = Self::cosine_similarity(
                            embedding1.row(0).as_slice().unwrap(),
                            embedding2.row(0).as_slice().unwrap(),
                        );
                        best_similarity = best_similarity.max(similarity);
                    }
                }

                (faces1, faces2, best_similarity)
            }
            ExecutorType::OnnxCpu => {
                let mut detector = retinaface::ort::FaceDetection::builder(RETINAFACE_MODEL_ONNX)
                    .map_err(|e| format!("Failed to create ONNX detector: {}", e))?
                    .build()
                    .map_err(|e| format!("Failed to build ONNX detector: {}", e))?;

                let mut embedder = facenet::ort::EmbeddingGenerator::builder(FACENET_MODEL_ONNX)
                    .map_err(|e| format!("Failed to create ONNX embedder: {}", e))?
                    .build()
                    .map_err(|e| format!("Failed to build ONNX embedder: {}", e))?;

                // Detect faces in both images
                let faces1 = detector
                    .detect_faces(image1_array.view(), &config1)
                    .map_err(|e| format!("Detection failed for image 1: {}", e))?;
                let faces2 = detector
                    .detect_faces(image2_array.view(), &config2)
                    .map_err(|e| format!("Detection failed for image 2: {}", e))?;

                // Extract face crops and generate embeddings
                let mut best_similarity = 0.0f32;

                for bbox1 in &faces1.bbox {
                    let crop1 = Self::crop_face_from_image(&img1, bbox1)?;
                    let crop1_array = ndarray::Array::from_shape_vec(
                        (1, crop1.height() as usize, crop1.width() as usize, 3),
                        crop1
                            .pixels()
                            .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
                            .collect(),
                    )?;

                    let embedding1 = embedder
                        .run_models(crop1_array.view())
                        .map_err(|e| format!("Embedding generation failed: {}", e))?;

                    for bbox2 in &faces2.bbox {
                        let crop2 = Self::crop_face_from_image(&img2, bbox2)?;
                        let crop2_array = ndarray::Array::from_shape_vec(
                            (1, crop2.height() as usize, crop2.width() as usize, 3),
                            crop2
                                .pixels()
                                .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
                                .collect(),
                        )?;

                        let embedding2 = embedder
                            .run_models(crop2_array.view())
                            .map_err(|e| format!("Embedding generation failed: {}", e))?;

                        let similarity = Self::cosine_similarity(
                            embedding1.row(0).as_slice().unwrap(),
                            embedding2.row(0).as_slice().unwrap(),
                        );
                        best_similarity = best_similarity.max(similarity);
                    }
                }

                (faces1, faces2, best_similarity)
            }
        };

        Ok((faces1.bbox.len(), faces2.bbox.len(), best_similarity))
    }
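
    /// Clamps the detection bounding box to the image bounds, then crops.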
    fn crop_face_from_image(
        img: &image::RgbImage,
        bbox: &bounding_box::Aabb2<usize>,
    ) -> Result<image::RgbImage, Box<dyn std::error::Error + Send + Sync>> {
        let min_point = bbox.min_vertex();
        let size = bbox.size();
        let x = min_point.x as u32;
        let y = min_point.y as u32;
        let width = size.x as u32;
        let height = size.y as u32;

        // Ensure crop bounds are within the image
        let img_width = img.width();
        let img_height = img.height();

        let crop_x = x.min(img_width.saturating_sub(1));
        let crop_y = y.min(img_height.saturating_sub(1));
        let crop_width = width.min(img_width - crop_x);
        let crop_height = height.min(img_height - crop_y);

        Ok(image::imageops::crop_imm(img, crop_x, crop_y, crop_width, crop_height).to_image())
    }
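
    /// Cosine similarity of two embedding vectors, in [-1, 1]; returns 0.0
    /// if either vector has zero norm.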
    fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
        let dot_product: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
        let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
        let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();

        if norm_a == 0.0 || norm_b == 0.0 {
            0.0
        } else {
            dot_product / (norm_a * norm_b)
        }
    }
}
src/gui/mod.rs (new file, 5 lines)
@@ -0,0 +1,5 @@
pub mod app;
pub mod bridge;

pub use app::{FaceDetectorApp, Message, run};
pub use bridge::FaceDetectionBridge;