feat(bounding-box): add scale_uniform method for center-preserving scaling

feat(gui): display face ROIs in comparison results

refactor(bridge): pad detected face bounding boxes uniformly
uttarayan21
2025-08-22 19:01:34 +05:30
parent c758fd8d41
commit 3eec262076
3 changed files with 180 additions and 18 deletions
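
In short, scale_uniform grows or shrinks a box about its own center: the size is multiplied by the scalar and the origin is shifted back by half of the growth. The detection bridge uses this to pad each detected face box by 30% (cast to f32, scaled by 1.30, cast back to usize) before drawing and cropping ROIs. A minimal arithmetic sketch of the center-preserving rule, in plain Rust outside the crate and with illustrative numbers:

fn main() {
    // Illustrative box: origin (40, 60), size 100 x 120, padded by a factor of 1.30.
    let (x, y, w, h) = (40.0_f32, 60.0, 100.0, 120.0);
    let s = 1.30_f32;

    // Same rule as scale_uniform: scale the size, shift the origin by half the growth.
    let (nw, nh) = (w * s, h * s);
    let (nx, ny) = (x - (nw - w) / 2.0, y - (nh - h) / 2.0);

    // The center stays at (90, 120); the padded box is (25, 42) with size 130 x 156.
    println!("old center: ({}, {})", x + w / 2.0, y + h / 2.0);
    println!("new center: ({}, {})", nx + nw / 2.0, ny + nh / 2.0);
    println!("padded box: ({nx}, {ny}) {nw} x {nh}");
}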

View File

@@ -163,6 +163,21 @@ impl<T: Num, const D: usize> AxisAlignedBoundingBox<T, D> {
}
}
pub fn scale_uniform(self, scalar: T) -> Self
where
T: core::ops::MulAssign,
T: core::ops::DivAssign,
T: core::ops::SubAssign,
{
let two = T::one() + T::one();
let new_size = self.size * scalar;
let new_point = self.point.coords - (new_size - self.size) / two;
Self {
point: Point::from(new_point),
size: new_size,
}
}
pub fn contains_bbox(&self, other: &Self) -> bool
where
T: core::ops::AddAssign,
@@ -270,15 +285,17 @@ impl<T: Num, const D: usize> AxisAlignedBoundingBox<T, D> {
})
}
// pub fn as_<T2>(&self) -> Option<Aabb<T2, D>>
// where
// T2: Num + simba::scalar::SubsetOf<T>,
// {
// Some(Aabb {
// point: Point::from(self.point.coords.as_()),
// size: self.size.as_(),
// })
// }
pub fn as_<T2>(&self) -> Aabb<T2, D>
where
T2: Num,
T: num::cast::AsPrimitive<T2>,
{
Aabb {
point: Point::from(self.point.coords.map(|x| x.as_())),
size: self.size.map(|x| x.as_()),
}
}
pub fn measure(&self) -> T
where
T: core::ops::MulAssign,
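
The rewritten as_ replaces the old Option-returning version with an infallible cast built on num::cast::AsPrimitive, which behaves like Rust's as operator: integer-to-float is exact for pixel-sized values, while float-to-integer truncates toward zero and saturates values that are out of range. A small standalone sketch of that cast behavior, assuming only the num crate the diff already relies on:

use num::cast::AsPrimitive;

fn main() {
    // usize -> f32, as the bridge does before scaling a detected box.
    let w: usize = 123;
    let wf: f32 = w.as_();

    // f32 -> usize after padding: the fractional part is truncated toward
    // zero, and a negative coordinate saturates to 0 instead of wrapping.
    let truncated: usize = 129.9_f32.as_();
    let clamped: usize = (-3.5_f32).as_();

    println!("{wf} {truncated} {clamped}"); // 123 129 0
}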

View File

@@ -10,6 +10,7 @@ use std::path::PathBuf;
use std::sync::Arc;
use crate::gui::bridge::FaceDetectionBridge;
use ::image::{DynamicImage, ImageFormat, RgbImage};
#[derive(Debug, Clone)]
pub enum Message {
@@ -43,6 +44,7 @@ pub enum Message {
ImageLoaded(Option<Arc<Vec<u8>>>),
SecondImageLoaded(Option<Arc<Vec<u8>>>),
ProcessedImageUpdated(Option<Vec<u8>>),
FaceRoisLoaded(Vec<image::Handle>, Vec<image::Handle>),
}
#[derive(Debug, Clone, PartialEq)]
@@ -112,6 +114,10 @@ pub struct FaceDetectorApp {
current_image_handle: Option<image::Handle>,
processed_image_handle: Option<image::Handle>,
second_image_handle: Option<image::Handle>,
// Face ROI handles for comparison display
image1_face_roi_handles: Vec<image::Handle>,
image2_face_roi_handles: Vec<image::Handle>,
}
impl Default for FaceDetectorApp {
@@ -132,6 +138,8 @@ impl Default for FaceDetectorApp {
current_image_handle: None,
processed_image_handle: None,
second_image_handle: None,
image1_face_roi_handles: Vec::new(),
image2_face_roi_handles: Vec::new(),
}
}
}
@@ -315,6 +323,8 @@ impl FaceDetectorApp {
self.detection_result = None;
self.comparison_result = None;
self.processed_image_handle = None;
self.image1_face_roi_handles.clear();
self.image2_face_roi_handles.clear();
self.status_message = "Results cleared".to_string();
Task::none()
}
@@ -358,6 +368,8 @@ impl FaceDetectorApp {
ComparisonResult::Success {
best_similarity,
processing_time,
image1_face_rois,
image2_face_rois,
..
} => {
let interpretation = if *best_similarity > 0.8 {
@@ -374,6 +386,16 @@ impl FaceDetectorApp {
"Comparison complete! Similarity: {:.3} - {} (Processing time: {:.2}s)",
best_similarity, interpretation, processing_time
);
// Convert face ROIs to image handles
let image1_handles = convert_face_rois_to_handles(image1_face_rois.clone());
let image2_handles = convert_face_rois_to_handles(image2_face_rois.clone());
self.comparison_result = Some(result);
return Task::perform(
async move { (image1_handles, image2_handles) },
|(h1, h2)| Message::FaceRoisLoaded(h1, h2),
);
}
ComparisonResult::Error(error) => {
self.status_message = format!("Comparison failed: {}", error);
@@ -384,6 +406,12 @@ impl FaceDetectorApp {
Task::none()
}
Message::FaceRoisLoaded(image1_handles, image2_handles) => {
self.image1_face_roi_handles = image1_handles;
self.image2_face_roi_handles = image2_handles;
Task::none()
}
Message::ProgressUpdate(progress) => {
self.progress = progress;
Task::none()
@@ -794,7 +822,7 @@ impl FaceDetectorApp {
)
};
column![
let mut result_column = column![
text("Comparison Results").size(18),
text(format!("First image faces: {}", image1_faces)),
text(format!("Second image faces: {}", image2_faces)),
@@ -804,7 +832,89 @@ impl FaceDetectorApp {
}),
text(format!("Processing time: {:.2}s", processing_time)),
]
.spacing(5);
// Add face ROI displays if available
if !self.image1_face_roi_handles.is_empty()
|| !self.image2_face_roi_handles.is_empty()
{
result_column = result_column.push(text("Detected Faces").size(16));
// Create face ROI rows
let image1_faces_row = if !self.image1_face_roi_handles.is_empty() {
let faces: Element<'_, Message> = self
.image1_face_roi_handles
.iter()
.enumerate()
.fold(row![].spacing(5), |row, (i, handle)| {
row.push(
column![
text(format!("Face {}", i + 1)).size(12),
container(
image(handle.clone())
.width(80)
.height(80)
.content_fit(iced::ContentFit::Cover)
)
.style(container::bordered_box)
.padding(2),
]
.spacing(2)
.align_x(Alignment::Center),
)
})
.into();
column![
text("First Image Faces:").size(14),
scrollable(faces).direction(scrollable::Direction::Horizontal(
scrollable::Scrollbar::new()
)),
]
.spacing(5)
} else {
column![text("First Image Faces: None detected").size(14)]
};
let image2_faces_row = if !self.image2_face_roi_handles.is_empty() {
let faces: Element<'_, Message> = self
.image2_face_roi_handles
.iter()
.enumerate()
.fold(row![].spacing(5), |row, (i, handle)| {
row.push(
column![
text(format!("Face {}", i + 1)).size(12),
container(
image(handle.clone())
.width(80)
.height(80)
.content_fit(iced::ContentFit::Cover)
)
.style(container::bordered_box)
.padding(2),
]
.spacing(2)
.align_x(Alignment::Center),
)
})
.into();
column![
text("Second Image Faces:").size(14),
scrollable(faces).direction(scrollable::Direction::Horizontal(
scrollable::Scrollbar::new()
)),
]
.spacing(5)
} else {
column![text("Second Image Faces: None detected").size(14)]
};
result_column = result_column.push(image1_faces_row).push(image2_faces_row);
}
result_column
}
ComparisonResult::Error(error) => column![
text("Comparison Results").size(18),
@@ -820,9 +930,11 @@ impl FaceDetectorApp {
]
};
scrollable(
column![file_section, comparison_image_section, controls, results]
.spacing(20)
.padding(20)
.padding(20),
)
.into()
}
@@ -885,6 +997,31 @@ impl std::fmt::Display for ExecutorType {
}
}
// Helper function to convert face ROIs to image handles
fn convert_face_rois_to_handles(face_rois: Vec<ndarray::Array3<u8>>) -> Vec<image::Handle> {
face_rois
.into_iter()
.filter_map(|roi| {
// Convert ndarray to image::RgbImage
let (height, width, _) = roi.dim();
let (raw_data, _offset) = roi.into_raw_vec_and_offset();
if let Some(img) = RgbImage::from_raw(width as u32, height as u32, raw_data) {
// Convert to PNG bytes
let mut buffer = Vec::new();
let mut cursor = std::io::Cursor::new(&mut buffer);
if DynamicImage::ImageRgb8(img)
.write_to(&mut cursor, ImageFormat::Png)
.is_ok()
{
return Some(image::Handle::from_bytes(buffer));
}
}
None
})
.collect()
}
pub fn run() -> iced::Result {
iced::application(
"Face Detector",

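The convert_face_rois_to_handles helper above assumes each ROI is an owned, standard-layout height x width x 3 (HWC) u8 array, so the Vec returned by into_raw_vec_and_offset is already in the row-major RGB order that RgbImage::from_raw expects; an array with permuted or reversed axes would first need to be rearranged into standard layout. A standalone sketch of the same conversion path, using a hypothetical 2 x 2 all-red ROI and the same ndarray/image calls as the diff:

use image::{DynamicImage, ImageFormat, RgbImage};
use ndarray::Array3;

fn main() {
    // Hypothetical 2 x 2 ROI in HWC layout: every pixel is (255, 0, 0).
    let roi: Array3<u8> = Array3::from_shape_fn((2, 2, 3), |(_, _, c)| if c == 0 { 255 } else { 0 });

    // Same steps as the GUI helper: raw HWC bytes -> RgbImage -> in-memory PNG.
    let (height, width, _) = roi.dim();
    let (raw, _offset) = roi.into_raw_vec_and_offset();
    let img = RgbImage::from_raw(width as u32, height as u32, raw)
        .expect("raw buffer must hold width * height * 3 bytes");

    let mut png = Vec::new();
    DynamicImage::ImageRgb8(img)
        .write_to(&mut std::io::Cursor::new(&mut png), ImageFormat::Png)
        .expect("PNG encoding failed");

    // In the app, these bytes become an iced image::Handle via image::Handle::from_bytes.
    println!("encoded {} bytes of PNG", png.len());
}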
View File

@@ -326,14 +326,22 @@ where
.change_context(errors::Error)
.attach_printable("Failed to detect faces")?;
for bbox in &output.bbox {
let bboxes = output
.bbox
.iter()
.inspect(|bbox| tracing::info!("Raw bbox: {:?}", bbox))
.map(|bbox| bbox.as_::<f32>().scale_uniform(1.30).as_::<usize>())
.inspect(|bbox| tracing::info!("Padded bbox: {:?}", bbox))
.collect_vec();
for bbox in &bboxes {
tracing::info!("Detected face: {:?}", bbox);
use bounding_box::draw::*;
array.draw(bbox, color::palette::css::GREEN_YELLOW.to_rgba8(), 1);
}
use itertools::Itertools;
let face_rois = array
.view()
.multi_roi(&output.bbox)
.multi_roi(&bboxes)
.change_context(Error)?
.into_iter()
.map(|roi| {
@@ -388,7 +396,7 @@ where
.collect::<Vec<Array1<f32>>>();
Ok(DetectionOutput {
bbox: output.bbox,
bbox: bboxes,
rois: face_rois,
embeddings,
})