feat(ndcv-bridge): add ndcv-bridge for ndarray and opencv interaction

Author: uttarayan21
Date: 2025-08-22 15:10:41 +05:30
Parent: 65560825fa
Commit: aab3d84db0
30 changed files with 3666 additions and 120 deletions
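At its core, ndcv-bridge adds conversion traits between OpenCV `Mat` and `ndarray` arrays, plus blur, blend, contour, connected-component and codec helpers built on them. A minimal round-trip sketch, assuming the crate-root re-exports that the benchmarks below rely on:

```rust
// Illustrative sketch only; `ndcv_bridge::*` re-exports are an assumption
// taken from the benchmarks in this commit.
use ndcv_bridge::*;

fn example() {
    // Mat -> ndarray: a 2-D, 3-channel Mat is viewed as an H x W x 3 array.
    let mat = opencv::core::Mat::new_nd_with_default(
        &[480, 640],
        opencv::core::CV_8UC3,
        (0, 0, 0).into(),
    )
    .expect("failed to create Mat");
    let view: ndarray::ArrayView3<u8> = mat.as_ndarray().expect("Mat -> ndarray failed");

    // ndarray -> Mat: borrow the array as a multi-channel Mat usable with OpenCV calls.
    let arr = view.to_owned();
    let _mat_ref = arr.as_image_mat().expect("ndarray -> Mat failed");
}
```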

Cargo.lock (generated)

@@ -203,6 +203,9 @@ name = "arbitrary"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1"
dependencies = [
"derive_arbitrary",
]
[[package]]
name = "arg_enum_proc_macro"
@@ -478,6 +481,15 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "bbox"
version = "0.1.0"
dependencies = [
"ndarray",
"num",
"serde",
]
[[package]]
name = "bindgen"
version = "0.60.1"
@@ -770,6 +782,16 @@ dependencies = [
"windows-link",
]
[[package]]
name = "clang"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84c044c781163c001b913cd018fc95a628c50d0d2dfea8bca77dad71edb16e37"
dependencies = [
"clang-sys",
"libc",
]
[[package]]
name = "clang-sys"
version = "1.8.1"
@@ -816,6 +838,7 @@ dependencies = [
"anstyle",
"clap_lex 0.7.5",
"strsim 0.11.1",
"terminal_size",
]
[[package]]
@@ -979,6 +1002,12 @@ dependencies = [
"crossbeam-utils",
]
[[package]]
name = "condtype"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf0a07a401f374238ab8e2f11a104d2851bf9ce711ec69804834de8af45c7af"
[[package]]
name = "core-foundation"
version = "0.9.4"
@@ -1210,6 +1239,17 @@ dependencies = [
"powerfmt",
]
[[package]]
name = "derive_arbitrary"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "detect-desktop-environment"
version = "0.2.0"
@@ -1240,6 +1280,7 @@ dependencies = [
"ndarray-math 0.1.0 (git+https://git.darksailor.dev/servius/ndarray-math)",
"ndarray-resize",
"ndarray-safetensors",
"opencv",
"ordered-float",
"ort",
"rfd",
@@ -1320,6 +1361,31 @@ dependencies = [
"syn 2.0.106",
]
[[package]]
name = "divan"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a405457ec78b8fe08b0e32b4a3570ab5dff6dd16eb9e76a5ee0a9d9cbd898933"
dependencies = [
"cfg-if",
"clap 4.5.45",
"condtype",
"divan-macros",
"libc",
"regex-lite",
]
[[package]]
name = "divan-macros"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9556bc800956545d6420a640173e5ba7dfa82f38d3ea5a167eb555bc69ac3323"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "dlib"
version = "0.5.2"
@@ -2026,7 +2092,7 @@ dependencies = [
"presser",
"thiserror 1.0.69",
"winapi",
-"windows",
+"windows 0.52.0",
]
[[package]]
@@ -2552,6 +2618,17 @@ dependencies = [
"rayon",
]
[[package]]
name = "img-parts"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19734e3c43b2a850f5889c077056e47c874095f2d87e853c7c41214ae67375f0"
dependencies = [
"bytes",
"crc32fast",
"miniz_oxide",
]
[[package]]
name = "imgref"
version = "1.11.0"
@@ -3154,6 +3231,7 @@ dependencies = [
"portable-atomic",
"portable-atomic-util",
"rawpointer",
"rayon",
]
[[package]]
@@ -3183,6 +3261,20 @@ dependencies = [
"thiserror 2.0.15",
]
[[package]]
name = "ndarray-npy"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b313788c468c49141a9d9b6131fc15f403e6ef4e8446a0b2e18f664ddb278a9"
dependencies = [
"byteorder",
"ndarray",
"num-complex",
"num-traits",
"py_literal",
"zip",
]
[[package]]
name = "ndarray-resize"
version = "0.1.0"
@@ -3206,6 +3298,26 @@ dependencies = [
"thiserror 2.0.15",
]
[[package]]
name = "ndcv-bridge"
version = "0.1.0"
dependencies = [
"bbox",
"bytemuck",
"divan",
"error-stack",
"fast_image_resize",
"img-parts",
"ndarray",
"ndarray-npy",
"num",
"opencv",
"rayon",
"thiserror 2.0.15",
"tracing",
"wide",
]
[[package]]
name = "ndk"
version = "0.9.0"
@@ -3727,6 +3839,41 @@ version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4ce411919553d3f9fa53a0880544cda985a112117a0444d5ff1e870a893d6ea"
[[package]]
name = "opencv"
version = "0.95.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c73b6fccd78797a87cdb885c997351a1a290b0ebde778e996b694dec2a4c04a"
dependencies = [
"cc",
"dunce",
"jobserver",
"libc",
"num-traits",
"once_cell",
"opencv-binding-generator",
"pkg-config",
"semver",
"shlex",
"vcpkg",
"windows 0.59.0",
]
[[package]]
name = "opencv-binding-generator"
version = "0.97.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "010a78e4cc47ff85cf58fb1cbbbab9dcdb8e5e6718917eac26623f077872d012"
dependencies = [
"clang",
"clang-sys",
"dunce",
"once_cell",
"percent-encoding",
"regex",
"shlex",
]
[[package]]
name = "orbclient"
version = "0.3.48"
@@ -3903,6 +4050,50 @@ version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "pest"
version = "2.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323"
dependencies = [
"memchr",
"thiserror 2.0.15",
"ucd-trie",
]
[[package]]
name = "pest_derive"
version = "2.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb056d9e8ea77922845ec74a1c4e8fb17e7c218cc4fc11a15c5d25e189aa40bc"
dependencies = [
"pest",
"pest_generator",
]
[[package]]
name = "pest_generator"
version = "2.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87e404e638f781eb3202dc82db6760c8ae8a1eeef7fb3fa8264b2ef280504966"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "pest_meta"
version = "2.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edd1101f170f5903fde0914f899bb503d9ff5271d7ba76bbb70bea63690cc0d5"
dependencies = [
"pest",
"sha2",
]
[[package]]
name = "phf"
version = "0.11.3"
@@ -4119,6 +4310,19 @@ dependencies = [
"syn 2.0.106",
]
[[package]]
name = "py_literal"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "102df7a3d46db9d3891f178dcc826dc270a6746277a9ae6436f8d29fd490a8e1"
dependencies = [
"num-bigint",
"num-complex",
"num-traits",
"pest",
"pest_derive",
]
[[package]]
name = "qoi"
version = "0.4.1"
@@ -4401,6 +4605,12 @@ dependencies = [
"regex-syntax 0.8.5",
]
[[package]]
name = "regex-lite"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a"
[[package]]
name = "regex-syntax"
version = "0.6.29"
@@ -4696,6 +4906,17 @@ dependencies = [
"digest",
]
[[package]]
name = "sha2"
version = "0.10.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
"cpufeatures",
"digest",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
@@ -5079,6 +5300,16 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "terminal_size"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0"
dependencies = [
"rustix 1.0.8",
"windows-sys 0.60.2",
]
[[package]]
name = "textwrap"
version = "0.16.2"
@@ -5373,6 +5604,12 @@ version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
[[package]]
name = "ucd-trie"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
[[package]]
name = "uds_windows"
version = "1.1.0"
@@ -5955,6 +6192,16 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f919aee0a93304be7f62e8e5027811bbba96bcb1de84d6618be56e43f8a32a1"
dependencies = [
"windows-core 0.59.0",
"windows-targets 0.53.3",
]
[[package]]
name = "windows-core"
version = "0.52.0"
@@ -5964,17 +6211,41 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-core"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "810ce18ed2112484b0d4e15d022e5f598113e220c53e373fb31e67e21670c1ce"
dependencies = [
"windows-implement 0.59.0",
"windows-interface",
"windows-result",
"windows-strings 0.3.1",
"windows-targets 0.53.3",
]
[[package]]
name = "windows-core"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
dependencies = [
-"windows-implement",
+"windows-implement 0.60.0",
"windows-interface",
"windows-link",
"windows-result",
-"windows-strings",
+"windows-strings 0.4.2",
]
[[package]]
name = "windows-implement"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83577b051e2f49a058c308f17f273b570a6a758386fc291b5f6a934dd84e48c1"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
@@ -6014,6 +6285,15 @@ dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.4.2"
@@ -6701,6 +6981,35 @@ dependencies = [
"syn 2.0.106",
]
[[package]]
name = "zip"
version = "2.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fabe6324e908f85a1c52063ce7aa26b68dcb7eb6dbc83a2d148403c9bc3eba50"
dependencies = [
"arbitrary",
"crc32fast",
"crossbeam-utils",
"displaydoc",
"flate2",
"indexmap 2.10.0",
"memchr",
"thiserror 2.0.15",
"zopfli",
]
[[package]]
name = "zopfli"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edfc5ee405f504cd4984ecc6f14d02d55cfda60fa4b689434ef4102aae150cd7"
dependencies = [
"bumpalo",
"crc32fast",
"log",
"simd-adler32",
]
[[package]]
name = "zune-core"
version = "0.4.12"

Cargo.toml

@@ -1,11 +1,24 @@
[workspace]
-members = ["ndarray-image", "ndarray-resize", ".", "bounding-box", "ndarray-safetensors", "sqlite3-safetensor-cosine"]
+members = [
"ndarray-image",
"ndarray-resize",
".",
"bounding-box",
"ndarray-safetensors",
"sqlite3-safetensor-cosine",
"ndcv-bridge",
"bbox"
]
[workspace.package]
version = "0.1.0"
edition = "2024"
[workspace.dependencies]
bbox = { path = "bbox" }
divan = { version = "0.1.21" }
ndarray-npy = "0.9.1"
serde = { version = "1.0", features = ["derive"] }
ndarray-image = { path = "ndarray-image" }
ndarray-resize = { path = "ndarray-resize" }
mnn = { git = "https://github.com/uttarayan21/mnn-rs", version = "0.2.0", features = [
@@ -20,6 +33,18 @@ mnn-sync = { git = "https://github.com/uttarayan21/mnn-rs", version = "0.1.0", f
"tracing",
], branch = "restructure-tensor-type" }
nalgebra = { version = "0.34.0", default-features = false, features = ["std"] }
opencv = { version = "0.95.1" }
bounding-box = { path = "bounding-box" }
ndarray-safetensors = { path = "ndarray-safetensors" }
wide = "0.7.33"
rayon = "1.11.0"
bytemuck = "1.23.2"
error-stack = "0.5.0"
thiserror = "2.0"
fast_image_resize = "5.2.0"
img-parts = "0.4.0"
ndarray = { version = "0.16.1", features = ["rayon"] }
num = "0.4"
[package]
name = "detector"
@@ -50,7 +75,11 @@ bounding-box = { version = "0.1.0", path = "bounding-box" }
color = "0.3.1"
itertools = "0.14.0"
ordered-float = "5.0.0"
-ort = { version = "2.0.0-rc.10", default-features = false, features = [ "std", "tracing", "ndarray"]}
+ort = { version = "2.0.0-rc.10", default-features = false, features = [
"std",
"tracing",
"ndarray",
] }
ndarray-math = { git = "https://git.darksailor.dev/servius/ndarray-math", version = "0.1.0" }
ndarray-safetensors = { version = "0.1.0", path = "ndarray-safetensors" }
sqlite3-safetensor-cosine = { version = "0.1.0", path = "sqlite3-safetensor-cosine" }
@@ -60,6 +89,7 @@ iced = { version = "0.13", features = ["tokio", "image"] }
rfd = "0.15"
futures = "0.3"
imageproc = "0.25"
opencv = "0.95.1"
[profile.release]
debug = true


@@ -211,6 +211,7 @@
mnn
cargo-make
hyperfine
opencv
]
++ (lib.optionals pkgs.stdenv.isDarwin [
apple-sdk_13

ndcv-bridge/Cargo.toml (new file)

@@ -0,0 +1,35 @@
[package]
name = "ndcv-bridge"
version.workspace = true
edition.workspace = true
[dependencies]
bbox.workspace = true
bytemuck.workspace = true
error-stack.workspace = true
fast_image_resize.workspace = true
ndarray = { workspace = true, features = ["rayon"] }
num.workspace = true
opencv = { workspace = true, optional = true }
rayon = "1.10.0"
thiserror.workspace = true
tracing = "0.1.41"
wide = "0.7.32"
img-parts.workspace = true
[dev-dependencies]
divan.workspace = true
ndarray-npy.workspace = true
[features]
opencv = ["dep:opencv"]
default = ["opencv"]
[[bench]]
name = "conversions"
harness = false
[[bench]]
name = "gaussian"
harness = false


@@ -0,0 +1,75 @@
use divan::black_box;
use ndcv_bridge::*;
// #[global_allocator]
// static ALLOC: AllocProfiler = AllocProfiler::system();
fn main() {
divan::main();
}
#[divan::bench]
fn bench_3d_mat_to_ndarray_512() {
bench_mat_to_3d_ndarray(512);
}
#[divan::bench]
fn bench_3d_mat_to_ndarray_1024() {
bench_mat_to_3d_ndarray(1024);
}
#[divan::bench]
fn bench_3d_mat_to_ndarray_2k() {
bench_mat_to_3d_ndarray(2048);
}
#[divan::bench]
fn bench_3d_mat_to_ndarray_4k() {
bench_mat_to_3d_ndarray(4096);
}
#[divan::bench]
fn bench_3d_mat_to_ndarray_8k() {
bench_mat_to_3d_ndarray(8192);
}
#[divan::bench]
fn bench_3d_mat_to_ndarray_8k_ref() {
bench_mat_to_3d_ndarray_ref(8192);
}
#[divan::bench]
fn bench_2d_mat_to_ndarray_8k_ref() {
bench_mat_to_2d_ndarray(8192);
}
fn bench_mat_to_2d_ndarray(size: i32) -> ndarray::Array2<u8> {
let mat =
opencv::core::Mat::new_nd_with_default(&[size, size], opencv::core::CV_8UC1, (200).into())
.expect("failed");
let ndarray: ndarray::Array2<u8> = mat.as_ndarray().expect("failed").to_owned();
ndarray
}
fn bench_mat_to_3d_ndarray(size: i32) -> ndarray::Array3<u8> {
let mat = opencv::core::Mat::new_nd_with_default(
&[size, size],
opencv::core::CV_8UC3,
(200, 100, 10).into(),
)
.expect("failed");
// ndarray::Array3::<u8>::from_mat(black_box(mat)).expect("failed")
let ndarray: ndarray::Array3<u8> = mat.as_ndarray().expect("failed").to_owned();
ndarray
}
fn bench_mat_to_3d_ndarray_ref(size: i32) {
let mut mat = opencv::core::Mat::new_nd_with_default(
&[size, size],
opencv::core::CV_8UC3,
(200, 100, 10).into(),
)
.expect("failed");
let array: ndarray::ArrayView3<u8> = black_box(&mut mat).as_ndarray().expect("failed");
let _ = black_box(array);
}


@@ -0,0 +1,265 @@
use divan::black_box;
use ndarray::*;
use ndcv_bridge::*;
// #[global_allocator]
// static ALLOC: AllocProfiler = AllocProfiler::system();
fn main() {
divan::main();
}
// Helper function to create test images with different patterns
fn create_test_image(size: usize, pattern: &str) -> Array3<u8> {
let mut arr = Array3::<u8>::zeros((size, size, 3));
match pattern {
"edges" => {
// Create a pattern with sharp edges
arr.slice_mut(s![size / 4..3 * size / 4, size / 4..3 * size / 4, ..])
.fill(255);
}
"gradient" => {
// Create a gradual gradient
for i in 0..size {
let val = (i * 255 / size) as u8;
arr.slice_mut(s![i, .., ..]).fill(val);
}
}
"checkerboard" => {
// Create a checkerboard pattern
for i in 0..size {
for j in 0..size {
if (i / 20 + j / 20) % 2 == 0 {
arr[[i, j, 0]] = 255;
arr[[i, j, 1]] = 255;
arr[[i, j, 2]] = 255;
}
}
}
}
_ => arr.fill(255), // Default to solid white
}
arr
}
#[divan::bench_group]
mod sizes {
use super::*;
// Benchmark different image sizes
#[divan::bench(args = [512, 1024, 2048, 4096])]
fn bench_gaussian_sizes_u8(size: usize) {
let arr = Array3::<u8>::ones((size, size, 3));
let _out = black_box(
arr.gaussian_blur((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench(args = [512, 1024, 2048, 4096])]
fn bench_gaussian_sizes_u8_inplace(size: usize) {
let mut arr = Array3::<u8>::ones((size, size, 3));
black_box(
arr.gaussian_blur_inplace((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench(args = [512, 1024, 2048, 4096])]
fn bench_gaussian_sizes_f32(size: usize) {
let arr = Array3::<f32>::ones((size, size, 3));
let _out = black_box(
arr.gaussian_blur((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench(args = [512, 1024, 2048, 4096])]
fn bench_gaussian_sizes_f32_inplace(size: usize) {
let mut arr = Array3::<f32>::ones((size, size, 3));
black_box(
arr.gaussian_blur_inplace((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap(),
);
}
}
// Benchmark different kernel sizes
#[divan::bench(args = [(3, 3), (5, 5), (7, 7), (9, 9), (11, 11)])]
fn bench_gaussian_kernels(kernel_size: (u8, u8)) {
let mut arr = Array3::<u8>::ones((1000, 1000, 3));
arr.gaussian_blur_inplace(kernel_size, 1.0, 1.0, BorderType::BorderConstant)
.unwrap();
}
// Benchmark different sigma values
#[divan::bench(args = [0.5, 1.0, 2.0, 5.0])]
fn bench_gaussian_sigmas(sigma: f64) {
let mut arr = Array3::<u8>::ones((1000, 1000, 3));
arr.gaussian_blur_inplace((3, 3), sigma, sigma, BorderType::BorderConstant)
.unwrap();
}
// Benchmark different sigma_x and sigma_y combinations
#[divan::bench(args = [(0.5, 2.0), (1.0, 1.0), (2.0, 0.5), (3.0, 1.0)])]
fn bench_gaussian_asymmetric_sigmas(sigmas: (f64, f64)) {
let mut arr = Array3::<u8>::ones((1000, 1000, 3));
arr.gaussian_blur_inplace((3, 3), sigmas.0, sigmas.1, BorderType::BorderConstant)
.unwrap();
}
// Benchmark different border types
#[divan::bench]
fn bench_gaussian_border_types() -> Vec<()> {
let border_types = [
BorderType::BorderConstant,
BorderType::BorderReplicate,
BorderType::BorderReflect,
BorderType::BorderReflect101,
];
let mut arr = Array3::<u8>::ones((1000, 1000, 3));
border_types
.iter()
.map(|border_type| {
arr.gaussian_blur_inplace((3, 3), 1.0, 1.0, *border_type)
.unwrap();
})
.collect()
}
// Benchmark different image patterns
#[divan::bench]
fn bench_gaussian_patterns() {
let patterns = ["edges", "gradient", "checkerboard", "solid"];
patterns.iter().for_each(|&pattern| {
let mut arr = create_test_image(1000, pattern);
arr.gaussian_blur_inplace((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap();
})
}
#[divan::bench_group]
mod allocation {
use super::*;
#[divan::bench]
fn bench_gaussian_allocation_inplace() {
let mut arr = Array3::<f32>::ones((3840, 2160, 3));
black_box(
arr.gaussian_blur_inplace((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench]
fn bench_gaussian_allocation_allocate() {
let arr = Array3::<f32>::ones((3840, 2160, 3));
let _out = black_box(
arr.gaussian_blur((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap(),
);
}
}
#[divan::bench_group]
mod realistic {
use super::*;
#[divan::bench]
fn small_800_600_3x3() {
let small_blur = Array3::<u8>::ones((800, 600, 3));
let _blurred = black_box(
small_blur
.gaussian_blur((3, 3), 0.5, 0.5, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench]
fn small_800_600_3x3_inplace() {
let mut small_blur = Array3::<u8>::ones((800, 600, 3));
small_blur
.gaussian_blur_inplace((3, 3), 0.5, 0.5, BorderType::BorderConstant)
.unwrap();
}
#[divan::bench]
fn medium_1920x1080_5x5() {
let medium_blur = Array3::<u8>::ones((1920, 1080, 3));
let _blurred = black_box(
medium_blur
.gaussian_blur((5, 5), 2.0, 2.0, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench]
fn medium_1920x1080_5x5_inplace() {
let mut medium_blur = Array3::<u8>::ones((1920, 1080, 3));
medium_blur
.gaussian_blur_inplace((5, 5), 2.0, 2.0, BorderType::BorderConstant)
.unwrap();
}
#[divan::bench]
fn large_3840x2160_9x9() {
let large_blur = Array3::<u8>::ones((3840, 2160, 3));
let _blurred = black_box(
large_blur
.gaussian_blur((9, 9), 5.0, 5.0, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench]
fn large_3840x2160_9x9_inplace() {
let mut large_blur = Array3::<u8>::ones((3840, 2160, 3));
large_blur
.gaussian_blur_inplace((9, 9), 5.0, 5.0, BorderType::BorderConstant)
.unwrap();
}
#[divan::bench]
fn small_800_600_3x3_f32() {
let small_blur = Array3::<f32>::ones((800, 600, 3));
let _blurred = black_box(
small_blur
.gaussian_blur((3, 3), 0.5, 0.5, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench]
fn small_800_600_3x3_inplace_f32() {
let mut small_blur = Array3::<f32>::ones((800, 600, 3));
small_blur
.gaussian_blur_inplace((3, 3), 0.5, 0.5, BorderType::BorderConstant)
.unwrap();
}
#[divan::bench]
fn medium_1920x1080_5x5_f32() {
let medium_blur = Array3::<f32>::ones((1920, 1080, 3));
let _blurred = black_box(
medium_blur
.gaussian_blur((5, 5), 2.0, 2.0, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench]
fn medium_1920x1080_5x5_inplace_f32() {
let mut medium_blur = Array3::<f32>::ones((1920, 1080, 3));
medium_blur
.gaussian_blur_inplace((5, 5), 2.0, 2.0, BorderType::BorderConstant)
.unwrap();
}
#[divan::bench]
fn large_3840x2160_9x9_f32() {
let large_blur = Array3::<f32>::ones((3840, 2160, 3));
let _blurred = black_box(
large_blur
.gaussian_blur((9, 9), 5.0, 5.0, BorderType::BorderConstant)
.unwrap(),
);
}
#[divan::bench]
fn large_3840x2160_9x9_inplace_f32() {
let mut large_blur = Array3::<f32>::ones((3840, 2160, 3));
large_blur
.gaussian_blur_inplace((9, 9), 5.0, 5.0, BorderType::BorderConstant)
.unwrap();
}
}

ndcv-bridge/src/blend.rs (new file)

@@ -0,0 +1,180 @@
use crate::prelude_::*;
use ndarray::*;
type Result<T, E = Report<NdCvError>> = std::result::Result<T, E>;
mod seal {
pub trait Sealed {}
impl<T: ndarray::Data<Elem = f32>> Sealed for ndarray::ArrayBase<T, ndarray::Ix3> {}
}
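/// Alpha-blending of one image onto another, weighted by a per-pixel mask.
///
/// For each pixel and channel the result is
/// `self * (1 - mask * alpha) + other * (mask * alpha)`, where `mask` has one
/// value per pixel (the spatial shape without the channel axis) and `other`
/// must have the same shape as `self`. The `f32` implementation below uses
/// 4-wide SIMD (`wide::f32x4`) per pixel and runs over pixels in parallel.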
pub trait NdBlend<T, D: ndarray::Dimension>: seal::Sealed {
fn blend(
&self,
mask: ndarray::ArrayView<T, D::Smaller>,
other: ndarray::ArrayView<T, D>,
alpha: T,
) -> Result<ndarray::Array<T, D>>;
fn blend_inplace(
&mut self,
mask: ndarray::ArrayView<T, D::Smaller>,
other: ndarray::ArrayView<T, D>,
alpha: T,
) -> Result<()>;
}
impl<S> NdBlend<f32, Ix3> for ndarray::ArrayBase<S, Ix3>
where
S: ndarray::DataMut<Elem = f32>,
{
fn blend(
&self,
mask: ndarray::ArrayView<f32, Ix2>,
other: ndarray::ArrayView<f32, Ix3>,
alpha: f32,
) -> Result<ndarray::Array<f32, Ix3>> {
if self.shape() != other.shape() {
return Err(NdCvError)
.attach_printable("Shapes of image and other image do not match");
}
if self.shape()[0] != mask.shape()[0] || self.shape()[1] != mask.shape()[1] {
return Err(NdCvError).attach_printable("Shapes of image and mask do not match");
}
let mut output = ndarray::Array3::zeros(self.dim());
let (_height, _width, channels) = self.dim();
Zip::from(output.lanes_mut(Axis(2)))
.and(self.lanes(Axis(2)))
.and(other.lanes(Axis(2)))
.and(mask)
.par_for_each(|mut out, this, other, mask| {
let this = wide::f32x4::from(this.as_slice().expect("Invalid self array"));
let other = wide::f32x4::from(other.as_slice().expect("Invalid other array"));
let mask = wide::f32x4::splat(mask * alpha);
let o = this * (1.0 - mask) + other * mask;
out.as_slice_mut()
.expect("Failed to get mutable slice")
.copy_from_slice(&o.as_array_ref()[..channels]);
});
Ok(output)
}
fn blend_inplace(
&mut self,
mask: ndarray::ArrayView<f32, <Ix3 as Dimension>::Smaller>,
other: ndarray::ArrayView<f32, Ix3>,
alpha: f32,
) -> Result<()> {
if self.shape() != other.shape() {
return Err(NdCvError)
.attach_printable("Shapes of image and other image do not match");
}
if self.shape()[0] != mask.shape()[0] || self.shape()[1] != mask.shape()[1] {
return Err(NdCvError).attach_printable("Shapes of image and mask do not match");
}
let (_height, _width, channels) = self.dim();
// Zip::from(self.lanes_mut(Axis(2)))
// .and(other.lanes(Axis(2)))
// .and(mask)
// .par_for_each(|mut this, other, mask| {
// let this_wide = wide::f32x4::from(this.as_slice().expect("Invalid self array"));
// let other = wide::f32x4::from(other.as_slice().expect("Invalid other array"));
// let mask = wide::f32x4::splat(mask * alpha);
// let o = this_wide * (1.0 - mask) + other * mask;
// this.as_slice_mut()
// .expect("Failed to get mutable slice")
// .copy_from_slice(&o.as_array_ref()[..channels]);
// });
let this = self
.as_slice_mut()
.ok_or(NdCvError)
.attach_printable("Failed to get source image as a continuous slice")?;
let other = other
.as_slice()
.ok_or(NdCvError)
.attach_printable("Failed to get other image as a continuous slice")?;
let mask = mask
.as_slice()
.ok_or(NdCvError)
.attach_printable("Failed to get mask as a continuous slice")?;
use rayon::prelude::*;
this.par_chunks_exact_mut(channels)
.zip(other.par_chunks_exact(channels))
.zip(mask)
.for_each(|((this, other), mask)| {
let this_wide = wide::f32x4::from(&*this);
let other = wide::f32x4::from(other);
let mask = wide::f32x4::splat(mask * alpha);
this.copy_from_slice(
&(this_wide * (1.0 - mask) + other * mask).as_array_ref()[..channels],
);
});
// for h in 0.._height {
// for w in 0.._width {
// let mask_index = h * _width + w;
// let mask = mask[mask_index];
// let mask = wide::f32x4::splat(mask * alpha);
// let this = &mut this[mask_index * channels..(mask_index + 1) * channels];
// let other = &other[mask_index * channels..(mask_index + 1) * channels];
// let this_wide = wide::f32x4::from(&*this);
// let other = wide::f32x4::from(other);
// let o = this_wide * (1.0 - mask) + other * mask;
// this.copy_from_slice(&o.as_array_ref()[..channels]);
// }
// }
Ok(())
}
}
#[test]
pub fn test_blend() {
let img = Array3::<f32>::from_shape_fn((10, 10, 3), |(i, j, k)| match (i, j, k) {
(0..=3, _, 0) => 1f32, // red
(4..=6, _, 1) => 1f32, // green
(7..=9, _, 2) => 1f32, // blue
_ => 0f32,
});
let other = img.clone().permuted_axes([1, 0, 2]).to_owned();
let mask = Array2::<f32>::from_shape_fn((10, 10), |(_, j)| if j > 5 { 1f32 } else { 0f32 });
// let other = Array3::<f32>::zeros((10, 10, 3));
let out = img.blend(mask.view(), other.view(), 1f32).unwrap();
let out_u8 = out.mapv(|v| (v * 255f32) as u8);
let expected = Array3::<u8>::from_shape_fn((10, 10, 3), |(i, j, k)| {
match (i, j, k) {
(0..=3, 0..=5, 0) => u8::MAX, // red
(4..=6, 0..=5, 1) | (_, 6, 1) => u8::MAX, // green
(7..=9, 0..=5, 2) | (_, 7..=10, 2) => u8::MAX, // blue
_ => u8::MIN,
}
});
assert_eq!(out_u8, expected);
}
// #[test]
// pub fn test_blend_inplace() {
// let mut img = Array3::<f32>::from_shape_fn((10, 10, 3), |(i, j, k)| match (i, j, k) {
// (0..=3, _, 0) => 1f32, // red
// (4..=6, _, 1) => 1f32, // green
// (7..=9, _, 2) => 1f32, // blue
// _ => 0f32,
// });
// let other = img.clone().permuted_axes([1, 0, 2]);
// let mask = Array2::<f32>::from_shape_fn((10, 10), |(_, j)| if j > 5 { 1f32 } else { 0f32 });
// // let other = Array3::<f32>::zeros((10, 10, 3));
// img.blend_inplace(mask.view(), other.view(), 1f32).unwrap();
// let out_u8 = img.mapv(|v| (v * 255f32) as u8);
// let expected = Array3::<u8>::from_shape_fn((10, 10, 3), |(i, j, k)| {
// match (i, j, k) {
// (0..=3, 0..=5, 0) => u8::MAX, // red
// (4..=6, 0..=5, 1) | (_, 6, 1) => u8::MAX, // green
// (7..=9, 0..=5, 2) | (_, 7..=10, 2) => u8::MAX, // blue
// _ => u8::MIN,
// }
// });
// assert_eq!(out_u8, expected);
// }


@@ -0,0 +1,43 @@
//! Calculates the minimal up-right (axis-aligned) bounding rectangle of a point set or of the non-zero pixels of a gray-scale image.
//! This wraps OpenCV's `boundingRect` and returns the result as a `bbox::BBox<i32>`.
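//!
//! A short example, mirroring the tests below (not compiled as a doctest):
//!
//! ```rust,ignore
//! let mut arr = ndarray::Array2::<u8>::zeros((10, 10));
//! ndcv_bridge::NdRoiMut::roi_mut(&mut arr, bbox::BBox::new(1, 1, 3, 3)).fill(1);
//! assert_eq!(arr.bounding_rect().unwrap(), bbox::BBox::new(1, 1, 3, 3));
//! ```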
use crate::{prelude_::*, NdAsImage};
pub trait BoundingRect: seal::SealedInternal {
fn bounding_rect(&self) -> Result<bbox::BBox<i32>, NdCvError>;
}
mod seal {
pub trait SealedInternal {}
impl<T, S: ndarray::Data<Elem = T>> SealedInternal for ndarray::ArrayBase<S, ndarray::Ix2> {}
}
impl<S: ndarray::Data<Elem = u8>> BoundingRect for ndarray::ArrayBase<S, ndarray::Ix2> {
fn bounding_rect(&self) -> Result<bbox::BBox<i32>, NdCvError> {
let mat = self.as_image_mat()?;
let rect = opencv::imgproc::bounding_rect(mat.as_ref()).change_context(NdCvError)?;
Ok(bbox::BBox::new(rect.x, rect.y, rect.width, rect.height))
}
}
#[test]
fn test_bounding_rect_empty() {
let arr = ndarray::Array2::<u8>::zeros((10, 10));
let rect = arr.bounding_rect().unwrap();
assert_eq!(rect, bbox::BBox::new(0, 0, 0, 0));
}
#[test]
fn test_bounding_rect_valued() {
let mut arr = ndarray::Array2::<u8>::zeros((10, 10));
crate::NdRoiMut::roi_mut(&mut arr, bbox::BBox::new(1, 1, 3, 3)).fill(1);
let rect = arr.bounding_rect().unwrap();
assert_eq!(rect, bbox::BBox::new(1, 1, 3, 3));
}
#[test]
fn test_bounding_rect_complex() {
let mut arr = ndarray::Array2::<u8>::zeros((10, 10));
crate::NdRoiMut::roi_mut(&mut arr, bbox::BBox::new(1, 3, 3, 3)).fill(1);
crate::NdRoiMut::roi_mut(&mut arr, bbox::BBox::new(2, 3, 3, 5)).fill(5);
let rect = arr.bounding_rect().unwrap();
assert_eq!(rect, bbox::BBox::new(1, 3, 4, 5));
}

ndcv-bridge/src/codec.rs (new file)

@@ -0,0 +1,4 @@
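//! Image encoding/decoding built on OpenCV's `imgcodecs`.
//!
//! `decode` and `encode` define the generic `Decodable`/`Decoder` and
//! `Encodable`/`Encoder` traits, `codecs` provides the OpenCV-backed
//! `CvDecoder`/`CvEncoder` implementations, and `error` holds the
//! `ErrorReason` values attached to I/O error reports.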
pub mod codecs;
pub mod decode;
pub mod encode;
pub mod error;


@@ -0,0 +1,218 @@
use super::decode::Decoder;
use super::encode::Encoder;
use crate::conversions::matref::MatRef;
use crate::NdCvError;
use error_stack::*;
use img_parts::{
jpeg::{markers, Jpeg},
Bytes,
};
use opencv::{
core::{Mat, Vector, VectorToVec},
imgcodecs::{imdecode, imencode, ImreadModes, ImwriteFlags},
};
#[derive(Debug)]
pub enum CvEncoder {
Jpeg(CvJpegEncFlags),
Tiff(CvTiffEncFlags),
}
pub enum EncKind {
Jpeg,
Tiff,
}
impl CvEncoder {
fn kind(&self) -> EncKind {
match self {
Self::Jpeg(_) => EncKind::Jpeg,
Self::Tiff(_) => EncKind::Tiff,
}
}
fn extension(&self) -> &'static str {
match self {
Self::Jpeg(_) => ".jpg",
Self::Tiff(_) => ".tiff",
}
}
fn to_cv_param_list(&self) -> Vector<i32> {
match self {
Self::Jpeg(flags) => flags.to_cv_param_list(),
Self::Tiff(flags) => flags.to_cv_param_list(),
}
}
}
#[derive(Default, Debug)]
pub struct CvJpegEncFlags {
quality: Option<usize>,
progressive: Option<bool>,
optimize: Option<bool>,
remove_app0: Option<bool>,
}
#[derive(Default, Debug)]
pub struct CvTiffEncFlags {
compression: Option<i32>,
}
impl CvTiffEncFlags {
pub fn new() -> Self {
Self::default().with_compression(1)
}
pub fn with_compression(mut self, compression: i32) -> Self {
self.compression = Some(compression);
self
}
fn to_cv_param_list(&self) -> Vector<i32> {
let iter = [(
ImwriteFlags::IMWRITE_TIFF_COMPRESSION as i32,
self.compression.map(|i| i as i32),
)]
.into_iter()
.filter_map(|(flag, opt)| opt.map(|o| [flag, o]))
.flatten();
Vector::from_iter(iter)
}
}
impl CvJpegEncFlags {
pub fn new() -> Self {
Self::default()
}
pub fn with_quality(mut self, quality: usize) -> Self {
self.quality = Some(quality);
self
}
pub fn remove_app0_marker(mut self, val: bool) -> Self {
self.remove_app0 = Some(val);
self
}
fn to_cv_param_list(&self) -> Vector<i32> {
let iter = [
(
ImwriteFlags::IMWRITE_JPEG_QUALITY as i32,
self.quality.map(|i| i as i32),
),
(
ImwriteFlags::IMWRITE_JPEG_PROGRESSIVE as i32,
self.progressive.map(|i| i as i32),
),
(
ImwriteFlags::IMWRITE_JPEG_OPTIMIZE as i32,
self.optimize.map(|i| i as i32),
),
]
.into_iter()
.filter_map(|(flag, opt)| opt.map(|o| [flag, o]))
.flatten();
Vector::from_iter(iter)
}
}
impl Encoder for CvEncoder {
type Input<'a>
= MatRef<'a>
where
Self: 'a;
fn encode(&self, input: Self::Input<'_>) -> Result<Vec<u8>, NdCvError> {
let mut buf = Vector::default();
let params = self.to_cv_param_list();
imencode(self.extension(), &input.as_ref(), &mut buf, &params).change_context(NdCvError)?;
match self.kind() {
EncKind::Jpeg => {
let bytes = Bytes::from(buf.to_vec());
let mut jpg = Jpeg::from_bytes(bytes).change_context(NdCvError)?;
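// Note: APP0 (JFIF) segments are currently stripped from every encoded JPEG;
// the `remove_app0` flag on `CvJpegEncFlags` is not consulted here yet.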
jpg.remove_segments_by_marker(markers::APP0);
let bytes = jpg.encoder().bytes();
Ok(bytes.to_vec())
}
EncKind::Tiff => Ok(buf.to_vec()),
}
}
}
pub enum CvDecoder {
Jpeg(CvJpegDecFlags),
}
impl CvDecoder {
fn to_cv_decode_flag(&self) -> i32 {
match self {
Self::Jpeg(flags) => flags.to_cv_decode_flag(),
}
}
}
#[derive(Default)]
pub enum ColorMode {
#[default]
Color,
GrayScale,
}
impl ColorMode {
fn to_cv_decode_flag(&self) -> i32 {
match self {
Self::Color => ImreadModes::IMREAD_COLOR as i32,
Self::GrayScale => ImreadModes::IMREAD_GRAYSCALE as i32,
}
}
}
#[derive(Default)]
pub struct CvJpegDecFlags {
color_mode: ColorMode,
ignore_orientation: bool,
}
impl CvJpegDecFlags {
pub fn new() -> Self {
Self::default()
}
pub fn with_color_mode(mut self, color_mode: ColorMode) -> Self {
self.color_mode = color_mode;
self
}
pub fn with_ignore_orientation(mut self, ignore_orientation: bool) -> Self {
self.ignore_orientation = ignore_orientation;
self
}
fn to_cv_decode_flag(&self) -> i32 {
let flag = self.color_mode.to_cv_decode_flag();
if self.ignore_orientation {
flag | ImreadModes::IMREAD_IGNORE_ORIENTATION as i32
} else {
flag
}
}
}
impl Decoder for CvDecoder {
type Output = Mat;
fn decode(&self, input: impl AsRef<[u8]>) -> Result<Self::Output, NdCvError> {
let flag = self.to_cv_decode_flag();
let out = imdecode(&Vector::from_slice(input.as_ref()), flag).change_context(NdCvError)?;
Ok(out)
}
}


@@ -0,0 +1,53 @@
#![deny(warnings)]
use super::codecs::CvDecoder;
use super::error::ErrorReason;
use crate::NdCvError;
use crate::{conversions::NdCvConversion, NdAsImage};
use error_stack::*;
use ndarray::Array;
use std::path::Path;
pub trait Decodable<D: Decoder>: Sized {
fn decode(buf: impl AsRef<[u8]>, decoder: &D) -> Result<Self, NdCvError> {
let output = decoder.decode(buf)?;
Self::transform(output)
}
fn read(&self, path: impl AsRef<Path>, decoder: &D) -> Result<Self, NdCvError> {
let buf = std::fs::read(path)
.map_err(|e| match e.kind() {
std::io::ErrorKind::NotFound => {
Report::new(e).attach_printable(ErrorReason::ImageReadFileNotFound)
}
std::io::ErrorKind::PermissionDenied => {
Report::new(e).attach_printable(ErrorReason::ImageReadPermissionDenied)
}
std::io::ErrorKind::OutOfMemory => {
Report::new(e).attach_printable(ErrorReason::OutOfMemory)
}
std::io::ErrorKind::StorageFull => {
Report::new(e).attach_printable(ErrorReason::OutOfStorage)
}
_ => Report::new(e).attach_printable(ErrorReason::ImageReadOtherError),
})
.change_context(NdCvError)?;
Self::decode(buf, decoder)
}
fn transform(input: D::Output) -> Result<Self, NdCvError>;
}
pub trait Decoder {
type Output: Sized;
fn decode(&self, buf: impl AsRef<[u8]>) -> Result<Self::Output, NdCvError>;
}
impl<T: bytemuck::Pod + Copy, D: ndarray::Dimension> Decodable<CvDecoder> for Array<T, D>
where
Self: NdAsImage<T, D>,
{
fn transform(input: <CvDecoder as Decoder>::Output) -> Result<Self, NdCvError> {
Self::from_mat(input)
}
}
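A short decoding sketch built on the traits above; the module paths, re-exports, and the JPEG byte source are assumptions for illustration:

```rust
// Sketch only: exact module paths are assumptions based on the modules in this commit.
use ndcv_bridge::codec::codecs::{ColorMode, CvDecoder, CvJpegDecFlags};
use ndcv_bridge::codec::decode::Decodable;

fn load_rgb(jpeg_bytes: &[u8]) -> ndarray::Array3<u8> {
    // Decode JPEG bytes straight into an H x W x 3 array via the OpenCV-backed decoder.
    let decoder = CvDecoder::Jpeg(CvJpegDecFlags::new().with_color_mode(ColorMode::Color));
    ndarray::Array3::<u8>::decode(jpeg_bytes, &decoder).expect("decode failed")
}
```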


@@ -0,0 +1,56 @@
use super::codecs::CvEncoder;
use super::error::ErrorReason;
use crate::conversions::NdAsImage;
use crate::NdCvError;
use error_stack::*;
use ndarray::ArrayBase;
use std::path::Path;
pub trait Encodable<E: Encoder> {
fn encode(&self, encoder: &E) -> Result<Vec<u8>, NdCvError> {
let input = self.transform()?;
encoder.encode(input)
}
fn write(&self, path: impl AsRef<Path>, encoder: &E) -> Result<(), NdCvError> {
let buf = self.encode(encoder)?;
std::fs::write(path, buf)
.map_err(|e| match e.kind() {
std::io::ErrorKind::NotFound => {
Report::new(e).attach_printable(ErrorReason::ImageWriteFileNotFound)
}
std::io::ErrorKind::PermissionDenied => {
Report::new(e).attach_printable(ErrorReason::ImageWritePermissionDenied)
}
std::io::ErrorKind::OutOfMemory => {
Report::new(e).attach_printable(ErrorReason::OutOfMemory)
}
std::io::ErrorKind::StorageFull => {
Report::new(e).attach_printable(ErrorReason::OutOfStorage)
}
_ => Report::new(e).attach_printable(ErrorReason::ImageWriteOtherError),
})
.change_context(NdCvError)
}
fn transform(&self) -> Result<<E as Encoder>::Input<'_>, NdCvError>;
}
pub trait Encoder {
type Input<'a>
where
Self: 'a;
fn encode(&self, input: Self::Input<'_>) -> Result<Vec<u8>, NdCvError>;
}
impl<T: bytemuck::Pod + Copy, S: ndarray::Data<Elem = T>, D: ndarray::Dimension>
Encodable<CvEncoder> for ArrayBase<S, D>
where
Self: NdAsImage<T, D>,
{
fn transform(&self) -> Result<<CvEncoder as Encoder>::Input<'_>, NdCvError> {
self.as_image_mat()
}
}
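And the encoding counterpart; module paths, the quality value, and the output filename are illustrative assumptions:

```rust
// Sketch only: module paths and the output path are illustrative assumptions.
use ndcv_bridge::codec::codecs::{CvEncoder, CvJpegEncFlags};
use ndcv_bridge::codec::encode::Encodable;

fn save_jpeg(img: &ndarray::Array3<u8>) {
    let encoder = CvEncoder::Jpeg(CvJpegEncFlags::new().with_quality(90));
    // Encode to an in-memory JPEG buffer...
    let _bytes: Vec<u8> = img.encode(&encoder).expect("encode failed");
    // ...or write the encoded image straight to disk.
    img.write("out.jpg", &encoder).expect("write failed");
}
```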


@@ -0,0 +1,19 @@
#[derive(Debug)]
pub enum ErrorReason {
ImageReadFileNotFound,
ImageReadPermissionDenied,
ImageReadOtherError,
ImageWriteFileNotFound,
ImageWritePermissionDenied,
ImageWriteOtherError,
OutOfMemory,
OutOfStorage,
}
impl std::fmt::Display for ErrorReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}


@@ -0,0 +1,88 @@
//! Colorspace conversion functions
//! ## Example
//! ```rust,ignore
//! use ndarray::Array3;
//! let arr = Array3::<u8>::ones((100, 100, 4)); // RGBA input
//! let out: Array3<u8> = arr.cvt::<Rgba<u8>, Rgb<u8>>();
//! ```
use crate::prelude_::*;
use ndarray::*;
pub trait ColorSpace {
type Elem: seal::Sealed;
type Dim: ndarray::Dimension;
const CHANNELS: usize;
}
mod seal {
pub trait Sealed: bytemuck::Pod {}
// impl<T> Sealed for T {}
impl Sealed for u8 {} // 0 to 255
impl Sealed for u16 {} // 0 to 65535
impl Sealed for f32 {} // 0 to 1
}
macro_rules! define_color_space {
($name:ident, $channels:expr, $depth:ty) => {
pub struct $name<T> {
__phantom: core::marker::PhantomData<T>,
}
impl<T: seal::Sealed> ColorSpace for $name<T> {
type Elem = T;
type Dim = $depth;
const CHANNELS: usize = $channels;
}
};
}
define_color_space!(Rgb, 3, Ix3);
define_color_space!(Bgr, 3, Ix3);
define_color_space!(Rgba, 4, Ix3);
pub trait NdArray<T, D: ndarray::Dimension> {}
impl<T, D: ndarray::Dimension, S: ndarray::Data<Elem = T>> NdArray<S, D> for ArrayBase<S, D> {}
pub trait ConvertColor<T, U>
where
T: ColorSpace,
U: ColorSpace,
Self: NdArray<T::Elem, T::Dim>,
{
type Output: NdArray<U::Elem, U::Dim>;
fn cvt(&self) -> Self::Output;
}
// impl<T: seal::Sealed, S: ndarray::Data<Elem = T>> ConvertColor<Rgb<T>, Bgr<T>> for ArrayBase<S, Ix3>
// where
// Self: NdArray<T, Ix3>,
// {
// type Output = ArrayView3<'a, T>;
// fn cvt(&self) -> CowArray<T, Ix3> {
// self.view().permuted_axes([2, 1, 0]).into()
// }
// }
//
// impl<T: seal::Sealed, S: ndarray::Data<Elem = T>> ConvertColor<Bgr<T>, Rgb<T>> for ArrayBase<S, Ix3>
// where
// Self: NdArray<T, Ix3>,
// {
// type Output = ArrayView3<'a, T>;
// fn cvt(&self) -> CowArray<T, Ix3> {
// self.view().permuted_axes([2, 1, 0]).into()
// }
// }
// impl<T: seal::Sealed + num::One + num::Zero, S: ndarray::Data<Elem = T>>
// ConvertColor<Rgb<T>, Rgba<T>> for ArrayBase<S, Ix3>
// {
// fn cvt(&self) -> CowArray<T, Ix3> {
// let mut out = Array3::<T>::zeros((self.height(), self.width(), 4));
// // Zip::from(&mut out).and(self).for_each(|out, &in_| {
// // out[0] = in_[0];
// // out[1] = in_[1];
// // out[2] = in_[2];
// // out[3] = T::one();
// // });
// out.into()
// }
// }


@@ -0,0 +1,113 @@
use crate::{conversions::MatAsNd, prelude_::*, NdAsImage, NdAsImageMut};
pub(crate) mod seal {
pub trait ConnectedComponentOutput: Sized + Copy + bytemuck::Pod + num::Zero {
fn as_cv_type() -> i32 {
crate::type_depth::<Self>()
}
}
impl ConnectedComponentOutput for i32 {}
impl ConnectedComponentOutput for u16 {}
}
pub trait NdCvConnectedComponents<T> {
fn connected_components<O: seal::ConnectedComponentOutput>(
&self,
connectivity: Connectivity,
) -> Result<ndarray::Array2<O>, NdCvError>;
fn connected_components_with_stats<O: seal::ConnectedComponentOutput>(
&self,
connectivity: Connectivity,
) -> Result<ConnectedComponentStats<O>, NdCvError>;
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Connectivity {
Four = 4,
#[default]
Eight = 8,
}
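/// Labels plus per-component statistics, as returned by
/// `connected_components_with_stats`.
///
/// Following OpenCV's `connectedComponentsWithStats` convention, `stats` is a
/// `num_labels x 5` matrix of `[left, top, width, height, area]` per label and
/// `centroids` is a `num_labels x 2` matrix of `(x, y)` centroids; label 0 is
/// the background.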
#[derive(Debug, Clone)]
pub struct ConnectedComponentStats<O: seal::ConnectedComponentOutput> {
pub num_labels: i32,
pub labels: ndarray::Array2<O>,
pub stats: ndarray::Array2<i32>,
pub centroids: ndarray::Array2<f64>,
}
// use crate::conversions::NdCvConversionRef;
impl<T: bytemuck::Pod, S: ndarray::Data<Elem = T>> NdCvConnectedComponents<T>
for ndarray::ArrayBase<S, ndarray::Ix2>
where
ndarray::Array2<T>: NdAsImage<T, ndarray::Ix2>,
{
fn connected_components<O: seal::ConnectedComponentOutput>(
&self,
connectivity: Connectivity,
) -> Result<ndarray::Array2<O>, NdCvError> {
let mat = self.as_image_mat()?;
let mut labels = ndarray::Array2::<O>::zeros(self.dim());
let mut cv_labels = labels.as_image_mat_mut()?;
opencv::imgproc::connected_components(
mat.as_ref(),
cv_labels.as_mut(),
connectivity as i32,
O::as_cv_type(),
)
.change_context(NdCvError)?;
Ok(labels)
}
fn connected_components_with_stats<O: seal::ConnectedComponentOutput>(
&self,
connectivity: Connectivity,
) -> Result<ConnectedComponentStats<O>, NdCvError> {
let mut labels = ndarray::Array2::<O>::zeros(self.dim());
let mut stats = opencv::core::Mat::default();
let mut centroids = opencv::core::Mat::default();
let num_labels = opencv::imgproc::connected_components_with_stats(
self.as_image_mat()?.as_ref(),
labels.as_image_mat_mut()?.as_mut(),
&mut stats,
&mut centroids,
connectivity as i32,
O::as_cv_type(),
)
.change_context(NdCvError)?;
let stats = stats.as_ndarray()?.to_owned();
let centroids = centroids.as_ndarray()?.to_owned();
Ok(ConnectedComponentStats {
labels,
stats,
centroids,
num_labels,
})
}
}
#[test]
fn test_connected_components() {
use opencv::core::MatTrait as _;
let mat = opencv::core::Mat::new_nd_with_default(&[10, 10], opencv::core::CV_8UC1, 0.into())
.expect("failed");
let roi1 = opencv::core::Rect::new(2, 2, 2, 2);
let roi2 = opencv::core::Rect::new(6, 6, 3, 3);
let mut mat1 = opencv::core::Mat::roi(&mat, roi1).expect("failed");
mat1.set_scalar(1.into()).expect("failed");
let mut mat2 = opencv::core::Mat::roi(&mat, roi2).expect("failed");
mat2.set_scalar(1.into()).expect("failed");
let array2: ndarray::ArrayView2<u8> = mat.as_ndarray().expect("failed");
let output = array2
.connected_components::<u16>(Connectivity::Four)
.expect("failed");
let expected = {
let mut expected = ndarray::Array2::zeros((10, 10));
expected.slice_mut(ndarray::s![2..4, 2..4]).fill(1);
expected.slice_mut(ndarray::s![6..9, 6..9]).fill(2);
expected
};
assert_eq!(output, expected);
}

ndcv-bridge/src/contours.rs (new file)

@@ -0,0 +1,266 @@
//! <https://docs.rs/opencv/latest/opencv/imgproc/fn.find_contours.html>
#![deny(warnings)]
use crate::conversions::*;
use crate::prelude_::*;
use bbox::Point;
use ndarray::*;
#[repr(C)]
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
pub enum ContourRetrievalMode {
#[default]
External = 0, // RETR_EXTERNAL
List = 1, // RETR_LIST
CComp = 2, // RETR_CCOMP
Tree = 3, // RETR_TREE
FloodFill = 4, // RETR_FLOODFILL
}
#[repr(C)]
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
pub enum ContourApproximationMethod {
#[default]
None = 1, // CHAIN_APPROX_NONE
Simple = 2, // CHAIN_APPROX_SIMPLE
Tc89L1 = 3, // CHAIN_APPROX_TC89_L1
Tc89Kcos = 4, // CHAIN_APPROX_TC89_KCOS
}
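/// One entry of the contour hierarchy, mirroring OpenCV's
/// `[next, previous, first_child, parent]` representation: each field is an
/// index into the contour list, with `-1` meaning "no such contour".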
#[derive(Debug, Clone)]
pub struct ContourHierarchy {
pub next: i32,
pub previous: i32,
pub first_child: i32,
pub parent: i32,
}
#[derive(Debug, Clone)]
pub struct ContourResult {
pub contours: Vec<Vec<Point<i32>>>,
pub hierarchy: Vec<ContourHierarchy>,
}
mod seal {
pub trait Sealed {}
impl Sealed for u8 {}
}
pub trait NdCvFindContours<T: bytemuck::Pod + seal::Sealed>:
crate::image::NdImage + crate::conversions::NdAsImage<T, ndarray::Ix2>
{
fn find_contours(
&self,
mode: ContourRetrievalMode,
method: ContourApproximationMethod,
) -> Result<Vec<Vec<Point<i32>>>, NdCvError>;
fn find_contours_with_hierarchy(
&self,
mode: ContourRetrievalMode,
method: ContourApproximationMethod,
) -> Result<ContourResult, NdCvError>;
fn find_contours_def(&self) -> Result<Vec<Vec<Point<i32>>>, NdCvError> {
self.find_contours(
ContourRetrievalMode::External,
ContourApproximationMethod::Simple,
)
}
fn find_contours_with_hierarchy_def(&self) -> Result<ContourResult, NdCvError> {
self.find_contours_with_hierarchy(
ContourRetrievalMode::External,
ContourApproximationMethod::Simple,
)
}
}
pub trait NdCvContourArea<T: bytemuck::Pod> {
fn contours_area(&self, oriented: bool) -> Result<f64, NdCvError>;
fn contours_area_def(&self) -> Result<f64, NdCvError> {
self.contours_area(false)
}
}
impl<T: ndarray::RawData + ndarray::Data<Elem = u8>> NdCvFindContours<u8> for ArrayBase<T, Ix2> {
fn find_contours(
&self,
mode: ContourRetrievalMode,
method: ContourApproximationMethod,
) -> Result<Vec<Vec<Point<i32>>>, NdCvError> {
let cv_self = self.as_image_mat()?;
let mut contours = opencv::core::Vector::<opencv::core::Vector<opencv::core::Point>>::new();
opencv::imgproc::find_contours(
&*cv_self,
&mut contours,
mode as i32,
method as i32,
opencv::core::Point::new(0, 0),
)
.change_context(NdCvError)
.attach_printable("Failed to find contours")?;
let mut result = Vec::new();
for i in 0..contours.len() {
let contour = contours.get(i).change_context(NdCvError)?;
let points: Vec<Point<i32>> = contour.iter().map(|pt| Point::new(pt.x, pt.y)).collect();
result.push(points);
}
Ok(result)
}
fn find_contours_with_hierarchy(
&self,
mode: ContourRetrievalMode,
method: ContourApproximationMethod,
) -> Result<ContourResult, NdCvError> {
let cv_self = self.as_image_mat()?;
let mut contours = opencv::core::Vector::<opencv::core::Vector<opencv::core::Point>>::new();
let mut hierarchy = opencv::core::Vector::<opencv::core::Vec4i>::new();
opencv::imgproc::find_contours_with_hierarchy(
&*cv_self,
&mut contours,
&mut hierarchy,
mode as i32,
method as i32,
opencv::core::Point::new(0, 0),
)
.change_context(NdCvError)
.attach_printable("Failed to find contours with hierarchy")?;
let mut contour_list = Vec::new();
for i in 0..contours.len() {
let contour = contours.get(i).change_context(NdCvError)?;
let points: Vec<Point<i32>> = contour.iter().map(|pt| Point::new(pt.x, pt.y)).collect();
contour_list.push(points);
}
let mut hierarchy_list = Vec::new();
for i in 0..hierarchy.len() {
let h = hierarchy.get(i).change_context(NdCvError)?;
hierarchy_list.push(ContourHierarchy {
next: h[0],
previous: h[1],
first_child: h[2],
parent: h[3],
});
}
Ok(ContourResult {
contours: contour_list,
hierarchy: hierarchy_list,
})
}
}
impl<T> NdCvContourArea<T> for Vec<Point<T>>
where
T: bytemuck::Pod + num::traits::AsPrimitive<i32>,
{
fn contours_area(&self, oriented: bool) -> Result<f64, NdCvError> {
if self.is_empty() {
return Ok(0.0);
}
let mut cv_contour: opencv::core::Vector<opencv::core::Point> = opencv::core::Vector::new();
self.iter().for_each(|point| {
let point = point.cast::<i32>();
cv_contour.push(opencv::core::Point::new(point.x(), point.y()));
});
opencv::imgproc::contour_area(&cv_contour, oriented)
.change_context(NdCvError)
.attach_printable("Failed to calculate contour area")
}
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::Array2;
fn simple_binary_rect_image() -> Array2<u8> {
let mut img = Array2::<u8>::zeros((10, 10));
for i in 2..8 {
for j in 3..7 {
img[(i, j)] = 255;
}
}
img
}
#[test]
fn test_find_contours_external_simple() {
let img = simple_binary_rect_image();
let contours = img
.find_contours(
ContourRetrievalMode::External,
ContourApproximationMethod::Simple,
)
.expect("Failed to find contours");
assert_eq!(contours.len(), 1);
assert!(contours[0].len() >= 4);
}
#[test]
fn test_find_contours_with_hierarchy() {
let img = simple_binary_rect_image();
let res = img
.find_contours_with_hierarchy(
ContourRetrievalMode::External,
ContourApproximationMethod::Simple,
)
.expect("Failed to find contours with hierarchy");
assert_eq!(res.contours.len(), 1);
assert_eq!(res.hierarchy.len(), 1);
let h = &res.hierarchy[0];
assert_eq!(h.parent, -1);
assert_eq!(h.first_child, -1);
}
#[test]
fn test_default_methods() {
let img = simple_binary_rect_image();
let contours = img.find_contours_def().unwrap();
let res = img.find_contours_with_hierarchy_def().unwrap();
assert_eq!(contours.len(), 1);
assert_eq!(res.contours.len(), 1);
}
#[test]
fn test_contour_area_calculation() {
let img = simple_binary_rect_image();
let contours = img.find_contours_def().unwrap();
let expected_area = 15.;
let area = contours[0].contours_area_def().unwrap();
assert!(
(area - expected_area).abs() < 1.0,
"Area mismatch: got {area}, expected {expected_area}",
);
}
#[test]
fn test_empty_input_returns_no_contours() {
let img = Array2::<u8>::zeros((10, 10));
let contours = img.find_contours_def().unwrap();
assert!(contours.is_empty());
let res = img.find_contours_with_hierarchy_def().unwrap();
assert!(res.contours.is_empty());
assert!(res.hierarchy.is_empty());
}
#[test]
fn test_contour_area_empty_contour() {
let contour: Vec<Point<i32>> = vec![];
let area = contour.contours_area_def().unwrap();
assert_eq!(area, 0.0);
}
}


@@ -0,0 +1,337 @@
//! Mat <--> ndarray conversion traits
//!
//! Conversion Table
//!
//! | ndarray | Mat |
//! |--------- |----- |
//! | Array<T, Ix1> | Mat(ndims = 1, channels = 1) |
//! | Array<T, Ix2> | Mat(ndims = 2, channels = 1) |
//! | Array<T, Ix2> | Mat(ndims = 1, channels = X) |
//! | Array<T, Ix3> | Mat(ndims = 3, channels = 1) |
//! | Array<T, Ix3> | Mat(ndims = 2, channels = X) |
//! | Array<T, Ix4> | Mat(ndims = 4, channels = 1) |
//! | Array<T, Ix4> | Mat(ndims = 3, channels = X) |
//! | Array<T, Ix5> | Mat(ndims = 5, channels = 1) |
//! | Array<T, Ix5> | Mat(ndims = 4, channels = X) |
//! | Array<T, Ix6> | Mat(ndims = 6, channels = 1) |
//! | Array<T, Ix6> | Mat(ndims = 5, channels = X) |
//!
//! X is the channel count of the Mat; the channels become the array's last axis.
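//!
//! For example (mirroring `test_3d_mat_to_ndarray` below; not compiled as a doctest):
//!
//! ```rust,ignore
//! use ndcv_bridge::*;
//! // ndims = 2, channels = 3  =>  Ix3 view of shape [20, 30, 3]
//! let mat = opencv::core::Mat::new_nd_with_default(
//!     &[20, 30],
//!     opencv::core::CV_32FC3,
//!     (200, 200, 200).into(),
//! )
//! .unwrap();
//! let view: ndarray::ArrayView3<f32> = mat.as_ndarray().unwrap();
//! assert_eq!(view.shape(), [20, 30, 3]);
//! ```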
use crate::type_depth;
use crate::NdCvError;
use error_stack::*;
use ndarray::{Ix2, Ix3};
use opencv::core::MatTraitConst;
mod impls;
pub(crate) mod matref;
use matref::{MatRef, MatRefMut};
pub(crate) mod seal {
pub trait SealedInternal {}
impl<T, S: ndarray::Data<Elem = T>, D> SealedInternal for ndarray::ArrayBase<S, D> {}
// impl<T, S: ndarray::DataMut<Elem = T>, D> SealedInternal for ndarray::ArrayBase<S, D> {}
}
pub trait NdCvConversion<T: bytemuck::Pod + Copy, D: ndarray::Dimension>:
seal::SealedInternal + Sized
{
fn to_mat(&self) -> Result<opencv::core::Mat, NdCvError>;
fn from_mat(
mat: opencv::core::Mat,
) -> Result<ndarray::ArrayBase<ndarray::OwnedRepr<T>, D>, NdCvError>;
}
impl<T: bytemuck::Pod + Copy, S: ndarray::Data<Elem = T>, D: ndarray::Dimension>
NdCvConversion<T, D> for ndarray::ArrayBase<S, D>
where
Self: NdAsImage<T, D>,
{
fn to_mat(&self) -> Result<opencv::core::Mat, NdCvError> {
Ok(self.as_image_mat()?.mat.clone())
}
fn from_mat(
mat: opencv::core::Mat,
) -> Result<ndarray::ArrayBase<ndarray::OwnedRepr<T>, D>, NdCvError> {
let ndarray = unsafe { impls::mat_to_ndarray::<T, D>(&mat) }.change_context(NdCvError)?;
Ok(ndarray.to_owned())
}
}
pub trait MatAsNd {
fn as_ndarray<T: bytemuck::Pod, D: ndarray::Dimension>(
&self,
) -> Result<ndarray::ArrayView<T, D>, NdCvError>;
}
impl MatAsNd for opencv::core::Mat {
fn as_ndarray<T: bytemuck::Pod, D: ndarray::Dimension>(
&self,
) -> Result<ndarray::ArrayView<T, D>, NdCvError> {
unsafe { impls::mat_to_ndarray::<T, D>(self) }.change_context(NdCvError)
}
}
pub trait NdAsMat<T: bytemuck::Pod + Copy, D: ndarray::Dimension> {
fn as_single_channel_mat(&self) -> Result<MatRef, NdCvError>;
fn as_multi_channel_mat(&self) -> Result<MatRef, NdCvError>;
}
pub trait NdAsMatMut<T: bytemuck::Pod + Copy, D: ndarray::Dimension>: NdAsMat<T, D> {
fn as_single_channel_mat_mut(&mut self) -> Result<MatRefMut, NdCvError>;
fn as_multi_channel_mat_mut(&mut self) -> Result<MatRefMut, NdCvError>;
}
impl<T: bytemuck::Pod, S: ndarray::Data<Elem = T>, D: ndarray::Dimension> NdAsMat<T, D>
for ndarray::ArrayBase<S, D>
{
fn as_single_channel_mat(&self) -> Result<MatRef, NdCvError> {
let mat = unsafe { impls::ndarray_to_mat_regular(self) }.change_context(NdCvError)?;
Ok(MatRef::new(mat))
}
fn as_multi_channel_mat(&self) -> Result<MatRef, NdCvError> {
let mat = unsafe { impls::ndarray_to_mat_consolidated(self) }.change_context(NdCvError)?;
Ok(MatRef::new(mat))
}
}
impl<T: bytemuck::Pod, S: ndarray::DataMut<Elem = T>, D: ndarray::Dimension> NdAsMatMut<T, D>
for ndarray::ArrayBase<S, D>
{
fn as_single_channel_mat_mut(&mut self) -> Result<MatRefMut, NdCvError> {
let mat = unsafe { impls::ndarray_to_mat_regular(self) }.change_context(NdCvError)?;
Ok(MatRefMut::new(mat))
}
fn as_multi_channel_mat_mut(&mut self) -> Result<MatRefMut, NdCvError> {
let mat = unsafe { impls::ndarray_to_mat_consolidated(self) }.change_context(NdCvError)?;
Ok(MatRefMut::new(mat))
}
}
pub trait NdAsImage<T: bytemuck::Pod, D: ndarray::Dimension> {
fn as_image_mat(&self) -> Result<MatRef, NdCvError>;
}
pub trait NdAsImageMut<T: bytemuck::Pod, D: ndarray::Dimension> {
fn as_image_mat_mut(&mut self) -> Result<MatRefMut, NdCvError>;
}
impl<T, S> NdAsImage<T, Ix2> for ndarray::ArrayBase<S, Ix2>
where
T: bytemuck::Pod + Copy,
S: ndarray::Data<Elem = T>,
{
fn as_image_mat(&self) -> Result<MatRef, NdCvError> {
self.as_single_channel_mat()
}
}
impl<T, S> NdAsImageMut<T, Ix2> for ndarray::ArrayBase<S, Ix2>
where
T: bytemuck::Pod + Copy,
S: ndarray::DataMut<Elem = T>,
{
fn as_image_mat_mut(&mut self) -> Result<MatRefMut, NdCvError> {
self.as_single_channel_mat_mut()
}
}
impl<T, S> NdAsImage<T, Ix3> for ndarray::ArrayBase<S, Ix3>
where
T: bytemuck::Pod + Copy,
S: ndarray::Data<Elem = T>,
{
fn as_image_mat(&self) -> Result<MatRef, NdCvError> {
self.as_multi_channel_mat()
}
}
impl<T, S> NdAsImageMut<T, Ix3> for ndarray::ArrayBase<S, Ix3>
where
T: bytemuck::Pod + Copy,
S: ndarray::DataMut<Elem = T>,
{
fn as_image_mat_mut(&mut self) -> Result<MatRefMut, NdCvError> {
self.as_multi_channel_mat_mut()
}
}
#[test]
fn test_1d_mat_to_ndarray() {
let mat = opencv::core::Mat::new_nd_with_default(
&[10],
opencv::core::CV_MAKE_TYPE(opencv::core::CV_8U, 1),
200.into(),
)
.expect("failed");
let array: ndarray::ArrayView1<u8> = mat.as_ndarray().expect("failed");
array.into_iter().for_each(|&x| assert_eq!(x, 200));
}
#[test]
fn test_2d_mat_to_ndarray() {
let mat = opencv::core::Mat::new_nd_with_default(
&[10],
opencv::core::CV_16SC3,
(200, 200, 200).into(),
)
.expect("failed");
let array2: ndarray::ArrayView2<i16> = mat.as_ndarray().expect("failed");
assert_eq!(array2.shape(), [10, 3]);
array2.into_iter().for_each(|&x| {
assert_eq!(x, 200);
});
}
#[test]
fn test_3d_mat_to_ndarray() {
let mat = opencv::core::Mat::new_nd_with_default(
&[20, 30],
opencv::core::CV_32FC3,
(200, 200, 200).into(),
)
.expect("failed");
let array2: ndarray::ArrayView3<f32> = mat.as_ndarray().expect("failed");
array2.into_iter().for_each(|&x| {
assert_eq!(x, 200f32);
});
}
#[test]
fn test_mat_to_dyn_ndarray() {
let mat = opencv::core::Mat::new_nd_with_default(&[10], opencv::core::CV_8UC1, 200.into())
.expect("failed");
let array2: ndarray::ArrayViewD<u8> = mat.as_ndarray().expect("failed");
array2.into_iter().for_each(|&x| assert_eq!(x, 200));
}
#[test]
fn test_3d_mat_to_ndarray_4k() {
let mat = opencv::core::Mat::new_nd_with_default(
&[4096, 4096],
opencv::core::CV_8UC3,
(255, 0, 255).into(),
)
.expect("failed");
let array2: ndarray::ArrayView3<u8> = (mat).as_ndarray().expect("failed");
array2.exact_chunks((1, 1, 3)).into_iter().for_each(|x| {
assert_eq!(x[(0, 0, 0)], 255);
assert_eq!(x[(0, 0, 1)], 0);
assert_eq!(x[(0, 0, 2)], 255);
});
}
// #[test]
// fn test_3d_mat_to_ndarray_8k() {
// let mat = opencv::core::Mat::new_nd_with_default(
// &[8192, 8192],
// opencv::core::CV_8UC3,
// (255, 0, 255).into(),
// )
// .expect("failed");
// let array2 = ndarray::Array3::<u8>::from_mat(mat).expect("failed");
// array2.exact_chunks((1, 1, 3)).into_iter().for_each(|x| {
// assert_eq!(x[(0, 0, 0)], 255);
// assert_eq!(x[(0, 0, 1)], 0);
// assert_eq!(x[(0, 0, 2)], 255);
// });
// }
#[test]
pub fn test_mat_to_nd_default_strides() {
let mat = opencv::core::Mat::new_rows_cols_with_default(
10,
10,
opencv::core::CV_8UC3,
opencv::core::VecN([10f64, 0.0, 0.0, 0.0]),
)
.expect("failed");
let array = unsafe { impls::mat_to_ndarray::<u8, Ix3>(&mat) }.expect("failed");
assert_eq!(array.shape(), [10, 10, 3]);
assert_eq!(array.strides(), [30, 3, 1]);
assert_eq!(array[(0, 0, 0)], 10);
}
#[test]
pub fn test_mat_to_nd_custom_strides() {
let mat = opencv::core::Mat::new_rows_cols_with_default(
10,
10,
opencv::core::CV_8UC3,
opencv::core::VecN([10f64, 0.0, 0.0, 0.0]),
)
.unwrap();
let mat_roi = opencv::core::Mat::roi(&mat, opencv::core::Rect::new(3, 2, 3, 5))
.expect("failed to get roi");
let array = unsafe { impls::mat_to_ndarray::<u8, Ix3>(&mat_roi) }.expect("failed");
assert_eq!(array.shape(), [5, 3, 3]);
assert_eq!(array.strides(), [30, 3, 1]);
assert_eq!(array[(0, 0, 0)], 10);
}
#[test]
pub fn test_non_continuous_3d() {
let array = ndarray::Array3::<f32>::from_shape_fn((10, 10, 4), |(i, j, k)| {
((i + 1) * (j + 1) * (k + 1)) as f32
});
let slice = array.slice(ndarray::s![3..7, 3..7, 0..4]);
let mat = unsafe { impls::ndarray_to_mat_consolidated(&slice) }.unwrap();
let arr = unsafe { impls::mat_to_ndarray::<f32, Ix3>(&mat).unwrap() };
assert!(slice == arr);
}
#[test]
pub fn test_5d_array() {
let array = ndarray::Array5::<f32>::ones((1, 2, 3, 4, 5));
let mat = unsafe { impls::ndarray_to_mat_consolidated(&array) }.unwrap();
let arr = unsafe { impls::mat_to_ndarray::<f32, ndarray::Ix5>(&mat).unwrap() };
assert_eq!(array, arr);
}
#[test]
pub fn test_3d_array() {
let array = ndarray::Array3::<f32>::ones((23, 31, 33));
let mat = unsafe { impls::ndarray_to_mat_consolidated(&array) }.unwrap();
let arr = unsafe { impls::mat_to_ndarray::<f32, ndarray::Ix3>(&mat).unwrap() };
assert_eq!(array, arr);
}
#[test]
pub fn test_2d_array() {
let array = ndarray::Array2::<f32>::ones((23, 31));
let mat = unsafe { impls::ndarray_to_mat_consolidated(&array) }.unwrap();
let arr = unsafe { impls::mat_to_ndarray::<f32, ndarray::Ix2>(&mat).unwrap() };
assert_eq!(array, arr);
}
#[test]
#[should_panic]
pub fn test_1d_array_consolidated() {
let array = ndarray::Array1::<f32>::ones(23);
let mat = unsafe { impls::ndarray_to_mat_consolidated(&array) }.unwrap();
let arr = unsafe { impls::mat_to_ndarray::<f32, ndarray::Ix1>(&mat).unwrap() };
assert_eq!(array, arr);
}
#[test]
pub fn test_1d_array_regular() {
let array = ndarray::Array1::<f32>::ones(23);
let mat = unsafe { impls::ndarray_to_mat_regular(&array) }.unwrap();
let arr = unsafe { impls::mat_to_ndarray::<f32, ndarray::Ix1>(&mat).unwrap() };
assert_eq!(array, arr);
}
#[test]
pub fn test_2d_array_regular() {
let array = ndarray::Array2::<f32>::ones((23, 31));
let mat = unsafe { impls::ndarray_to_mat_regular(&array) }.unwrap();
let arr = unsafe { impls::mat_to_ndarray::<f32, ndarray::Ix2>(&mat).unwrap() };
assert_eq!(array, arr);
}
#[test]
pub fn test_ndcv_1024_1024_to_mat() {
let array = ndarray::Array2::<f32>::ones((1024, 1024));
let _mat = array.to_mat().unwrap();
}
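
A minimal usage sketch (illustrative only, not part of this commit; the example function name is hypothetical) showing how the traits above round-trip an HWC image array through OpenCV:

fn example_roundtrip() {
    // 480x640 RGB image in (H, W, C) layout.
    let image = ndarray::Array3::<u8>::zeros((480, 640, 3));
    // Borrow the array as a CV_8UC3 Mat and clone it into an owned Mat.
    let mat = image.to_mat().expect("ndarray -> Mat");
    // Zero-copy view of the Mat's data...
    let view: ndarray::ArrayView3<u8> = mat.as_ndarray().expect("Mat -> view");
    assert_eq!(view.shape(), &[480, 640, 3]);
    // ...or an owned copy of it.
    let owned = ndarray::Array3::<u8>::from_mat(mat).expect("Mat -> owned");
    assert_eq!(owned.shape(), &[480, 640, 3]);
}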

View File

@@ -0,0 +1,168 @@
use super::*;
use core::ffi::*;
use opencv::core::prelude::*;
pub(crate) unsafe fn ndarray_to_mat_regular<
T,
S: ndarray::Data<Elem = T>,
D: ndarray::Dimension,
>(
input: &ndarray::ArrayBase<S, D>,
) -> Result<opencv::core::Mat, NdCvError> {
let shape = input.shape();
let strides = input.strides();
// let channels = shape.last().copied().unwrap_or(1);
// if channels > opencv::core::CV_CN_MAX as usize {
// Err(Report::new(NdCvError).attach_printable(format!(
// "Number of channels({channels}) exceeds CV_CN_MAX({}) use the regular version of the function", opencv::core::CV_CN_MAX
// )))?;
// }
// let size_len = shape.len();
let size = shape.iter().copied().map(|f| f as i32).collect::<Vec<_>>();
// OpenCV only needs ndims - 1 steps; the innermost step is always the element size and is filled in by OpenCV
let step_len = strides.len() - 1;
let step = strides
.iter()
.take(step_len)
.copied()
.map(|f| f as usize * core::mem::size_of::<T>())
.collect::<Vec<_>>();
let data_ptr = input.as_ptr() as *const c_void;
let typ = opencv::core::CV_MAKETYPE(type_depth::<T>(), 1);
let mat = opencv::core::Mat::new_nd_with_data_unsafe(
size.as_slice(),
typ,
data_ptr.cast_mut(),
Some(step.as_slice()),
)
.change_context(NdCvError)?;
Ok(mat)
}
pub(crate) unsafe fn ndarray_to_mat_consolidated<
T,
S: ndarray::Data<Elem = T>,
D: ndarray::Dimension,
>(
input: &ndarray::ArrayBase<S, D>,
) -> Result<opencv::core::Mat, NdCvError> {
let shape = input.shape();
let strides = input.strides();
let channels = shape.last().copied().unwrap_or(1);
if channels > opencv::core::CV_CN_MAX as usize {
Err(Report::new(NdCvError).attach_printable(format!(
"Number of channels({channels}) exceeds CV_CN_MAX({}) use the regular version of the function", opencv::core::CV_CN_MAX
)))?;
}
if shape.len() > 2 {
// The second-to-last stride is what jumps from one column to the next, but OpenCV
// only keeps ndims - 1 steps, so a custom stride on the last (channel) axis would be
// lost
if shape.last() != strides.get(strides.len() - 2).map(|x| *x as usize).as_ref() {
Err(Report::new(NdCvError).attach_printable(
"You cannot slice into the last axis in ndarray when converting to mat",
))?;
}
} else if shape.len() == 1 {
return Err(Report::new(NdCvError).attach_printable(
"You cannot convert a 1D array to a Mat while using the consolidated version",
));
}
// Since this is the consolidated version we should always only have ndims - 1 sizes and
// ndims - 2 strides
let size_len = shape.len() - 1; // Since we move last axis into the channel
let size = shape
.iter()
.take(size_len)
.map(|f| *f as i32)
.collect::<Vec<_>>();
let step_len = strides.len() - 1;
let step = strides
.iter()
.take(step_len)
.map(|f| *f as usize * core::mem::size_of::<T>())
.collect::<Vec<_>>();
let data_ptr = input.as_ptr() as *const c_void;
let typ = opencv::core::CV_MAKETYPE(type_depth::<T>(), channels as i32);
let mat = opencv::core::Mat::new_nd_with_data_unsafe(
size.as_slice(),
typ,
data_ptr.cast_mut(),
Some(step.as_slice()),
)
.change_context(NdCvError)?;
Ok(mat)
}
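
An illustrative sketch (not part of the commit; the function name is hypothetical) of the two layouts these helpers produce, exercised through the public NdAsMat methods that wrap them:

fn example_single_vs_multi_channel() {
    let array = ndarray::Array3::<u8>::zeros((4, 5, 3));
    // "Regular": every axis becomes a Mat dimension -> 3-D Mat with 1 channel.
    let single = array.as_single_channel_mat().expect("mat");
    assert_eq!(single.dims(), 3);
    assert_eq!(single.channels(), 1);
    // "Consolidated": the last axis becomes the channel count -> 2-D Mat with 3 channels.
    let multi = array.as_multi_channel_mat().expect("mat");
    assert_eq!(multi.dims(), 2);
    assert_eq!(multi.channels(), 3);
}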
pub(crate) unsafe fn mat_to_ndarray<T: bytemuck::Pod, D: ndarray::Dimension>(
mat: &opencv::core::Mat,
) -> Result<ndarray::ArrayView<'_, T, D>, NdCvError> {
let depth = mat.depth();
if type_depth::<T>() != depth {
return Err(Report::new(NdCvError).attach_printable(format!(
"Expected type Mat<{}> ({}), got Mat<{}> ({})",
std::any::type_name::<T>(),
type_depth::<T>(),
crate::depth_type(depth),
depth,
)));
}
// Since Mat::dims() always returns >= 2, we can't use it to detect a 1D array,
// so we compare the first axis length against the total element count instead
let is_1d = mat.total() as i32 == mat.rows();
let dims = is_1d.then_some(1).unwrap_or(mat.dims());
let channels = mat.channels();
let ndarray_size = (channels != 1).then_some(dims + 1).unwrap_or(dims) as usize;
if let Some(ndim) = D::NDIM {
// When channels is not 1,
// the last dimension is the channels
// Array1 -> Mat(ndims = 1, channels = 1)
// Array2 -> Mat(ndims = 1, channels = X)
// Array2 -> Mat(ndims = 2, channels = 1)
// Array3 -> Mat(ndims = 2, channels = X)
// Array3 -> Mat(ndims = 3, channels = 1)
// ...
if ndim != dims as usize && channels == 1 {
return Err(Report::new(NdCvError)
.attach_printable(format!("Expected {}D array, got {}D", ndim, ndarray_size)));
}
}
let mat_size = mat.mat_size();
let sizes = (0..dims)
.map(|i| mat_size.get(i).change_context(NdCvError))
.chain([Ok(channels)])
.map(|x| x.map(|x| x as usize))
.take(ndarray_size)
.collect::<Result<Vec<_>, NdCvError>>()
.change_context(NdCvError)?;
let strides = (0..(dims - 1))
.map(|i| mat.step1(i).change_context(NdCvError))
.chain([
Ok(channels as usize),
Ok((channels == 1).then_some(0).unwrap_or(1)),
])
.take(ndarray_size)
.collect::<Result<Vec<_>, NdCvError>>()
.change_context(NdCvError)?;
use ndarray::ShapeBuilder;
let shape = sizes.strides(strides);
let raw_array = ndarray::RawArrayView::from_shape_ptr(shape, mat.data() as *const T)
.into_dimensionality()
.change_context(NdCvError)?;
Ok(unsafe { raw_array.deref_into_view() })
}
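
A sketch (not part of the commit; the function name is hypothetical) of the dims/channels mapping implemented above: a multi-channel 2-D Mat becomes a 3-D view, a single-channel 2-D Mat a 2-D view.

fn example_dims_to_dimensionality() {
    let rgb = opencv::core::Mat::new_rows_cols_with_default(
        4,
        5,
        opencv::core::CV_8UC3,
        opencv::core::VecN([7.0, 7.0, 7.0, 0.0]),
    )
    .expect("mat");
    let view3: ndarray::ArrayView3<u8> = rgb.as_ndarray().expect("view");
    assert_eq!(view3.shape(), &[4, 5, 3]);
    let gray = opencv::core::Mat::new_rows_cols_with_default(
        4,
        5,
        opencv::core::CV_8UC1,
        opencv::core::VecN([7.0, 0.0, 0.0, 0.0]),
    )
    .expect("mat");
    let view2: ndarray::ArrayView2<u8> = gray.as_ndarray().expect("view");
    assert_eq!(view2.shape(), &[4, 5]);
}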

View File

@@ -0,0 +1,73 @@
#[derive(Debug, Clone)]
pub struct MatRef<'a> {
pub(crate) mat: opencv::core::Mat,
pub(crate) _marker: core::marker::PhantomData<&'a ()>,
}
impl MatRef<'_> {
pub fn clone_pointee(&self) -> opencv::core::Mat {
self.mat.clone()
}
}
impl MatRef<'_> {
pub fn new<'a>(mat: opencv::core::Mat) -> MatRef<'a> {
MatRef {
mat,
_marker: core::marker::PhantomData,
}
}
}
impl AsRef<opencv::core::Mat> for MatRef<'_> {
fn as_ref(&self) -> &opencv::core::Mat {
&self.mat
}
}
impl AsRef<opencv::core::Mat> for MatRefMut<'_> {
fn as_ref(&self) -> &opencv::core::Mat {
&self.mat
}
}
impl AsMut<opencv::core::Mat> for MatRefMut<'_> {
fn as_mut(&mut self) -> &mut opencv::core::Mat {
&mut self.mat
}
}
#[derive(Debug, Clone)]
pub struct MatRefMut<'a> {
pub(crate) mat: opencv::core::Mat,
pub(crate) _marker: core::marker::PhantomData<&'a mut ()>,
}
impl MatRefMut<'_> {
pub fn new<'a>(mat: opencv::core::Mat) -> MatRefMut<'a> {
MatRefMut {
mat,
_marker: core::marker::PhantomData,
}
}
}
impl core::ops::Deref for MatRef<'_> {
type Target = opencv::core::Mat;
fn deref(&self) -> &Self::Target {
&self.mat
}
}
impl core::ops::Deref for MatRefMut<'_> {
type Target = opencv::core::Mat;
fn deref(&self) -> &Self::Target {
&self.mat
}
}
impl core::ops::DerefMut for MatRefMut<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.mat
}
}
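
A short sketch (not part of the commit; the function name is hypothetical) of the borrowing contract: the wrapped Mat points into the source array's buffer, the PhantomData lifetime keeps that borrow alive, and clone_pointee produces an independent Mat when one is needed.

fn example_matref_borrow() {
    use crate::NdAsMat;
    use opencv::prelude::*;
    let array = ndarray::Array2::<f32>::ones((8, 8));
    // `mat_ref` borrows `array`'s buffer; no pixel data is copied here.
    let mat_ref = array.as_single_channel_mat().expect("as mat");
    // Deref gives access to the full Mat API.
    assert_eq!(mat_ref.rows(), 8);
    // clone_pointee() detaches an owned, independent Mat.
    let _owned: opencv::core::Mat = mat_ref.clone_pointee();
}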

262
ndcv-bridge/src/fir.rs Normal file
View File

@@ -0,0 +1,262 @@
use error_stack::*;
use fast_image_resize::*;
use images::{Image, ImageRef};
#[derive(Debug, Clone, thiserror::Error)]
#[error("NdFirError")]
pub struct NdFirError;
type Result<T, E = Report<NdFirError>> = std::result::Result<T, E>;
pub trait NdAsImage<T: seal::Sealed, D: ndarray::Dimension>: Sized {
fn as_image_ref(&self) -> Result<ImageRef<'_>>;
}
pub trait NdAsImageMut<T: seal::Sealed, D: ndarray::Dimension>: Sized {
fn as_image_ref_mut(&mut self) -> Result<Image<'_>>;
}
pub struct NdarrayImageContainer<'a, T: seal::Sealed, D: ndarray::Dimension> {
#[allow(dead_code)]
data: ndarray::ArrayView<'a, T, D>,
pub _phantom: std::marker::PhantomData<(T, D)>,
}
impl<'a, T: seal::Sealed> NdarrayImageContainer<'a, T, ndarray::Ix3> {
pub fn new<S: ndarray::Data<Elem = T>>(array: &'a ndarray::ArrayBase<S, ndarray::Ix3>) -> Self {
Self {
data: array.view(),
_phantom: std::marker::PhantomData,
}
}
}
impl<'a, T: seal::Sealed> NdarrayImageContainer<'a, T, ndarray::Ix2> {
pub fn new<S: ndarray::Data<Elem = T>>(array: &'a ndarray::ArrayBase<S, ndarray::Ix2>) -> Self {
Self {
data: array.view(),
_phantom: std::marker::PhantomData,
}
}
}
pub struct NdarrayImageContainerMut<'a, T: seal::Sealed, D: ndarray::Dimension> {
#[allow(dead_code)]
data: ndarray::ArrayViewMut<'a, T, D>,
}
impl<'a, T: seal::Sealed> NdarrayImageContainerMut<'a, T, ndarray::Ix3> {
pub fn new<S: ndarray::DataMut<Elem = T>>(
array: &'a mut ndarray::ArrayBase<S, ndarray::Ix3>,
) -> Self {
Self {
data: array.view_mut(),
}
}
}
impl<'a, T: seal::Sealed> NdarrayImageContainerMut<'a, T, ndarray::Ix2> {
pub fn new<S: ndarray::DataMut<Elem = T>>(
array: &'a mut ndarray::ArrayBase<S, ndarray::Ix2>,
) -> Self {
Self {
data: array.view_mut(),
}
}
}
pub struct NdarrayImageContainerTyped<'a, T: seal::Sealed, D: ndarray::Dimension, P: PixelTrait> {
#[allow(dead_code)]
data: ndarray::ArrayView<'a, T, D>,
__marker: std::marker::PhantomData<P>,
}
// unsafe impl<'a, T: seal::Sealed + Sync + InnerPixel, P: PixelTrait> ImageView
// for NdarrayImageContainerTyped<'a, T, ndarray::Ix3, P>
// where
// T: bytemuck::Pod,
// {
// type Pixel = P;
// fn width(&self) -> u32 {
// self.data.shape()[1] as u32
// }
// fn height(&self) -> u32 {
// self.data.shape()[0] as u32
// }
// fn iter_rows(&self, start_row: u32) -> impl Iterator<Item = &[Self::Pixel]> {
// self.data
// .rows()
// .into_iter()
// .skip(start_row as usize)
// .map(|row| {
// row.as_slice()
// .unwrap_or_default()
// .chunks_exact(P::CHANNELS as usize)
// })
// }
// }
// impl<'a, T: fast_image_resize::pixels::InnerPixel + seal::Sealed, D: ndarray::Dimension>
// fast_image_resize::IntoImageView for NdarrayImageContainer<'a, T, D>
// {
// fn pixel_type(&self) -> Option<PixelType> {
// match D::NDIM {
// Some(2) => Some(to_pixel_type::<T>(1).expect("Failed to convert to pixel type")),
// Some(3) => Some(
// to_pixel_type::<T>(self.data.shape()[2]).expect("Failed to convert to pixel type"),
// ),
// _ => None,
// }
// }
// fn width(&self) -> u32 {
// self.data.shape()[1] as u32
// }
// fn height(&self) -> u32 {
// self.data.shape()[0] as u32
// }
// fn image_view<P: PixelTrait>(&'a self) -> Option<NdarrayImageContainerTyped<'a, T, D, P>> {
// Some(NdarrayImageContainerTyped {
// data: self.data.view(),
// __marker: std::marker::PhantomData,
// })
// }
// }
pub fn to_pixel_type<T: seal::Sealed>(u: usize) -> Result<PixelType> {
match (core::any::type_name::<T>(), u) {
("u8", 1) => Ok(PixelType::U8),
("u8", 2) => Ok(PixelType::U8x2),
("u8", 3) => Ok(PixelType::U8x3),
("u8", 4) => Ok(PixelType::U8x4),
("u16", 1) => Ok(PixelType::U16),
("i32", 1) => Ok(PixelType::I32),
("f32", 1) => Ok(PixelType::F32),
("f32", 2) => Ok(PixelType::F32x2),
("f32", 3) => Ok(PixelType::F32x3),
("f32", 4) => Ok(PixelType::F32x4),
_ => Err(Report::new(NdFirError).attach_printable("Unsupported pixel type")),
}
}
mod seal {
pub trait Sealed {}
impl Sealed for u8 {}
impl Sealed for u16 {}
impl Sealed for i32 {}
impl Sealed for f32 {}
}
impl<S: ndarray::Data<Elem = T>, T: seal::Sealed + bytemuck::Pod, D: ndarray::Dimension>
NdAsImage<T, D> for ndarray::ArrayBase<S, D>
{
/// Borrows the array's contiguous data as an `ImageRef` without copying; fails if the
/// array is not in standard (contiguous) layout
fn as_image_ref(&self) -> Result<ImageRef> {
let shape = self.shape();
let rows = *shape
.first()
.ok_or_else(|| Report::new(NdFirError).attach_printable("Failed to get rows"))?
as u32;
let cols = *shape.get(1).unwrap_or(&1) as u32;
let channels = *shape.get(2).unwrap_or(&1);
let data = self
.as_slice()
.ok_or(NdFirError)
.attach_printable("The ndarray is not contiguous")?;
let data_bytes: &[u8] = bytemuck::cast_slice(data);
let pixel_type = to_pixel_type::<T>(channels)?;
ImageRef::new(cols, rows, data_bytes, pixel_type)
.change_context(NdFirError)
.attach_printable("Failed to create Image from ndarray")
}
}
impl<S: ndarray::DataMut<Elem = T>, T: seal::Sealed + bytemuck::Pod, D: ndarray::Dimension>
NdAsImageMut<T, D> for ndarray::ArrayBase<S, D>
{
fn as_image_ref_mut(&mut self) -> Result<Image<'_>>
where
S: ndarray::DataMut<Elem = T>,
{
let shape = self.shape();
let rows = *shape
.first()
.ok_or_else(|| Report::new(NdFirError).attach_printable("Failed to get rows"))?
as u32;
let cols = *shape.get(1).unwrap_or(&1) as u32;
let channels = *shape.get(2).unwrap_or(&1);
let data = self
.as_slice_mut()
.ok_or(NdFirError)
.attach_printable("The ndarray is not contiguous")?;
let data_bytes: &mut [u8] = bytemuck::cast_slice_mut(data);
let pixel_type = to_pixel_type::<T>(channels)?;
Image::from_slice_u8(cols, rows, data_bytes, pixel_type)
.change_context(NdFirError)
.attach_printable("Failed to create Image from ndarray")
}
}
pub trait NdFir<T, D> {
fn fast_resize<'o>(
&self,
height: usize,
width: usize,
options: impl Into<Option<&'o ResizeOptions>>,
) -> Result<ndarray::Array<T, D>>;
}
impl<T: seal::Sealed + bytemuck::Pod + num::Zero, S: ndarray::Data<Elem = T>> NdFir<T, ndarray::Ix3>
for ndarray::ArrayBase<S, ndarray::Ix3>
{
fn fast_resize<'o>(
&self,
height: usize,
width: usize,
options: impl Into<Option<&'o ResizeOptions>>,
) -> Result<ndarray::Array3<T>> {
let source = self.as_image_ref()?;
let (_height, _width, channels) = self.dim();
let mut dest = ndarray::Array3::<T>::zeros((height, width, channels));
let mut dest_image = dest.as_image_ref_mut()?;
let mut resizer = fast_image_resize::Resizer::default();
resizer
.resize(&source, &mut dest_image, options)
.change_context(NdFirError)?;
Ok(dest)
}
}
impl<T: seal::Sealed + bytemuck::Pod + num::Zero, S: ndarray::Data<Elem = T>> NdFir<T, ndarray::Ix2>
for ndarray::ArrayBase<S, ndarray::Ix2>
{
fn fast_resize<'o>(
&self,
height: usize,
width: usize,
options: impl Into<Option<&'o ResizeOptions>>,
) -> Result<ndarray::Array<T, ndarray::Ix2>> {
let source = self.as_image_ref()?;
let (_height, _width) = self.dim();
let mut dest = ndarray::Array::<T, ndarray::Ix2>::zeros((height, width));
let mut dest_image = dest.as_image_ref_mut()?;
let mut resizer = fast_image_resize::Resizer::default();
resizer
.resize(&source, &mut dest_image, options)
.change_context(NdFirError)?;
Ok(dest)
}
}
#[test]
pub fn test_ndarray_fast_image_resize_u8() {
let source_fhd = ndarray::Array3::<u8>::ones((1920, 1080, 3));
let mut resized_hd = ndarray::Array3::<u8>::zeros((1280, 720, 3));
let mut resizer = fast_image_resize::Resizer::default();
resizer
.resize(
&source_fhd.as_image_ref().unwrap(),
&mut resized_hd.as_image_ref_mut().unwrap(),
None,
)
.unwrap();
assert_eq!(resized_hd.shape(), [1280, 720, 3]);
}
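
A usage sketch for the NdFir trait above (not part of the commit; the function name is hypothetical, and the options builder call assumes fast_image_resize's ResizeOptions API):

fn example_fast_resize() {
    let src = ndarray::Array3::<u8>::zeros((1080, 1920, 3));
    // Default options.
    let resized = src.fast_resize(720, 1280, None).expect("resize");
    assert_eq!(resized.shape(), &[720, 1280, 3]);
    // Explicit algorithm, e.g. nearest neighbour.
    let opts = ResizeOptions::new().resize_alg(ResizeAlg::Nearest);
    let _resized2 = src.fast_resize(720, 1280, &opts).expect("resize");
}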

307
ndcv-bridge/src/gaussian.rs Normal file
View File

@@ -0,0 +1,307 @@
//! <https://docs.rs/opencv/latest/opencv/imgproc/fn.gaussian_blur.html>
use crate::conversions::*;
use crate::prelude_::*;
use ndarray::*;
#[repr(C)]
#[derive(Default, Debug, Copy, Clone)]
pub enum BorderType {
#[default]
BorderConstant = 0,
BorderReplicate = 1,
BorderReflect = 2,
BorderWrap = 3,
BorderReflect101 = 4,
BorderTransparent = 5,
BorderIsolated = 16,
}
#[repr(C)]
#[derive(Default, Debug, Copy, Clone)]
pub enum AlgorithmHint {
#[default]
AlgoHintDefault = 0,
AlgoHintAccurate = 1,
AlgoHintApprox = 2,
}
mod seal {
pub trait Sealed {}
// src: input image; the image can have any number of channels, which are processed independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
impl Sealed for u8 {}
impl Sealed for u16 {}
impl Sealed for i16 {}
impl Sealed for f32 {}
impl Sealed for f64 {}
}
pub trait NdCvGaussianBlur<T: bytemuck::Pod + seal::Sealed, D: ndarray::Dimension>:
crate::image::NdImage + crate::conversions::NdAsImage<T, D>
{
fn gaussian_blur(
&self,
kernel_size: (u8, u8),
sigma_x: f64,
sigma_y: f64,
border_type: BorderType,
) -> Result<ndarray::Array<T, D>, NdCvError>;
fn gaussian_blur_def(
&self,
kernel: (u8, u8),
sigma_x: f64,
) -> Result<ndarray::Array<T, D>, NdCvError> {
self.gaussian_blur(kernel, sigma_x, sigma_x, BorderType::BorderConstant)
}
}
impl<
T: bytemuck::Pod + num::Zero + seal::Sealed,
S: ndarray::RawData + ndarray::Data<Elem = T>,
D: ndarray::Dimension,
> NdCvGaussianBlur<T, D> for ArrayBase<S, D>
where
ndarray::ArrayBase<S, D>: crate::image::NdImage + crate::conversions::NdAsImage<T, D>,
ndarray::Array<T, D>: crate::conversions::NdAsImageMut<T, D>,
{
fn gaussian_blur(
&self,
kernel_size: (u8, u8),
sigma_x: f64,
sigma_y: f64,
border_type: BorderType,
) -> Result<ndarray::Array<T, D>, NdCvError> {
let mut dst = ndarray::Array::zeros(self.dim());
let cv_self = self.as_image_mat()?;
let mut cv_dst = dst.as_image_mat_mut()?;
opencv::imgproc::gaussian_blur(
&*cv_self,
&mut *cv_dst,
opencv::core::Size {
width: kernel_size.0 as i32,
height: kernel_size.1 as i32,
},
sigma_x,
sigma_y,
border_type as i32,
)
.change_context(NdCvError)
.attach_printable("Failed to apply gaussian blur")?;
Ok(dst)
}
}
// impl<
// T: bytemuck::Pod + num::Zero + seal::Sealed,
// S: ndarray::RawData + ndarray::Data<Elem = T>,
// > NdCvGaussianBlur<T, Ix3> for ArrayBase<S, Ix3>
// {
// fn gaussian_blur(
// &self,
// kernel_size: (u8, u8),
// sigma_x: f64,
// sigma_y: f64,
// border_type: BorderType,
// ) -> Result<ndarray::Array<T, Ix3>, NdCvError> {
// let mut dst = ndarray::Array::zeros(self.dim());
// let cv_self = self.as_image_mat()?;
// let mut cv_dst = dst.as_image_mat_mut()?;
// opencv::imgproc::gaussian_blur(
// &*cv_self,
// &mut *cv_dst,
// opencv::core::Size {
// width: kernel_size.0 as i32,
// height: kernel_size.1 as i32,
// },
// sigma_x,
// sigma_y,
// border_type as i32,
// )
// .change_context(NdCvError)
// .attach_printable("Failed to apply gaussian blur")?;
// Ok(dst)
// }
// }
//
// impl<
// T: bytemuck::Pod + num::Zero + seal::Sealed,
// S: ndarray::RawData + ndarray::Data<Elem = T>,
// > NdCvGaussianBlur<T, Ix2> for ArrayBase<S, Ix2>
// {
// fn gaussian_blur(
// &self,
// kernel_size: (u8, u8),
// sigma_x: f64,
// sigma_y: f64,
// border_type: BorderType,
// ) -> Result<ndarray::Array<T, Ix2>, NdCvError> {
// let mut dst = ndarray::Array::zeros(self.dim());
// let cv_self = self.as_image_mat()?;
// let mut cv_dst = dst.as_image_mat_mut()?;
// opencv::imgproc::gaussian_blur(
// &*cv_self,
// &mut *cv_dst,
// opencv::core::Size {
// width: kernel_size.0 as i32,
// height: kernel_size.1 as i32,
// },
// sigma_x,
// sigma_y,
// border_type as i32,
// )
// .change_context(NdCvError)
// .attach_printable("Failed to apply gaussian blur")?;
// Ok(dst)
// }
// }
/// For small images the allocating version can be faster; for large ones the in-place
/// version wins. For example, on a 4K f32 image this is about 50% faster than the
/// allocating version.
pub trait NdCvGaussianBlurInPlace<T: bytemuck::Pod + seal::Sealed, D: ndarray::Dimension>:
crate::image::NdImage + crate::conversions::NdAsImageMut<T, D>
{
fn gaussian_blur_inplace(
&mut self,
kernel_size: (u8, u8),
sigma_x: f64,
sigma_y: f64,
border_type: BorderType,
) -> Result<&mut Self, NdCvError>;
fn gaussian_blur_def_inplace(
&mut self,
kernel: (u8, u8),
sigma_x: f64,
) -> Result<&mut Self, NdCvError> {
self.gaussian_blur_inplace(kernel, sigma_x, sigma_x, BorderType::BorderConstant)
}
}
impl<
T: bytemuck::Pod + num::Zero + seal::Sealed,
S: ndarray::RawData + ndarray::DataMut<Elem = T>,
D: ndarray::Dimension,
> NdCvGaussianBlurInPlace<T, D> for ArrayBase<S, D>
where
Self: crate::image::NdImage + crate::conversions::NdAsImageMut<T, D>,
{
fn gaussian_blur_inplace(
&mut self,
kernel_size: (u8, u8),
sigma_x: f64,
sigma_y: f64,
border_type: BorderType,
) -> Result<&mut Self, NdCvError> {
let mut cv_self = self.as_image_mat_mut()?;
unsafe {
crate::inplace::op_inplace(&mut *cv_self, |this, out| {
opencv::imgproc::gaussian_blur(
this,
out,
opencv::core::Size {
width: kernel_size.0 as i32,
height: kernel_size.1 as i32,
},
sigma_x,
sigma_y,
border_type as i32,
)
})
}
.change_context(NdCvError)
.attach_printable("Failed to apply gaussian blur")?;
Ok(self)
}
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::Array3;
#[test]
fn test_gaussian_basic() {
let arr = Array3::<u8>::ones((10, 10, 3));
let kernel_size = (3, 3);
let sigma_x = 0.0;
let sigma_y = 0.0;
let border_type = BorderType::BorderConstant;
let res = arr
.gaussian_blur(kernel_size, sigma_x, sigma_y, border_type)
.unwrap();
assert_eq!(res.shape(), &[10, 10, 3]);
}
#[test]
fn test_gaussian_edge_preservation() {
// Create an image with a sharp edge
let mut arr = Array3::<u8>::zeros((10, 10, 3));
arr.slice_mut(s![..5, .., ..]).fill(255); // Top half white, bottom half black
let res = arr
.gaussian_blur((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap();
// Check that the middle row (edge) has intermediate values
let middle_row = res.slice(s![4..6, 5, 0]);
assert!(middle_row.iter().all(|&x| x > 0 && x < 255));
}
#[test]
fn test_gaussian_different_kernel_sizes() {
let arr = Array3::<u8>::ones((20, 20, 3));
// Test different kernel sizes
let kernel_sizes = [(3, 3), (5, 5), (7, 7)];
for &kernel_size in &kernel_sizes {
let res = arr
.gaussian_blur(kernel_size, 1.0, 1.0, BorderType::BorderConstant)
.unwrap();
assert_eq!(res.shape(), &[20, 20, 3]);
}
}
#[test]
fn test_gaussian_different_border_types() {
let mut arr = Array3::<u8>::zeros((10, 10, 3));
arr.slice_mut(s![4..7, 4..7, ..]).fill(255); // White square in center
let border_types = [
BorderType::BorderConstant,
BorderType::BorderReplicate,
BorderType::BorderReflect,
BorderType::BorderReflect101,
];
for border_type in border_types {
let res = arr.gaussian_blur((3, 3), 1.0, 1.0, border_type).unwrap();
assert_eq!(res.shape(), &[10, 10, 3]);
}
}
#[test]
fn test_gaussian_different_types() {
// Test with different numeric types
let arr_u8 = Array3::<u8>::ones((10, 10, 3));
let arr_f32 = Array3::<f32>::ones((10, 10, 3));
let res_u8 = arr_u8
.gaussian_blur((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap();
let res_f32 = arr_f32
.gaussian_blur((3, 3), 1.0, 1.0, BorderType::BorderConstant)
.unwrap();
assert_eq!(res_u8.shape(), &[10, 10, 3]);
assert_eq!(res_f32.shape(), &[10, 10, 3]);
}
#[test]
#[should_panic]
fn test_gaussian_invalid_kernel_size() {
let arr = Array3::<u8>::ones((10, 10, 3));
// Even kernel sizes should fail
let _ = arr
.gaussian_blur((2, 2), 1.0, 1.0, BorderType::BorderConstant)
.unwrap();
}
}
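
A usage sketch (not part of the commit; the function name is hypothetical) contrasting the allocating and in-place entry points defined above:

fn example_gaussian_blur() {
    let img = ndarray::Array3::<f32>::ones((256, 256, 3));
    // Allocating version: the source array is left untouched.
    let _blurred = img.gaussian_blur_def((5, 5), 1.5).expect("blur");
    // In-place version: reuses the source buffer, which the note above reports as
    // noticeably faster on large images.
    let mut img = img;
    img.gaussian_blur_def_inplace((5, 5), 1.5).expect("blur in place");
}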

30
ndcv-bridge/src/image.rs Normal file
View File

@@ -0,0 +1,30 @@
use ndarray::*;
pub trait NdImage {
fn width(&self) -> usize;
fn height(&self) -> usize;
fn channels(&self) -> usize;
}
impl<T, S: RawData<Elem = T>> NdImage for ArrayBase<S, Ix3> {
fn width(&self) -> usize {
self.dim().1
}
fn height(&self) -> usize {
self.dim().0
}
fn channels(&self) -> usize {
self.dim().2
}
}
impl<T, S: RawData<Elem = T>> NdImage for ArrayBase<S, Ix2> {
fn width(&self) -> usize {
self.dim().1
}
fn height(&self) -> usize {
self.dim().0
}
fn channels(&self) -> usize {
1
}
}

View File

@@ -0,0 +1,14 @@
use opencv::core::Mat;
use opencv::prelude::*;
use opencv::Result;
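/// Runs an OpenCV operation "in place": `Mat::from_raw(m.as_raw_mut())` builds a second
/// Rust wrapper around the same underlying cv::Mat, so the closure sees the matrix as
/// both source and destination, and `into_raw` releases that alias afterwards without
/// destroying the matrix (ownership stays with `m`). Only sound for OpenCV functions
/// that accept aliasing src/dst arguments.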
#[inline(always)]
pub(crate) unsafe fn op_inplace<T>(
m: &mut Mat,
f: impl FnOnce(&Mat, &mut Mat) -> Result<T>,
) -> Result<T> {
let mut m_alias = Mat::from_raw(m.as_raw_mut());
let out = f(m, &mut m_alias);
let _ = m_alias.into_raw();
out
}

83
ndcv-bridge/src/lib.rs Normal file
View File

@@ -0,0 +1,83 @@
//! Methods and type conversions for ndarray to opencv and vice versa
mod blend;
// mod dilate;
pub mod fir;
mod image;
mod inplace;
pub mod percentile;
mod roi;
#[cfg(feature = "opencv")]
pub mod bounding_rect;
// #[cfg(feature = "opencv")]
// pub mod color_space;
#[cfg(feature = "opencv")]
pub mod connected_components;
#[cfg(feature = "opencv")]
pub mod contours;
#[cfg(feature = "opencv")]
pub mod conversions;
#[cfg(feature = "opencv")]
pub mod gaussian;
#[cfg(feature = "opencv")]
pub mod resize;
pub mod codec;
pub mod orient;
pub use blend::NdBlend;
pub use fast_image_resize::{FilterType, ResizeAlg, ResizeOptions, Resizer};
pub use fir::NdFir;
pub use gaussian::{BorderType, NdCvGaussianBlur, NdCvGaussianBlurInPlace};
pub use roi::{NdRoi, NdRoiMut, NdRoiZeroPadded};
#[cfg(feature = "opencv")]
pub use contours::{
ContourApproximationMethod, ContourHierarchy, ContourResult, ContourRetrievalMode,
NdCvContourArea, NdCvFindContours,
};
#[cfg(feature = "opencv")]
pub use bounding_rect::BoundingRect;
#[cfg(feature = "opencv")]
pub use connected_components::{Connectivity, NdCvConnectedComponents};
#[cfg(feature = "opencv")]
pub use conversions::{MatAsNd, NdAsImage, NdAsImageMut, NdAsMat, NdAsMatMut, NdCvConversion};
#[cfg(feature = "opencv")]
pub use resize::{Interpolation, NdCvResize};
pub(crate) mod prelude_ {
pub use crate::NdCvError;
pub use error_stack::*;
}
#[derive(Debug, thiserror::Error)]
#[error("NdCvError")]
pub struct NdCvError;
#[cfg(feature = "opencv")]
pub fn type_depth<T>() -> i32 {
match std::any::type_name::<T>() {
"u8" => opencv::core::CV_8U,
"i8" => opencv::core::CV_8S,
"u16" => opencv::core::CV_16U,
"i16" => opencv::core::CV_16S,
"i32" => opencv::core::CV_32S,
"f32" => opencv::core::CV_32F,
"f64" => opencv::core::CV_64F,
_ => panic!("Unsupported type"),
}
}
#[cfg(feature = "opencv")]
pub fn depth_type(depth: i32) -> &'static str {
match depth {
opencv::core::CV_8U => "u8",
opencv::core::CV_8S => "i8",
opencv::core::CV_16U => "u16",
opencv::core::CV_16S => "i16",
opencv::core::CV_32S => "i32",
opencv::core::CV_32F => "f32",
opencv::core::CV_64F => "f64",
_ => panic!("Unsupported depth"),
}
}
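
A small sketch (not part of the commit; the function name is hypothetical) showing that the two helpers above are inverses for the supported primitive types:

#[cfg(feature = "opencv")]
fn example_depth_mapping() {
    assert_eq!(type_depth::<u8>(), opencv::core::CV_8U);
    assert_eq!(depth_type(opencv::core::CV_32F), "f32");
    assert_eq!(depth_type(type_depth::<i16>()), "i16");
}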

188
ndcv-bridge/src/orient.rs Normal file
View File

@@ -0,0 +1,188 @@
use ndarray::{Array, ArrayBase, ArrayView};
#[derive(Clone, Copy)]
pub enum Orientation {
NoRotation,
Mirror,
Clock180,
Water,
MirrorClock270,
Clock90,
MirrorClock90,
Clock270,
Unknown,
}
impl Orientation {
pub fn inverse(&self) -> Self {
match self {
Self::Clock90 => Self::Clock270,
Self::Clock270 => Self::Clock90,
_ => *self,
}
}
}
impl Orientation {
pub fn from_raw(flip: u8) -> Self {
match flip {
1 => Orientation::NoRotation,
2 => Orientation::Mirror,
3 => Orientation::Clock180,
4 => Orientation::Water,
5 => Orientation::MirrorClock270,
6 => Orientation::Clock90,
7 => Orientation::MirrorClock90,
8 => Orientation::Clock270,
_ => Orientation::Unknown,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RotationFlag {
Clock90,
Clock180,
Clock270,
}
impl RotationFlag {
pub fn neg(&self) -> Self {
match self {
RotationFlag::Clock90 => RotationFlag::Clock270,
RotationFlag::Clock180 => RotationFlag::Clock180,
RotationFlag::Clock270 => RotationFlag::Clock90,
}
}
pub fn to_orientation(&self) -> Orientation {
match self {
RotationFlag::Clock90 => Orientation::Clock90,
RotationFlag::Clock180 => Orientation::Clock180,
RotationFlag::Clock270 => Orientation::Clock270,
}
}
}
#[derive(Clone, Copy)]
pub enum FlipFlag {
Mirror,
Water,
Both,
}
pub trait Orient<T: bytemuck::Pod, D: ndarray::Dimension> {
fn flip(&self, flip: FlipFlag) -> Array<T, D>;
fn rotate(&self, rotation: RotationFlag) -> Array<T, D>;
fn owned(&self) -> Array<T, D>;
fn unorient(&self, orientation: Orientation) -> Array<T, D>
where
Array<T, D>: Orient<T, D>,
Self: ToOwned<Owned = Array<T, D>>,
{
let inverse_orientation = orientation.inverse();
self.orient(inverse_orientation)
// match orientation {
// Orientation::NoRotation | Orientation::Unknown => self.to_owned(),
// Orientation::Mirror => self.flip(FlipFlag::Mirror).to_owned(),
// Orientation::Clock180 => self.rotate(RotationFlag::Clock180),
// Orientation::Water => self.flip(FlipFlag::Water).to_owned(),
// Orientation::MirrorClock270 => self
// .rotate(RotationFlag::Clock90)
// .flip(FlipFlag::Mirror)
// .to_owned(),
// Orientation::Clock90 => self.rotate(RotationFlag::Clock270),
// Orientation::MirrorClock90 => self
// .rotate(RotationFlag::Clock270)
// .flip(FlipFlag::Mirror)
// .to_owned(),
// Orientation::Clock270 => self.rotate(RotationFlag::Clock90),
// }
}
fn orient(&self, orientation: Orientation) -> Array<T, D>
where
Array<T, D>: Orient<T, D>,
{
match orientation {
Orientation::NoRotation | Orientation::Unknown => self.owned(),
Orientation::Mirror => self.flip(FlipFlag::Mirror).to_owned(),
Orientation::Clock180 => self.rotate(RotationFlag::Clock180),
Orientation::Water => self.flip(FlipFlag::Water).to_owned(),
Orientation::MirrorClock270 => self
.flip(FlipFlag::Mirror)
.rotate(RotationFlag::Clock270)
.to_owned(),
Orientation::Clock90 => self.rotate(RotationFlag::Clock90),
Orientation::MirrorClock90 => self
.flip(FlipFlag::Mirror)
.rotate(RotationFlag::Clock90)
.to_owned(),
Orientation::Clock270 => self.rotate(RotationFlag::Clock270),
}
.as_standard_layout()
.to_owned()
}
}
impl<T: bytemuck::Pod + Copy, S: ndarray::Data<Elem = T>> Orient<T, ndarray::Ix3>
for ArrayBase<S, ndarray::Ix3>
{
fn flip(&self, flip: FlipFlag) -> Array<T, ndarray::Ix3> {
match flip {
FlipFlag::Mirror => self.slice(ndarray::s![.., ..;-1, ..]),
FlipFlag::Water => self.slice(ndarray::s![..;-1, .., ..]),
FlipFlag::Both => self.slice(ndarray::s![..;-1, ..;-1, ..]),
}
.as_standard_layout()
.to_owned()
}
fn owned(&self) -> Array<T, ndarray::Ix3> {
self.to_owned()
}
fn rotate(&self, rotation: RotationFlag) -> Array<T, ndarray::Ix3> {
match rotation {
RotationFlag::Clock90 => self
.view()
.permuted_axes([1, 0, 2])
.flip(FlipFlag::Mirror)
.to_owned(),
RotationFlag::Clock180 => self.flip(FlipFlag::Both).to_owned(),
RotationFlag::Clock270 => self
.view()
.permuted_axes([1, 0, 2])
.flip(FlipFlag::Water)
.to_owned(),
}
}
}
impl<T: bytemuck::Pod + Copy, S: ndarray::Data<Elem = T>> Orient<T, ndarray::Ix2>
for ArrayBase<S, ndarray::Ix2>
{
fn flip(&self, flip: FlipFlag) -> Array<T, ndarray::Ix2> {
match flip {
FlipFlag::Mirror => self.slice(ndarray::s![.., ..;-1,]),
FlipFlag::Water => self.slice(ndarray::s![..;-1, ..,]),
FlipFlag::Both => self.slice(ndarray::s![..;-1, ..;-1,]),
}
.as_standard_layout()
.to_owned()
}
fn owned(&self) -> Array<T, ndarray::Ix2> {
self.to_owned()
}
fn rotate(&self, rotation: RotationFlag) -> Array<T, ndarray::Ix2> {
match rotation {
RotationFlag::Clock90 => self.t().flip(FlipFlag::Mirror).to_owned(),
RotationFlag::Clock180 => self.flip(FlipFlag::Both).to_owned(),
RotationFlag::Clock270 => self.t().flip(FlipFlag::Water).to_owned(),
}
}
}
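
An illustrative sketch (not part of the commit; the function name is hypothetical) of orienting and un-orienting an image with the EXIF-style codes handled by Orientation::from_raw:

fn example_orient_roundtrip() {
    let image =
        ndarray::Array3::<u8>::from_shape_fn((4, 6, 3), |(y, x, c)| (y * 18 + x * 3 + c) as u8);
    // EXIF value 6: the stored image needs a 90-degree clockwise rotation for display.
    let orientation = Orientation::from_raw(6);
    let rotated = image.orient(orientation);
    assert_eq!(rotated.shape(), &[6, 4, 3]); // height and width swap on 90/270 rotations
    // `unorient` applies the inverse rotation and restores the original layout.
    let restored = rotated.unorient(orientation);
    assert_eq!(restored, image);
}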

View File

@@ -0,0 +1,63 @@
use error_stack::*;
use ndarray::{ArrayBase, Ix1};
use num::cast::AsPrimitive;
use crate::NdCvError;
pub trait Percentile {
fn percentile(&self, qth_percentile: f64) -> Result<f64, NdCvError>;
}
impl<T: std::cmp::Ord + Clone + AsPrimitive<f64>, S: ndarray::Data<Elem = T>> Percentile
for ArrayBase<S, Ix1>
{
fn percentile(&self, qth_percentile: f64) -> Result<f64, NdCvError> {
if self.len() == 0 {
return Err(error_stack::Report::new(NdCvError).attach_printable("Empty Input"));
}
if !(0_f64..=1_f64).contains(&qth_percentile) {
return Err(error_stack::Report::new(NdCvError)
.attach_printable("Qth percentile must be between 0 and 1"));
}
let mut standard_array = self.as_standard_layout();
let mut raw_data = standard_array
.as_slice_mut()
.expect("An array in standard layout will always return its inner slice");
raw_data.sort();
let actual_index = qth_percentile * (raw_data.len() - 1) as f64;
let lower_index = (actual_index.floor() as usize).clamp(0, raw_data.len() - 1);
let upper_index = (actual_index.ceil() as usize).clamp(0, raw_data.len() - 1);
if lower_index == upper_index {
Ok(raw_data[lower_index].as_())
} else {
let weight = actual_index - lower_index as f64;
Ok(raw_data[lower_index].as_() * (1.0 - weight) + raw_data[upper_index].as_() * weight)
}
}
}
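
A usage sketch (not part of the commit; the function name is hypothetical). Note that the `T: Ord` bound restricts this to integer-like element types:

fn example_percentile() {
    let values = ndarray::Array1::from(vec![9u32, 1, 5, 3, 7]);
    // Sorted: [1, 3, 5, 7, 9]; the 0.5 quantile lands exactly on index 2.
    assert_eq!(values.percentile(0.5).expect("median"), 5.0);
    // 0.9 * (5 - 1) = 3.6 -> interpolate between 7 and 9: 7 * 0.4 + 9 * 0.6 = 8.2.
    assert!((values.percentile(0.9).expect("p90") - 8.2).abs() < 1e-9);
}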
// fn percentile(data: &Array1<f64>, p: f64) -> f64 {
// if data.len() == 0 {
// return 0.0;
// }
//
// let mut sorted_data = data.to_vec();
// sorted_data.sort_by(|a, b| a.partial_cmp(b).unwrap());
//
// let index = (p / 100.0) * (sorted_data.len() - 1) as f64;
// let lower = index.floor() as usize;
// let upper = index.ceil() as usize;
//
// if lower == upper {
// sorted_data[lower] as f64
// } else {
// let weight = index - lower as f64;
// sorted_data[lower] as f64 * (1.0 - weight) + sorted_data[upper] as f64 * weight
// }
// }

108
ndcv-bridge/src/resize.rs Normal file
View File

@@ -0,0 +1,108 @@
use crate::{prelude_::*, NdAsImage, NdAsImageMut};
/// Resize ndarray using OpenCV resize functions
pub trait NdCvResize<T, D>: seal::SealedInternal {
/// The input array must be a continuous 2D or 3D ndarray
fn resize(
&self,
height: u16,
width: u16,
interpolation: Interpolation,
) -> Result<ndarray::ArrayBase<ndarray::OwnedRepr<T>, D>, NdCvError>;
}
#[repr(i32)]
#[derive(Debug, Copy, Clone)]
pub enum Interpolation {
Linear = opencv::imgproc::INTER_LINEAR,
LinearExact = opencv::imgproc::INTER_LINEAR_EXACT,
Max = opencv::imgproc::INTER_MAX,
Area = opencv::imgproc::INTER_AREA,
Cubic = opencv::imgproc::INTER_CUBIC,
Nearest = opencv::imgproc::INTER_NEAREST,
NearestExact = opencv::imgproc::INTER_NEAREST_EXACT,
Lanczos4 = opencv::imgproc::INTER_LANCZOS4,
}
mod seal {
pub trait SealedInternal {}
impl<T: bytemuck::Pod, S: ndarray::Data<Elem = T>> SealedInternal
for ndarray::ArrayBase<S, ndarray::Ix3>
{
}
impl<T: bytemuck::Pod, S: ndarray::Data<Elem = T>> SealedInternal
for ndarray::ArrayBase<S, ndarray::Ix2>
{
}
}
impl<T: bytemuck::Pod + num::Zero, S: ndarray::Data<Elem = T>> NdCvResize<T, ndarray::Ix2>
for ndarray::ArrayBase<S, ndarray::Ix2>
{
fn resize(
&self,
height: u16,
width: u16,
interpolation: Interpolation,
) -> Result<ndarray::Array2<T>, NdCvError> {
let mat = self.as_image_mat()?;
let mut dest = ndarray::Array2::zeros((height.into(), width.into()));
let mut dest_mat = dest.as_image_mat_mut()?;
opencv::imgproc::resize(
mat.as_ref(),
dest_mat.as_mut(),
opencv::core::Size {
height: height.into(),
width: width.into(),
},
0.,
0.,
interpolation as i32,
)
.change_context(NdCvError)?;
Ok(dest)
}
}
impl<T: bytemuck::Pod + num::Zero, S: ndarray::Data<Elem = T>> NdCvResize<T, ndarray::Ix3>
for ndarray::ArrayBase<S, ndarray::Ix3>
{
fn resize(
&self,
height: u16,
width: u16,
interpolation: Interpolation,
) -> Result<ndarray::ArrayBase<ndarray::OwnedRepr<T>, ndarray::Ix3>, NdCvError> {
let mat = self.as_image_mat()?;
let mut dest =
ndarray::Array3::zeros((height.into(), width.into(), self.len_of(ndarray::Axis(2))));
let mut dest_mat = dest.as_image_mat_mut()?;
opencv::imgproc::resize(
mat.as_ref(),
dest_mat.as_mut(),
opencv::core::Size {
height: height.into(),
width: width.into(),
},
0.,
0.,
interpolation as i32,
)
.change_context(NdCvError)?;
Ok(dest)
}
}
#[test]
fn test_resize_simple() {
let foo = ndarray::Array2::<u8>::ones((10, 10));
let foo_resized = foo.resize(15, 20, Interpolation::Linear).unwrap();
assert_eq!(foo_resized, ndarray::Array2::<u8>::ones((15, 20)));
}
#[test]
fn test_resize_3d() {
let foo = ndarray::Array3::<u8>::ones((10, 10, 3));
let foo_resized = foo.resize(15, 20, Interpolation::Linear).unwrap();
assert_eq!(foo_resized, ndarray::Array3::<u8>::ones((15, 20, 3)));
}
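
One more sketch (not part of the commit; the function name is hypothetical): the arguments are (height, width) in row-major order, and the channel axis is preserved:

fn example_resize_nearest() {
    let src = ndarray::Array3::<u8>::zeros((480, 640, 3));
    let dst = src.resize(240, 320, Interpolation::Nearest).expect("resize");
    assert_eq!(dst.shape(), &[240, 320, 3]);
}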

200
ndcv-bridge/src/roi.rs Normal file
View File

@@ -0,0 +1,200 @@
pub trait NdRoi<T, D>: seal::Sealed {
fn roi(&self, rect: bbox::BBox<usize>) -> ndarray::ArrayView<T, D>;
}
pub trait NdRoiMut<T, D>: seal::Sealed {
fn roi_mut(&mut self, rect: bbox::BBox<usize>) -> ndarray::ArrayViewMut<T, D>;
}
mod seal {
use ndarray::{Ix2, Ix3};
pub trait Sealed {}
impl<T: bytemuck::Pod, S: ndarray::Data<Elem = T>> Sealed for ndarray::ArrayBase<S, Ix2> {}
impl<T: bytemuck::Pod, S: ndarray::Data<Elem = T>> Sealed for ndarray::ArrayBase<S, Ix3> {}
}
impl<T: bytemuck::Pod, S: ndarray::Data<Elem = T>> NdRoi<T, ndarray::Ix3>
for ndarray::ArrayBase<S, ndarray::Ix3>
{
fn roi(&self, rect: bbox::BBox<usize>) -> ndarray::ArrayView3<T> {
let y1 = rect.y1();
let y2 = rect.y2();
let x1 = rect.x1();
let x2 = rect.x2();
self.slice(ndarray::s![y1..y2, x1..x2, ..])
}
}
impl<T: bytemuck::Pod, S: ndarray::DataMut<Elem = T>> NdRoiMut<T, ndarray::Ix3>
for ndarray::ArrayBase<S, ndarray::Ix3>
{
fn roi_mut(&mut self, rect: bbox::BBox<usize>) -> ndarray::ArrayViewMut3<T> {
let y1 = rect.y1();
let y2 = rect.y2();
let x1 = rect.x1();
let x2 = rect.x2();
self.slice_mut(ndarray::s![y1..y2, x1..x2, ..])
}
}
impl<T: bytemuck::Pod, S: ndarray::Data<Elem = T>> NdRoi<T, ndarray::Ix2>
for ndarray::ArrayBase<S, ndarray::Ix2>
{
fn roi(&self, rect: bbox::BBox<usize>) -> ndarray::ArrayView2<T> {
let y1 = rect.y1();
let y2 = rect.y2();
let x1 = rect.x1();
let x2 = rect.x2();
self.slice(ndarray::s![y1..y2, x1..x2])
}
}
impl<T: bytemuck::Pod, S: ndarray::DataMut<Elem = T>> NdRoiMut<T, ndarray::Ix2>
for ndarray::ArrayBase<S, ndarray::Ix2>
{
fn roi_mut(&mut self, rect: bbox::BBox<usize>) -> ndarray::ArrayViewMut2<T> {
let y1 = rect.y1();
let y2 = rect.y2();
let x1 = rect.x1();
let x2 = rect.x2();
self.slice_mut(ndarray::s![y1..y2, x1..x2])
}
}
#[test]
fn test_roi() {
let arr = ndarray::Array3::<u8>::zeros((10, 10, 3));
let rect = bbox::BBox::new(1, 1, 3, 3);
let roi = arr.roi(rect);
assert_eq!(roi.shape(), &[3, 3, 3]);
}
#[test]
fn test_roi_2d() {
let arr = ndarray::Array2::<u8>::zeros((10, 10));
let rect = bbox::BBox::new(1, 1, 3, 3);
let roi = arr.roi(rect);
assert_eq!(roi.shape(), &[3, 3]);
}
/// ```text
/// ┌──────────────────┐
/// │ padded │
/// │ ┌────────┐ │
/// │ │ │ │
/// │ │original│ │
/// │ │ │ │
/// │ └────────┘ │
/// │ zeroed │
/// └──────────────────┘
/// ```
///
/// Returns the padded bounding box together with the padded segment: the region covered
/// by `original` is copied from `self`, while the rest of `padded` is zero-filled.
pub trait NdRoiZeroPadded<T, D: ndarray::Dimension>: seal::Sealed {
fn roi_zero_padded(
&self,
original: bbox::BBox<usize>,
padded: bbox::BBox<usize>,
) -> (bbox::BBox<usize>, ndarray::Array<T, D>);
}
impl<T: bytemuck::Pod + num::Zero> NdRoiZeroPadded<T, ndarray::Ix2> for ndarray::Array2<T> {
fn roi_zero_padded(
&self,
original: bbox::BBox<usize>,
padded: bbox::BBox<usize>,
) -> (bbox::BBox<usize>, ndarray::Array2<T>) {
// The co-ordinates of both the original and the padded bounding boxes must be contained in
// self or it will panic
let self_bbox = bbox::BBox::new(0, 0, self.shape()[1], self.shape()[0]);
if !self_bbox.contains_bbox(original) {
panic!("original bounding box is not contained in self");
}
if !self_bbox.contains_bbox(padded) {
panic!("padded bounding box is not contained in self");
}
let original_roi_in_padded =
original.with_top_left(original.top_left().with_origin(padded.top_left()));
let original_segment = self.roi(original);
let mut padded_segment = padded.zeros_ndarray_2d::<T>();
padded_segment
.roi_mut(original_roi_in_padded)
.assign(&original_segment);
(padded, padded_segment)
}
}
impl<T: bytemuck::Pod + num::Zero> NdRoiZeroPadded<T, ndarray::Ix3> for ndarray::Array3<T> {
fn roi_zero_padded(
&self,
original: bbox::BBox<usize>,
padded: bbox::BBox<usize>,
) -> (bbox::BBox<usize>, ndarray::Array3<T>) {
let self_bbox = bbox::BBox::new(0, 0, self.shape()[1], self.shape()[0]);
if !self_bbox.contains_bbox(original) {
panic!("original bounding box is not contained in self");
}
if !self_bbox.contains_bbox(padded) {
panic!("padded bounding box is not contained in self");
}
let original_roi_in_padded =
original.with_top_left(original.top_left().with_origin(padded.top_left()));
let original_segment = self.roi(original);
let mut padded_segment = padded.zeros_ndarray_3d::<T>(self.len_of(ndarray::Axis(2)));
padded_segment
.roi_mut(original_roi_in_padded)
.assign(&original_segment);
(padded, padded_segment)
}
}
#[test]
fn test_roi_zero_padded() {
let arr = ndarray::Array2::<u8>::ones((10, 10));
let original = bbox::BBox::new(1, 1, 3, 3);
let clamp = bbox::BBox::new(0, 0, 10, 10);
let padded = original.padding(2).clamp_box(clamp);
let (padded, padded_segment) = arr.roi_zero_padded(original.cast(), padded.cast());
assert_eq!(padded, bbox::BBox::new(0, 0, 6, 6));
assert_eq!(padded_segment.shape(), &[6, 6]);
}
#[test]
pub fn bbox_clamp_failure_preload() {
let segment_mask = ndarray::Array2::<u8>::zeros((512, 512));
let og = bbox::BBox::new(475.0, 79.625, 37.0, 282.15);
let clamp = bbox::BBox::new(0.0, 0.0, 512.0, 512.0);
let padded = og.scale(1.2).clamp_box(clamp);
let padded = padded.round();
let (_bbox, _segment) = segment_mask.roi_zero_padded(og.cast(), padded.cast());
}
#[test]
pub fn bbox_clamp_failure_preload_2() {
let segment_mask = ndarray::Array2::<u8>::zeros((512, 512));
let bbox = bbox::BBox::new(354.25, 98.5, 116.25, 413.5);
// let padded = bbox::BBox::new(342.625, 57.150000000000006, 139.5, 454.85);
let clamp = bbox::BBox::new(0.0, 0.0, 512.0, 512.0);
let padded = bbox.scale(1.2).clamp_box(clamp);
let padded = padded.round();
let (_bbox, _segment) = segment_mask.roi_zero_padded(bbox.round().cast(), padded.cast());
}
#[test]
fn test_roi_zero_padded_3d() {
let arr = ndarray::Array3::<u8>::ones((10, 10, 3));
let original = bbox::BBox::new(1, 1, 3, 3);
let clamp = bbox::BBox::new(0, 0, 10, 10);
let padded = original.padding(2).clamp_box(clamp);
let (padded, padded_segment) = arr.roi_zero_padded(original.cast(), padded.cast());
assert_eq!(padded, bbox::BBox::new(0, 0, 6, 6));
assert_eq!(padded_segment.shape(), &[6, 6, 3]);
}

View File

@@ -1,10 +1,12 @@
-fn main() -> Result<(), Box<dyn std::error::Error>> {
+use detector::errors::*;
+fn main() -> Result<()> {
     // Initialize logging
     tracing_subscriber::fmt()
         .with_env_filter("info")
-        .with_thread_ids(true)
-        .with_thread_names(true)
-        .with_target(false)
+        // .with_thread_ids(true)
+        // .with_file(true)
+        // .with_thread_names(true)
+        .with_target(true)
         .init();
     // Run the GUI

View File

@@ -3,6 +3,8 @@ use std::path::PathBuf;
 use crate::facedet::{FaceDetectionConfig, FaceDetector, retinaface};
 use crate::faceembed::facenet;
 use crate::gui::app::{ComparisonResult, DetectionResult, ExecutorType};
+use bounding_box::Aabb2;
+use error_stack::ResultExt;
 use ndarray_image::ImageToNdarray;
 const RETINAFACE_MODEL_MNN: &[u8] = include_bytes!("../../models/retinaface.mnn");
@@ -227,42 +229,6 @@ impl FaceDetectionBridge {
         // Extract face crops and generate embeddings
         let mut best_similarity = 0.0f32;
-        for bbox1 in &faces1.bbox {
-            let crop1 = Self::crop_face_from_image(&img1, bbox1)?;
-            let crop1_array = ndarray::Array::from_shape_vec(
-                (1, crop1.height() as usize, crop1.width() as usize, 3),
-                crop1
-                    .pixels()
-                    .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
-                    .collect(),
-            )?;
-            let embedding1 = embedder
-                .run_models(crop1_array.view())
-                .map_err(|e| format!("Embedding generation failed: {}", e))?;
-            for bbox2 in &faces2.bbox {
-                let crop2 = Self::crop_face_from_image(&img2, bbox2)?;
-                let crop2_array = ndarray::Array::from_shape_vec(
-                    (1, crop2.height() as usize, crop2.width() as usize, 3),
-                    crop2
-                        .pixels()
-                        .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
-                        .collect(),
-                )?;
-                let embedding2 = embedder
-                    .run_models(crop2_array.view())
-                    .map_err(|e| format!("Embedding generation failed: {}", e))?;
-                let similarity = Self::cosine_similarity(
-                    embedding1.row(0).as_slice().unwrap(),
-                    embedding2.row(0).as_slice().unwrap(),
-                );
-                best_similarity = best_similarity.max(similarity);
-            }
-        }
         (faces1, faces2, best_similarity)
     }
     ExecutorType::OnnxCpu => {
@@ -287,40 +253,14 @@ impl FaceDetectionBridge {
         // Extract face crops and generate embeddings
         let mut best_similarity = 0.0f32;
-        for bbox1 in &faces1.bbox {
-            let crop1 = Self::crop_face_from_image(&img1, bbox1)?;
-            let crop1_array = ndarray::Array::from_shape_vec(
-                (1, crop1.height() as usize, crop1.width() as usize, 3),
-                crop1
-                    .pixels()
-                    .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
-                    .collect(),
-            )?;
-            let embedding1 = embedder
-                .run_models(crop1_array.view())
-                .map_err(|e| format!("Embedding generation failed: {}", e))?;
-            for bbox2 in &faces2.bbox {
-                let crop2 = Self::crop_face_from_image(&img2, bbox2)?;
-                let crop2_array = ndarray::Array::from_shape_vec(
-                    (1, crop2.height() as usize, crop2.width() as usize, 3),
-                    crop2
-                        .pixels()
-                        .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
-                        .collect(),
-                )?;
-                let embedding2 = embedder
-                    .run_models(crop2_array.view())
-                    .map_err(|e| format!("Embedding generation failed: {}", e))?;
-                let similarity = Self::cosine_similarity(
-                    embedding1.row(0).as_slice().unwrap(),
-                    embedding2.row(0).as_slice().unwrap(),
-                );
-                best_similarity = best_similarity.max(similarity);
-            }
-        }
+        if faces1.bbox.is_empty() || faces2.bbox.is_empty() {
+            return Ok((faces1.bbox.len(), faces2.bbox.len(), 0.0));
+        }
+        if faces1.bbox.len() != faces2.bbox.len() {
+            return Err(Box::new(std::io::Error::new(
+                std::io::ErrorKind::InvalidData,
+                "Face count mismatch between images",
+            )));
+        }
         (faces1, faces2, best_similarity)
@@ -329,39 +269,55 @@ impl FaceDetectionBridge {
         Ok((faces1.bbox.len(), faces2.bbox.len(), best_similarity))
     }
-    fn crop_face_from_image(
-        img: &image::RgbImage,
-        bbox: &bounding_box::Aabb2<usize>,
-    ) -> Result<image::RgbImage, Box<dyn std::error::Error + Send + Sync>> {
-        let min_point = bbox.min_vertex();
-        let size = bbox.size();
-        let x = min_point.x as u32;
-        let y = min_point.y as u32;
-        let width = size.x as u32;
-        let height = size.y as u32;
-        // Ensure crop bounds are within image
-        let img_width = img.width();
-        let img_height = img.height();
-        let crop_x = x.min(img_width.saturating_sub(1));
-        let crop_y = y.min(img_height.saturating_sub(1));
-        let crop_width = width.min(img_width - crop_x);
-        let crop_height = height.min(img_height - crop_y);
-        Ok(image::imageops::crop_imm(img, crop_x, crop_y, crop_width, crop_height).to_image())
-    }
-    fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
-        let dot_product: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
-        let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
-        let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
-        if norm_a == 0.0 || norm_b == 0.0 {
-            0.0
-        } else {
-            dot_product / (norm_a * norm_b)
-        }
-    }
-}
+}
+// for bbox1 in &faces1.bbox {
+//     let crop1 = Self::crop_face_from_image(&img1, bbox1)?;
+//     let crop1_array = ndarray::Array::from_shape_vec(
+//         (1, crop1.height() as usize, crop1.width() as usize, 3),
+//         crop1
+//             .pixels()
+//             .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
+//             .collect(),
+//     )?;
+//     let embedding1 = embedder
+//         .run_models(crop1_array.view())
+//         .map_err(|e| format!("Embedding generation failed: {}", e))?;
+//     for bbox2 in &faces2.bbox {
+//         let crop2 = Self::crop_face_from_image(&img2, bbox2)?;
+//         let crop2_array = ndarray::Array::from_shape_vec(
+//             (1, crop2.height() as usize, crop2.width() as usize, 3),
+//             crop2
+//                 .pixels()
+//                 .flat_map(|p| [p.0[0], p.0[1], p.0[2]])
+//                 .collect(),
+//         )?;
+//         let embedding2 = embedder
+//             .run_models(crop2_array.view())
+//             .map_err(|e| format!("Embedding generation failed: {}", e))?;
+//         let similarity = Self::cosine_similarity(
+//             embedding1.row(0).as_slice().unwrap(),
+//             embedding2.row(0).as_slice().unwrap(),
+//         );
+//         best_similarity = best_similarity.max(similarity);
+//     }
+// }
+use crate::errors::Error;
+pub fn compare_faces(
+    image1: &[Aabb2<usize>],
+    image2: &[Aabb2<usize>],
+) -> Result<f32, error_stack::Report<crate::errors::Error>> {
+    use error_stack::Report;
+    if image1.is_empty() || image2.is_empty() {
+        Err(Report::new(crate::errors::Error))
+            .change_context(Report::new(crate::errors::Error))
+            .attach_printable("One or both images have no detected faces")
+    }
+    Ok(0.0)
+}