//! # Edge Impulse Runner
//!
//! A Rust library for running inference with Edge Impulse models and uploading data to
//! Edge Impulse projects. This crate provides safe and easy-to-use interfaces for:
//! - Running machine learning models on Linux and macOS
//! - Uploading training, testing and anomaly data to Edge Impulse projects
//!
//! ## Inference Modes
//!
//! The crate supports two inference modes:
//!
//! ### EIM Mode (Default)
//! - Runs Edge Impulse model binaries (.eim files) as separate processes, communicating over a Unix socket
//! - Requires model files to be present on the filesystem
//! - Compatible with all Edge Impulse deployment targets
//!
//! ### FFI Mode
//! - Direct FFI calls to the Edge Impulse C++ SDK
//! - Models are compiled into the binary
//! - Faster startup and inference times
//! - Requires the `ffi` feature to be enabled
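//!
//! Both modes are exposed through the same `EdgeImpulseModel` type; only the
//! constructor differs, as the sketch below shows (the path and flag are
//! placeholders, matching the Quick Start examples further down):
//!
//! ```no_run
//! use edge_impulse_runner::EdgeImpulseModel;
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // EIM mode: load a model file from disk at runtime.
//!     let _eim = EdgeImpulseModel::new("path/to/model.eim")?;
//!     // FFI mode: use the model compiled into the binary (needs the `ffi` feature);
//!     // the boolean flag is passed exactly as in the FFI Quick Start example below.
//!     let _ffi = EdgeImpulseModel::new_ffi(false)?;
//!     Ok(())
//! }
//! ```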
//!
//! ## Model Support
//!
//! - **Classification models**: Multi-class and binary classification
//! - **Object detection models**: Bounding box detection with labels
//! - **Anomaly detection**: Visual and sensor-based anomaly detection
//! - **Sensor types**: Camera, microphone, accelerometer, positional sensors
//! - **Continuous mode**: Real-time streaming inference support (see the sketch below)
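//!
//! Continuous mode boils down to running inference repeatedly over a sliding
//! window of samples. The sketch below illustrates the idea with hand-rolled
//! windowing; the window and hop sizes are made up for illustration, and a real
//! application would pull samples from a live sensor rather than a fixed buffer:
//!
//! ```no_run
//! use edge_impulse_runner::EdgeImpulseModel;
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut model = EdgeImpulseModel::new("path/to/model.eim")?;
//!
//!     // Stand-in for a live stream (e.g. one second of 16 kHz audio).
//!     let stream: Vec<f32> = vec![0.0; 16_000];
//!     let window = 4_000; // samples per inference window (illustrative)
//!     let hop = 2_000;    // stride between consecutive windows (illustrative)
//!
//!     for start in (0..=stream.len() - window).step_by(hop) {
//!         let response = model.infer(stream[start..start + window].to_vec(), None)?;
//!         // Handle `response.result` as in the Quick Start examples below.
//!         let _ = response;
//!     }
//!     Ok(())
//! }
//! ```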
//!
//! ## Data Ingestion
//! - Upload data to Edge Impulse projects
//! - Support for multiple data categories:
//!   - Training data
//!   - Testing data
//!   - Anomaly data
//! - Supported file formats:
//!   - Images (JPG, PNG)
//!   - Audio (WAV)
//!   - Video (MP4, AVI)
//!   - Sensor data (CBOR, JSON, CSV)
//!
//! ## Quick Start Examples
//!
//! ### EIM Mode (Default)
//! ```no_run
//! use edge_impulse_runner::{EdgeImpulseModel, InferenceResult};
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create a new model instance with EIM file
//!     let mut model = EdgeImpulseModel::new("path/to/model.eim")?;
//!
//!     // Prepare normalized features (e.g., image pixels, audio samples)
//!     let features: Vec<f32> = vec![0.1, 0.2, 0.3];
//!
//!     // Run inference
//!     let result = model.infer(features, None)?;
//!
//!     // Process results
//!     match result.result {
//!         InferenceResult::Classification { classification } => {
//!             println!("Classification: {:?}", classification);
//!         }
//!         InferenceResult::ObjectDetection {
//!             bounding_boxes,
//!             classification,
//!         } => {
//!             println!("Detected objects: {:?}", bounding_boxes);
//!             if !classification.is_empty() {
//!                 println!("Classification: {:?}", classification);
//!             }
//!         }
//!         InferenceResult::VisualAnomaly {
//!             visual_anomaly_grid,
//!             visual_anomaly_max,
//!             visual_anomaly_mean,
//!             anomaly,
//!         } => {
//!             let (normalized_anomaly, normalized_max, normalized_mean, normalized_regions) =
//!                 model.normalize_visual_anomaly(
//!                     anomaly,
//!                     visual_anomaly_max,
//!                     visual_anomaly_mean,
//!                     &visual_anomaly_grid.iter()
//!                         .map(|bbox| (bbox.value, bbox.x as u32, bbox.y as u32, bbox.width as u32, bbox.height as u32))
//!                         .collect::<Vec<_>>()
//!                 );
//!             println!("Anomaly score: {:.2}%", normalized_anomaly * 100.0);
//!             println!("Maximum score: {:.2}%", normalized_max * 100.0);
//!             println!("Mean score: {:.2}%", normalized_mean * 100.0);
//!             for (value, x, y, w, h) in normalized_regions {
//!                 println!("Region: score={:.2}%, x={}, y={}, width={}, height={}",
//!                     value * 100.0, x, y, w, h);
//!             }
//!         }
//!     }
//!     Ok(())
//! }
//! ```
//!
//! ### FFI Mode
//! ```no_run
//! use edge_impulse_runner::{EdgeImpulseModel, InferenceResult};
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create a new model instance with FFI mode
//!     let mut model = EdgeImpulseModel::new_ffi(false)?;
//!
//!     // Prepare normalized features (e.g., image pixels, audio samples)
//!     let features: Vec<f32> = vec![0.1, 0.2, 0.3];
//!
//!     // Run inference
//!     let result = model.infer(features, None)?;
//!
//!     // Process results (same as EIM mode)
//!     match result.result {
//!         InferenceResult::Classification { classification } => {
//!             println!("Classification: {:?}", classification);
//!         }
//!         InferenceResult::ObjectDetection {
//!             bounding_boxes,
//!             classification,
//!         } => {
//!             println!("Detected objects: {:?}", bounding_boxes);
//!             if !classification.is_empty() {
//!                 println!("Classification: {:?}", classification);
//!             }
//!         }
//!         InferenceResult::VisualAnomaly { .. } => {
//!             println!("Anomaly detection result");
//!         }
//!     }
//!     Ok(())
//! }
//! ```
//!
//! ### Data Upload
//! ```no_run
//! use edge_impulse_runner::ingestion::{Category, Ingestion, UploadOptions};
//!
//! # async fn run() -> Result<(), Box<dyn std::error::Error>> {
//! // Create client with API key
//! let ingestion = Ingestion::new("your-api-key".to_string());
//!
//! // Upload a file
//! let result = ingestion
//!     .upload_file(
//!         "data.jpg",
//!         Category::Training,
//!         Some("label".to_string()),
//!         Some(UploadOptions {
//!             disallow_duplicates: true,
//!             add_date_id: true,
//!         }),
//!     )
//!     .await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Architecture
//!
//! ### Backend Abstraction
//! The crate uses a trait-based backend system that allows switching between different
//! inference modes:
//!
//! - **EIM Backend**: Uses Unix socket-based IPC to communicate with Edge Impulse model
//!   processes. The protocol is JSON-based and follows a request-response pattern for
//!   model initialization, classification requests, and error handling.
//!
//! - **FFI Backend**: Direct FFI calls to the Edge Impulse C++ SDK, providing faster
//!   startup times and lower latency by eliminating IPC overhead.
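//!
//! Conceptually, a backend boils down to a trait with an inference entry point.
//! The sketch below is illustrative only: the trait, type names, and signatures
//! are invented here and do not mirror the internal `backends` module:
//!
//! ```no_run
//! // Hypothetical backend abstraction, for illustration only.
//! trait InferenceBackend {
//!     /// Run inference on a slice of normalized features.
//!     fn infer(&mut self, features: &[f32]) -> Result<String, String>;
//! }
//!
//! // An EIM-style backend would hold a socket connection to the model process
//! // and exchange JSON request/response messages over it.
//! struct EimBackend {
//!     socket_path: std::path::PathBuf,
//! }
//!
//! impl InferenceBackend for EimBackend {
//!     fn infer(&mut self, _features: &[f32]) -> Result<String, String> {
//!         // Serialize a JSON classify request, write it to the socket,
//!         // then block on the JSON response.
//!         unimplemented!()
//!     }
//! }
//! ```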
//!
//! ### Ingestion API
//! The ingestion module interfaces with the Edge Impulse Ingestion API over HTTPS, supporting
//! both data and file endpoints for uploading samples to Edge Impulse projects.
//!
//! ## Prerequisites
//!
//! Some functionality (particularly video capture) requires GStreamer to be installed:
//! - **macOS**: Install both the runtime and development packages from gstreamer.freedesktop.org
//! - **Linux**: Install `libgstreamer1.0-dev` and the related GStreamer development packages
//!
//! ## Error Handling
//!
//! The crate uses the `EdgeImpulseError` type to provide detailed error information:
//! ```no_run
//! use edge_impulse_runner::{EdgeImpulseModel, EdgeImpulseError};
//!
//! // Match on model creation
//! match EdgeImpulseModel::new("model.eim") {
//!     Ok(mut model) => {
//!         // Match on classification
//!         match model.infer(vec![0.1, 0.2, 0.3], None) {
//!             Ok(result) => println!("Success!"),
//!             Err(EdgeImpulseError::InvalidInput(msg)) => println!("Invalid input: {}", msg),
//!             Err(e) => println!("Other error: {}", e),
//!         }
//!     },
//!     Err(e) => println!("Failed to load model: {}", e),
//! }
//! ```
//!
//! ## Modules
//!
//! - `backends`: Backend abstraction and implementations (EIM, FFI)
//! - `error`: Error types and handling
//! - `ffi`: Safe Rust bindings for the Edge Impulse C++ SDK (when the `ffi` feature is enabled)
//! - `inference`: Model management and inference functionality
//! - `ingestion`: Data upload and project management
//! - `types`: Common types and parameters
//!
//! ## Cargo Features
//!
//! - **`eim`** (default): Enable EIM binary communication mode
//! - **`ffi`**: Enable FFI direct mode (requires the `edge-impulse-ffi-rs` dependency)
//!
//! Only one backend should be enabled at a time. Enabling both features simultaneously
//! is not supported and may cause conflicts.
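//!
//! For example, building with the FFI backend means disabling the default
//! features and enabling `ffi`. A sketch of the dependency declaration (the
//! package name and version shown here are assumptions; check the crate's
//! registry entry or repository for the exact coordinates):
//!
//! ```toml
//! [dependencies]
//! edge-impulse-runner = { version = "x.y", default-features = false, features = ["ffi"] }
//! ```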
pub mod backends;
pub mod error;
pub mod ffi;
pub mod inference;
pub mod ingestion;
pub mod types;

pub use inference::messages::{InferenceResponse, InferenceResult};

pub use error::EdgeImpulseError;
pub use inference::model::EdgeImpulseModel;
pub use types::BoundingBox;
pub use types::ModelParameters;
pub use types::ProjectInfo;
pub use types::SensorType;
pub use types::TimingInfo;