edge_impulse_runner/types.rs
//! Common types and parameters used throughout the Edge Impulse Runner.
//!
//! This module contains the core data structures that define model configuration,
//! project information, and performance metrics. These types are used to configure
//! the model and interpret its outputs.

use serde::{Deserialize, Serialize};

/// The type of anomaly detection supported by the model.
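///
/// # Example
///
/// A minimal sketch of the integer-to-enum conversion implemented below,
/// assuming this module is reachable as `edge_impulse_runner::types`:
///
/// ```
/// use edge_impulse_runner::types::RunnerHelloHasAnomaly;
///
/// assert_eq!(RunnerHelloHasAnomaly::from(2), RunnerHelloHasAnomaly::GMM);
/// // Out-of-range protocol values fall back to "no anomaly detection".
/// assert_eq!(RunnerHelloHasAnomaly::from(42), RunnerHelloHasAnomaly::None);
/// ```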
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RunnerHelloHasAnomaly {
    None = 0,
    KMeans = 1,
    GMM = 2,
    VisualGMM = 3,
}

impl From<u32> for RunnerHelloHasAnomaly {
    fn from(value: u32) -> Self {
        match value {
            0 => Self::None,
            1 => Self::KMeans,
            2 => Self::GMM,
            3 => Self::VisualGMM,
            _ => Self::None,
        }
    }
}

/// Parameters that define a model's configuration and capabilities.
///
/// These parameters are received from the model during initialization and describe
/// the model's input requirements, processing settings, and output characteristics.
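///
/// # Example
///
/// A minimal sketch, assuming this module is reachable as
/// `edge_impulse_runner::types`; in practice these values come from the
/// model during initialization rather than being constructed by hand:
///
/// ```
/// use edge_impulse_runner::types::ModelParameters;
///
/// let params = ModelParameters {
///     image_input_width: 96,
///     image_input_height: 96,
///     image_channel_count: 1,
///     ..ModelParameters::default()
/// };
/// assert_eq!(params.image_resize_mode, "fit");
/// ```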
#[derive(Debug, Deserialize, Clone)]
pub struct ModelParameters {
    /// Number of axes for motion/positional data (e.g., 3 for an xyz accelerometer)
    pub axis_count: u32,
    /// Sampling frequency in Hz for time-series data
    pub frequency: f32,
    /// Type of anomaly detection supported by the model, if any
    /// (see [`RunnerHelloHasAnomaly`])
    #[serde(deserialize_with = "deserialize_anomaly_type")]
    pub has_anomaly: RunnerHelloHasAnomaly,
    /// Whether the model supports object tracking
    #[serde(default)]
    pub has_object_tracking: bool,
    /// Number of color channels in input images (1 = grayscale, 3 = RGB)
    pub image_channel_count: u32,
    /// Number of consecutive frames required for video input
    pub image_input_frames: u32,
    /// Required height of input images in pixels
    pub image_input_height: u32,
    /// Required width of input images in pixels
    pub image_input_width: u32,
    /// Method used to resize input images ("fit" or "fill")
    pub image_resize_mode: String,
    /// Type of inferencing engine (0 = TensorFlow Lite, 1 = TensorFlow.js)
    pub inferencing_engine: u32,
    /// Total number of input features expected by the model
    pub input_features_count: u32,
    /// Time interval between samples in milliseconds
    pub interval_ms: f32,
    /// Number of classification labels
    pub label_count: u32,
    /// Vector of classification labels
    pub labels: Vec<String>,
    /// Type of model ("classification", "object-detection", etc.)
    pub model_type: String,
    /// Type of input sensor (see the [`SensorType`] enum)
    pub sensor: i32,
    /// Size of the processing window for time-series data
    pub slice_size: u32,
    /// Thresholds for the different types of detections
    #[serde(default)]
    pub thresholds: Vec<ModelThreshold>,
    /// Whether the model supports continuous-mode operation
    pub use_continuous_mode: bool,
}

impl Default for ModelParameters {
    fn default() -> Self {
        Self {
            axis_count: 0,
            frequency: 0.0,
            has_anomaly: RunnerHelloHasAnomaly::None,
            has_object_tracking: false,
            image_channel_count: 0,
            image_input_frames: 1,
            image_input_height: 0,
            image_input_width: 0,
            image_resize_mode: String::from("fit"),
            inferencing_engine: 0,
            input_features_count: 0,
            interval_ms: 0.0,
            label_count: 0,
            labels: Vec::new(),
            model_type: String::from("classification"),
            sensor: -1,
            slice_size: 0,
            thresholds: Vec::new(),
            use_continuous_mode: false,
        }
    }
}

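// Helper for `ModelParameters::has_anomaly`: the protocol sends this field as a
// raw integer, so decode it as a `u32` and map it onto the enum (unknown values
// become `None`).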
fn deserialize_anomaly_type<'de, D>(deserializer: D) -> Result<RunnerHelloHasAnomaly, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let value = u32::deserialize(deserializer)?;
    Ok(RunnerHelloHasAnomaly::from(value))
}

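/// A detection threshold reported by the model (see
/// [`ModelParameters::thresholds`]).
///
/// # Example
///
/// A minimal sketch of the serde representation, assuming `serde_json` is
/// available and this module is reachable as `edge_impulse_runner::types`;
/// the internally tagged `type` field selects the variant:
///
/// ```
/// use edge_impulse_runner::types::ModelThreshold;
///
/// let json = r#"{"type": "object_detection", "id": 5, "min_score": 0.6}"#;
/// let threshold: ModelThreshold = serde_json::from_str(json).unwrap();
/// assert!(matches!(
///     threshold,
///     ModelThreshold::ObjectDetection { id: 5, .. }
/// ));
/// ```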
#[derive(Debug, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum ModelThreshold {
    #[serde(rename = "object_detection")]
    ObjectDetection { id: u32, min_score: f32 },
    #[serde(rename = "anomaly_gmm")]
    AnomalyGMM { id: u32, min_anomaly_score: f32 },
    #[serde(rename = "object_tracking")]
    ObjectTracking {
        id: u32,
        keep_grace: u32,
        max_observations: u32,
        threshold: f32,
    },
    #[serde(rename = "unknown")]
    Unknown { id: u32, unknown: f32 },
}

impl Default for ModelThreshold {
    fn default() -> Self {
        Self::ObjectDetection {
            id: 0,
            min_score: 0.5,
        }
    }
}

/// Information about the Edge Impulse project that created the model.
///
/// Contains metadata about the project's origin and version.
#[derive(Deserialize, Debug)]
pub struct ProjectInfo {
    /// Version number of the deployment
    pub deploy_version: u32,
    /// Unique project identifier
    pub id: u32,
    /// Name of the project
    pub name: String,
    /// Username of the project owner
    pub owner: String,
}

/// Performance timing information for different processing stages.
///
/// Provides detailed timing breakdowns for each step of the inference pipeline,
/// useful for performance monitoring and optimization.
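///
/// # Example
///
/// A minimal sketch of summing the stages into an end-to-end figure, assuming
/// this module is reachable as `edge_impulse_runner::types`:
///
/// ```
/// use edge_impulse_runner::types::TimingInfo;
///
/// let timing = TimingInfo {
///     dsp: 1_200,
///     classification: 3_400,
///     anomaly: 0,
///     json: 150,
///     stdin: 80,
/// };
/// let total_us = timing.dsp + timing.classification + timing.anomaly;
/// assert_eq!(total_us, 4_600);
/// ```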
#[derive(Deserialize, Debug)]
pub struct TimingInfo {
    /// Time spent on digital signal processing (DSP) in microseconds
    pub dsp: u32,
    /// Time spent on classification inference in microseconds
    pub classification: u32,
    /// Time spent on anomaly detection in microseconds
    pub anomaly: u32,
    /// Time spent on JSON serialization/deserialization in microseconds
    pub json: u32,
    /// Time spent reading from standard input in microseconds
    pub stdin: u32,
}

/// Represents a detected object's location and classification.
///
/// Used in object detection models to specify where objects were found
/// in an image and their classification details.
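///
/// # Example
///
/// A minimal sketch of serializing a box to JSON, assuming `serde_json` is
/// available and this module is reachable as `edge_impulse_runner::types`:
///
/// ```
/// use edge_impulse_runner::types::BoundingBox;
///
/// let bb = BoundingBox {
///     label: String::from("person"),
///     value: 0.92,
///     x: 10,
///     y: 20,
///     width: 32,
///     height: 48,
/// };
/// let json = serde_json::to_string(&bb).unwrap();
/// assert!(json.contains("\"label\":\"person\""));
/// ```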
#[derive(Debug, Deserialize, Serialize)]
pub struct BoundingBox {
    /// Height of the bounding box in pixels
    pub height: i32,
    /// Classification label for the detected object
    pub label: String,
    /// Confidence score for the detection (0.0 to 1.0)
    pub value: f32,
    /// Width of the bounding box in pixels
    pub width: i32,
    /// X-coordinate of the top-left corner
    pub x: i32,
    /// Y-coordinate of the top-left corner
    pub y: i32,
}

/// Represents the normalized results of visual anomaly detection.
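///
/// The tuple layout is positional rather than named. A hedged reading, based
/// on the field names used elsewhere in the EIM protocol (treat this as an
/// assumption, not a guarantee): `(anomaly score, max grid value, mean grid
/// value, grid cells)`, where each grid cell is `(value, x, y, width, height)`.
///
/// ```
/// use edge_impulse_runner::types::VisualAnomalyResult;
///
/// let result: VisualAnomalyResult = (0.12, 0.80, 0.25, vec![(0.80, 0, 0, 8, 8)]);
/// let (_anomaly, _max, _mean, grid) = result;
/// assert_eq!(grid.len(), 1);
/// ```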
pub type VisualAnomalyResult = (f32, f32, f32, Vec<(f32, u32, u32, u32, u32)>);

/// Represents the type of sensor used for data collection.
///
/// This enum defines the supported sensor types for Edge Impulse models,
/// mapping to the numeric values used in the protocol:
/// - -1 (or any unrecognized value): Unknown
/// - 1: Microphone
/// - 2: Accelerometer
/// - 3: Camera
/// - 4: Positional
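///
/// # Example
///
/// A minimal sketch of the conversion implemented below, assuming this module
/// is reachable as `edge_impulse_runner::types`:
///
/// ```
/// use edge_impulse_runner::types::SensorType;
///
/// assert_eq!(SensorType::from(3), SensorType::Camera);
/// // Unrecognized protocol values map to `Unknown`.
/// assert_eq!(SensorType::from(99), SensorType::Unknown);
/// ```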
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SensorType {
    /// Unknown or unsupported sensor type (-1 or default)
    Unknown = -1,
    /// Microphone sensor for audio input (1)
    Microphone = 1,
    /// Accelerometer sensor for motion data (2)
    Accelerometer = 2,
    /// Camera sensor for image/video input (3)
    Camera = 3,
    /// Positional sensor for location/orientation data (4)
    Positional = 4,
}

impl From<i32> for SensorType {
    fn from(value: i32) -> Self {
        match value {
            -1 => SensorType::Unknown,
            1 => SensorType::Microphone,
            2 => SensorType::Accelerometer,
            3 => SensorType::Camera,
            4 => SensorType::Positional,
            _ => SensorType::Unknown,
        }
    }
}