|
2 | 2 | from collections import namedtuple
|
3 | 3 | from dataclasses import dataclass
|
4 | 4 | from enum import Enum
|
5 |
| -from typing import List, Optional |
| 5 | +from typing import Annotated, List, Literal, Optional, Tuple, Union |
6 | 6 |
|
7 | 7 | from inference_exp.entities import ImageDimensions
|
8 | 8 | from inference_exp.errors import CorruptedModelPackageError
|
9 | 9 | from inference_exp.utils.file_system import read_json, stream_file_lines
|
| 10 | +from pydantic import BaseModel, Field |
10 | 11 |
|
11 | 12 |
|
12 | 13 | def parse_class_names_file(class_names_path: str) -> List[str]:
|
@@ -259,3 +260,178 @@ def parse_class_map_from_environment_file(environment_file_path: str) -> List[st
|
259 | 260 | f"verify its consistency in docs.",
|
260 | 261 | help_url="https://todo",
|
261 | 262 | ) from error
|
| 263 | + |
| 264 | + |
| 265 | +# non_nms_config = { |
| 266 | +# "image_pre_processing": image_pre_processing, |
| 267 | +# "network_input": { |
| 268 | +# "training_input_size": {"height": self.imgsz, "width": self.imgsz}, |
| 269 | +# "dynamic_spatial_size_supported": False, |
| 270 | +# "color_mode": "rgb", |
| 271 | +# "resize_mode": resize_mode, |
| 272 | +# "padding_value": padding_value, |
| 273 | +# "input_channels": 3, |
| 274 | +# "scaling_factor": 255, |
| 275 | +# "normalization": None, |
| 276 | +# }, |
| 277 | +# "post_processing": { |
| 278 | +# "type": "nms", |
| 279 | +# "fused": False, |
| 280 | +# } |
| 281 | +# } |
| 282 | + |
| 283 | + |
class AutoOrient(BaseModel):
    """'auto-orient' pre-processing step config: a single on/off toggle."""

    enabled: bool
| 286 | + |
| 287 | + |
class StaticCrop(BaseModel):
    """'static-crop' pre-processing step config: a fixed crop window.

    NOTE(review): units of the bounds (pixels vs. percent of image size)
    are not evident from this file — confirm against the producer of the
    config before relying on either interpretation.
    """

    enabled: bool
    # Crop window bounds; x grows rightwards, y downwards by convention.
    x_min: int
    x_max: int
    y_min: int
    y_max: int
| 294 | + |
| 295 | + |
class ContrastType(str, Enum):
    """Contrast-enhancement algorithms selectable in pre-processing.

    The ``str`` mixin makes members compare/serialize as their plain
    string values, which is what pydantic needs to validate config JSON.
    """

    # Bases must be (str, Enum): listing Enum first — (Enum, str) — raises
    # TypeError at class-creation time, so the module would not even import.
    ADAPTIVE_EQUALIZATION = "Adaptive Equalization"
    CONTRAST_STRETCHING = "Contrast Stretching"
    HISTOGRAM_EQUALIZATION = "Histogram Equalization"
| 300 | + |
| 301 | + |
class Contrast(BaseModel):
    """'contrast' pre-processing step config: toggle plus algorithm choice."""

    enabled: bool
    # Which contrast-enhancement algorithm to apply (see ContrastType).
    type: ContrastType
| 305 | + |
| 306 | + |
class Grayscale(BaseModel):
    """'grayscale' pre-processing step config: a single on/off toggle."""

    enabled: bool
| 309 | + |
| 310 | + |
class ImagePreProcessing(BaseModel):
    """Dataset-level image pre-processing steps; each is optional.

    Aliases map the kebab-case keys used in the serialized config
    ("auto-orient", "static-crop") onto snake_case attribute names.
    """

    auto_orient: Optional[AutoOrient] = Field(alias="auto-orient", default=None)
    static_crop: Optional[StaticCrop] = Field(alias="static-crop", default=None)
    contrast: Optional[Contrast] = Field(default=None)
    grayscale: Optional[Grayscale] = Field(default=None)
| 316 | + |
| 317 | + |
class TrainingInputSize(BaseModel):
    """Spatial size (pixels) the network was trained with."""

    height: int
    width: int
| 321 | + |
| 322 | + |
class DivisiblePadding(BaseModel):
    """Dynamic-size mode: input must be padded to be divisible by `value`.

    `type` is the discriminator tag used by NetworkInput's
    dynamic_spatial_size_mode union.
    """

    type: Literal["pad-to-be-divisible"]
    value: int
| 326 | + |
| 327 | + |
class AnySizePadding(BaseModel):
    """Dynamic-size mode: any spatial size is accepted (no padding rule).

    `type` is the discriminator tag used by NetworkInput's
    dynamic_spatial_size_mode union.
    """

    type: Literal["any-size"]
| 330 | + |
| 331 | + |
class ColorMode(str, Enum):
    """Channel ordering the network expects for its input tensor."""

    # Bases must be (str, Enum): listing Enum first — (Enum, str) — raises
    # TypeError at class-creation time, so the module would not even import.
    BGR = "bgr"
    RGB = "rgb"
| 335 | + |
| 336 | + |
| 337 | +# non_nms_config = { |
| 338 | +# "image_pre_processing": image_pre_processing, |
| 339 | +# "network_input": { |
| 340 | +# "training_input_size": {"height": self.imgsz, "width": self.imgsz}, |
| 341 | +# "dynamic_spatial_size_supported": False, |
| 342 | +# "color_mode": "rgb", |
| 343 | +# "resize_mode": resize_mode, |
| 344 | +# "padding_value": padding_value, |
| 345 | +# "input_channels": 3, |
| 346 | +# "scaling_factor": 255, |
| 347 | +# "normalization": None, |
| 348 | +# }, |
| 349 | +# "post_processing": { |
| 350 | +# "type": "nms", |
| 351 | +# "fused": False, |
| 352 | +# } |
| 353 | +# } |
| 354 | +# |
| 355 | +# "post_processing": { |
| 356 | +# "type": "nms", |
| 357 | +# "fused": True, |
| 358 | +# "nms_parameters": nms_parameters_cleaned, |
| 359 | +# } |
class ResizeMode(str, Enum):
    """How an arbitrary input image is resized to the network input size.

    NOTE(review): "Stretch to" breaks the kebab-case convention of the other
    values — it is kept verbatim because members must match the strings
    emitted into existing configs.
    """

    # Bases must be (str, Enum): listing Enum first — (Enum, str) — raises
    # TypeError at class-creation time, so the module would not even import.
    STRETCH_TO = "Stretch to"
    LETTERBOX = "letterbox"
    CENTER_CROP = "center-crop"
    FIT_LONGER_EDGE = "fit-longer-edge"
    LETTERBOX_REFLECT_EDGES = "letterbox-reflect-edges"
| 366 | + |
| 367 | + |
| 368 | +Number = Union[int, float] |
| 369 | + |
| 370 | + |
class NetworkInput(BaseModel):
    """Describes the tensor the network consumes and how to build it.

    `dynamic_spatial_size_mode` is a discriminated union keyed on the
    nested models' `type` field; it is only meaningful when
    `dynamic_spatial_size_supported` is True — TODO confirm that coupling
    with the producer of these configs.
    """

    training_input_size: TrainingInputSize
    dynamic_spatial_size_supported: bool
    dynamic_spatial_size_mode: Optional[Union[DivisiblePadding, AnySizePadding]] = (
        Field(discriminator="type", default=None)
    )
    color_mode: ColorMode
    resize_mode: ResizeMode
    # Required key, but explicitly nullable (no default) — callers must
    # always provide it, possibly as null.
    padding_value: Optional[int]
    input_channels: int
    # e.g. 255 to map uint8 pixels into [0, 1]; None means no scaling.
    scaling_factor: Optional[Number] = Field(default=None)
    # Per-channel (mean, std) triples applied after scaling; None disables.
    normalization: Optional[
        Tuple[Tuple[Number, Number, Number], Tuple[Number, Number, Number]]
    ] = Field(default=None)
| 385 | + |
| 386 | + |
class ForwardPassConfiguration(BaseModel):
    """Constraints on running the model's forward pass."""

    # Upper bound on batch size when the runtime batches dynamically.
    max_dynamic_batch_size: int
| 389 | + |
| 390 | + |
class FusedNMSParameters(BaseModel):
    """Parameters baked into a model-fused NMS stage."""

    max_detections: int
    confidence_threshold: float
    iou_threshold: float
    # NOTE(review): declared int but the name suggests a boolean flag
    # (0/1) — confirm whether this should be `bool` before changing the
    # schema, since exporters may emit integer values here.
    class_agnostic: int
| 396 | + |
| 397 | + |
class NMSPostProcessing(BaseModel):
    """Post-processing variant: non-maximum suppression.

    `type` is the discriminator tag for InferenceConfig.post_processing.
    """

    type: Literal["nms"]
    # True when NMS is fused into the exported model graph.
    fused: bool
    # Only expected to be present when `fused` is True — TODO confirm.
    nms_parameters: Optional[FusedNMSParameters] = Field(default=None)
| 402 | + |
| 403 | + |
class SigmoidPostProcessing(BaseModel):
    """Post-processing variant: sigmoid over raw logits.

    `type` is the discriminator tag for InferenceConfig.post_processing.
    """

    type: Literal["sigmoid"]
    # True when the sigmoid is fused into the exported model graph.
    fused: bool
| 407 | + |
| 408 | + |
class SoftMaxPostProcessing(BaseModel):
    """Post-processing variant: softmax over raw logits.

    `type` is the discriminator tag for InferenceConfig.post_processing.
    """

    type: Literal["softmax"]
    # True when the softmax is fused into the exported model graph.
    fused: bool
| 412 | + |
| 413 | + |
class InferenceConfig(BaseModel):
    """Top-level schema of a model package's inference config file.

    `post_processing` is a discriminated union keyed on each variant's
    `type` field ("nms" / "softmax" / "sigmoid").
    """

    image_pre_processing: ImagePreProcessing
    network_input: NetworkInput
    forward_pass: Optional[ForwardPassConfiguration] = Field(default=None)
    post_processing: Optional[
        Union[NMSPostProcessing, SoftMaxPostProcessing, SigmoidPostProcessing]
    ] = Field(default=None, discriminator="type")
| 421 | + |
| 422 | + |
def parse_inference_config(config_path: str) -> InferenceConfig:
    """Load and validate a model package's inference config file.

    The original version read the JSON and then discarded it (returning
    None); it now validates the payload against InferenceConfig and
    returns the parsed model, mirroring the other parse_* helpers in
    this module.

    Args:
        config_path: Path to the inference config JSON file.

    Returns:
        The validated InferenceConfig.

    Raises:
        CorruptedModelPackageError: When the file cannot be read, is not
            a JSON object, or fails schema validation.
    """
    try:
        parsed_config = read_json(path=config_path)
        if not isinstance(parsed_config, dict):
            raise ValueError(
                f"Expected config format is dict, found {type(parsed_config)} instead"
            )
        # pydantic's ValidationError subclasses ValueError, so schema
        # violations are caught by the same except clause as malformed JSON.
        # NOTE(review): model_validate is pydantic v2 API — confirm the
        # project pins pydantic>=2 (use parse_obj on v1).
        return InferenceConfig.model_validate(parsed_config)
    except (IOError, OSError, ValueError) as error:
        raise CorruptedModelPackageError(
            message=f"Inference config file of the model package is malformed: "
            f"{error}. In case that the package is "
            f"hosted on the Roboflow platform - contact support. If you created model package manually, please "
            f"verify its consistency in docs.",
            help_url="https://todo",
        ) from error
0 commit comments