diff --git a/polystar_cv/config/settings.toml b/polystar_cv/config/settings.toml
index c81c609731b41f3cdeaab76d868056e0641be6ef..a0c98c27549d97897da2773e718f14af62f38884 100644
--- a/polystar_cv/config/settings.toml
+++ b/polystar_cv/config/settings.toml
@@ -1,9 +1,8 @@
 [default]
-CAMERA_WIDTH = 1920
-CAMERA_HEIGHT = 1080
-CAMERA_HORIZONTAL_FOV = 120
+CAMERA = 'RASPI_V2'
 MODEL_NAME = 'robots/TRT_ssd_mobilenet_v2_roco.bin'
+OBJECTS_DETECTION_MODEL = "ssd_mobilenet_v2_roco_2018_03_29_20200314_015411_TWITCH_TEMP_733_IMGS_29595steps"
 
 [development]
diff --git a/polystar_cv/polystar/common/dependency_injection.py b/polystar_cv/polystar/common/dependency_injection.py
index 88eca3aadbaef5d55dbce6cb2492254d271eb408..843a7bb28e0a3854af893da1186477273c2a6951 100644
--- a/polystar_cv/polystar/common/dependency_injection.py
+++ b/polystar_cv/polystar/common/dependency_injection.py
@@ -1,13 +1,32 @@
 from dataclasses import dataclass
-from math import pi
+from typing import List
 
-from dynaconf import LazySettings
 from injector import Injector, Module, multiprovider, provider, singleton
+from numpy.core._multiarray_umath import deg2rad
+from tensorflow.python import saved_model
 
-from polystar.common.constants import LABEL_MAP_PATH
+from polystar.common.communication.print_target_sender import PrintTargetSender
+from polystar.common.communication.target_sender_abc import TargetSenderABC
+from polystar.common.constants import LABEL_MAP_PATH, MODELS_DIR
 from polystar.common.models.camera import Camera
 from polystar.common.models.label_map import LabelMap
-from polystar.common.settings import settings
+from polystar.common.settings import Settings, settings
+from polystar.common.target_pipeline.armors_descriptors.armors_color_descriptor import ArmorsColorDescriptor
+from polystar.common.target_pipeline.armors_descriptors.armors_descriptor_abc import ArmorsDescriptorABC
+from polystar.common.target_pipeline.detected_objects.detected_objects_factory import DetectedObjectFactory
+from polystar.common.target_pipeline.detected_objects.detected_robot import DetectedRobot
+from polystar.common.target_pipeline.object_selectors.closest_object_selector import ClosestObjectSelector
+from polystar.common.target_pipeline.object_selectors.object_selector_abc import ObjectSelectorABC
+from polystar.common.target_pipeline.objects_detectors.objects_detector_abc import ObjectsDetectorABC
+from polystar.common.target_pipeline.objects_detectors.tf_model_objects_detector import TFModelObjectsDetector
+from polystar.common.target_pipeline.objects_linker.objects_linker_abs import ObjectsLinkerABC
+from polystar.common.target_pipeline.objects_linker.simple_objects_linker import SimpleObjectsLinker
+from polystar.common.target_pipeline.objects_validators.confidence_object_validator import ConfidenceObjectValidator
+from polystar.common.target_pipeline.objects_validators.objects_validator_abc import ObjectsValidatorABC
+from polystar.common.target_pipeline.target_factories.ratio_simple_target_factory import RatioSimpleTargetFactory
+from polystar.common.target_pipeline.target_factories.target_factory_abc import TargetFactoryABC
+from research.robots.armor_color.pipeline import ArmorColorPipeline
+from research.robots.armor_color.scripts.benchmark import MeanChannels, RedBlueComparisonClassifier
 
 
 def make_injector() -> Injector:
@@ -16,16 +35,61 @@ def make_injector() -> Injector:
 
 @dataclass
 class CommonModule(Module):
-    settings: LazySettings
+    settings: Settings
 
     @provider
     @singleton
     def provide_camera(self) -> Camera:
-        return Camera(
-            self.settings.CAMERA_HORIZONTAL_FOV / 180 * pi, self.settings.CAMERA_WIDTH, self.settings.CAMERA_HEIGHT
-        )
+        if settings.CAMERA == "RASPI_V2":
+            return Camera(
+                horizontal_fov=deg2rad(62.2),
+                vertical_fov=deg2rad(48.8),
+                pixel_size_m=1.12e-6,
+                focal_m=3.04e-3,
+                vertical_resolution=720,
+                horizontal_resolution=1_280,
+            )
+        raise ValueError(f"Camera {settings.CAMERA} not recognized")
 
     @multiprovider
     @singleton
     def provide_label_map(self) -> LabelMap:
         return LabelMap.from_file(LABEL_MAP_PATH)
+
+    @provider
+    @singleton
+    def provide_objects_detector(self, object_factory: DetectedObjectFactory) -> ObjectsDetectorABC:
+        if self.settings.is_dev:
+            tf_model = saved_model.load(str(MODELS_DIR / "robots" / settings.OBJECTS_DETECTION_MODEL / "saved_model"))
+            return TFModelObjectsDetector(object_factory, tf_model.signatures["serving_default"])
+        raise NotImplementedError()
+
+    @multiprovider
+    @singleton
+    def provide_armor_descriptors(self) -> List[ArmorsDescriptorABC]:
+        return [ArmorsColorDescriptor(ArmorColorPipeline.from_pipes([MeanChannels(), RedBlueComparisonClassifier()]))]
+
+    @multiprovider
+    @singleton
+    def provide_objects_validators(self) -> List[ObjectsValidatorABC[DetectedRobot]]:
+        return [ConfidenceObjectValidator(0.6)]
+
+    @provider
+    @singleton
+    def provide_object_selector(self) -> ObjectSelectorABC:
+        return ClosestObjectSelector()
+
+    @provider
+    @singleton
+    def provide_target_factory(self, camera: Camera) -> TargetFactoryABC:
+        return RatioSimpleTargetFactory(camera, 0.3, 0.1)
+
+    @provider
+    @singleton
+    def provide_target_sender(self) -> TargetSenderABC:
+        return PrintTargetSender()
+
+    @provider
+    @singleton
+    def provide_objects_linker(self) -> ObjectsLinkerABC:
+        return SimpleObjectsLinker(min_percentage_intersection=0.8)
diff --git a/polystar_cv/polystar/common/models/camera.py b/polystar_cv/polystar/common/models/camera.py
index 9e0c28f7c00e079e1d4808057f6353079def7df1..7da33a32b0ee61fa59f89afe27a16200f8aea707 100644
--- a/polystar_cv/polystar/common/models/camera.py
+++ b/polystar_cv/polystar/common/models/camera.py
@@ -3,7 +3,11 @@ from dataclasses import dataclass
 
 @dataclass
 class Camera:
-    horizontal_angle: float
+    horizontal_fov: float
+    vertical_fov: float
 
-    w: int
-    h: int
+    pixel_size_m: float
+    focal_m: float
+
+    vertical_resolution: int
+    horizontal_resolution: int
diff --git a/polystar_cv/polystar/common/target_pipeline/target_factories/ratio_target_factory_abc.py b/polystar_cv/polystar/common/target_pipeline/target_factories/ratio_target_factory_abc.py
index 1f8a3917b372eb22ab265813558dba2ddfbad018..8b953049a1c22c3d927c1374452004f43842b1bc 100644
--- a/polystar_cv/polystar/common/target_pipeline/target_factories/ratio_target_factory_abc.py
+++ b/polystar_cv/polystar/common/target_pipeline/target_factories/ratio_target_factory_abc.py
@@ -1,5 +1,5 @@
 from abc import ABC
-from math import sin, asin, sqrt, atan2
+from math import atan2, pi, tan
 from typing import Tuple
 
 import numpy as np
@@ -10,17 +10,16 @@ from polystar.common.target_pipeline.target_factories.target_factory_abc import
 
 
 class RatioTargetFactoryABC(TargetFactoryABC, ABC):
-    def __init__(self, camera: Camera, real_width: float, real_height: float):
-        self._ratio_w = real_width * camera.w // 2 / sin(camera.horizontal_angle)
-        # self._ratio_h = real_height * camera.h // 2 / sin(camera.vertical_angle)
-
-        self._coef_angle = sin(camera.horizontal_angle) / (camera.w // 2)
+    def __init__(self, camera: Camera, real_width_m: float, real_height_m: float):
+        self._vertical_distance_coef = camera.focal_m * real_height_m * camera.pixel_size_m
+        self._vertical_angle_distance = camera.vertical_resolution / (2 * tan(camera.vertical_fov))
+        self._horizontal_angle_distance = camera.horizontal_resolution / (2 * tan(camera.horizontal_fov))
 
     def _calculate_angles(self, obj: DetectedObject, image: np.ndarray) -> Tuple[float, float]:
         x, y = obj.box.x + obj.box.w // 2 - image.shape[1] // 2, image.shape[0] // 2 - obj.box.y + obj.box.h // 2
-        phi = asin(sqrt(x ** 2 + y ** 2) * self._coef_angle)
-        theta = atan2(y, x)
+        phi = -atan2(x, self._horizontal_angle_distance)
+        theta = pi / 2 + atan2(y, self._vertical_angle_distance)
         return phi, theta
 
     def _calculate_distance(self, obj: DetectedObject) -> float:
-        return self._ratio_w / obj.box.w
+        return obj.box.h / self._vertical_distance_coef
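For reference, below is a minimal standalone sketch (not part of the change set) of how the precomputed "angle distance" terms in the new `RatioTargetFactoryABC.__init__` turn a pixel offset into the angle pair returned by `_calculate_angles`. The camera constants are the RASPI_V2 values hard-coded in `provide_camera`; the pixel offsets are made-up example numbers, and `math.radians` stands in for numpy's `deg2rad`.

```python
# Standalone illustration only: mirrors the formulas added in
# RatioTargetFactoryABC.__init__ and _calculate_angles, with hypothetical inputs.
from math import atan2, pi, radians, tan

# RASPI_V2 constants from provide_camera (deg2rad replaced by math.radians here)
horizontal_fov = radians(62.2)
vertical_fov = radians(48.8)
horizontal_resolution = 1_280
vertical_resolution = 720

# Precomputed once per factory, as in the new __init__
horizontal_angle_distance = horizontal_resolution / (2 * tan(horizontal_fov))
vertical_angle_distance = vertical_resolution / (2 * tan(vertical_fov))

# Hypothetical detection: box center 200 px right of and 50 px above the image center
x_offset, y_offset = 200, 50

# Same expressions as _calculate_angles
phi = -atan2(x_offset, horizontal_angle_distance)           # horizontal angle, negative to the right
theta = pi / 2 + atan2(y_offset, vertical_angle_distance)   # vertical angle, offset from pi / 2
print(f"phi={phi:.3f} rad, theta={theta:.3f} rad")
```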