diff --git a/.gitignore b/.gitignore
index 6ad15ab1ede0cbec7aa83b0963be13732409c647..436330e149a06dc985deda5a362185c2f2db9758 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
 # experiments
 /experiments
 /resources/models
+/pipelines
 
 # TF models
 /models
diff --git a/common/doc/change_venv.png b/common/doc/change_venv.png
deleted file mode 100644
index 3e44e4a2ecb8359fc2a5d18ce2a945f2360aa71a..0000000000000000000000000000000000000000
Binary files a/common/doc/change_venv.png and /dev/null differ
diff --git a/dataset/dji_roco/robomaster_Final Tournament/digits/.changes b/dataset/dji_roco/robomaster_Final Tournament/digits/.changes
index 094f578582ac0df9123e392b0be3aec31d5a695b..9edf650edeb7e75cd18f7d73a48f7889cafe0cf2 100644
Binary files a/dataset/dji_roco/robomaster_Final Tournament/digits/.changes and b/dataset/dji_roco/robomaster_Final Tournament/digits/.changes differ
diff --git a/poetry.lock b/poetry.lock
index deddcd7ff49e9e80124967f26303ac8ffa6db9b9..d853b41c27feb9d0eaed9b23e008abaeed50ccfb 100644
Binary files a/poetry.lock and b/poetry.lock differ
diff --git a/common/README.md b/polystar_cv/README.md
similarity index 100%
rename from common/README.md
rename to polystar_cv/README.md
diff --git a/common/polystar/common/__init__.py b/polystar_cv/__init__.py
similarity index 100%
rename from common/polystar/common/__init__.py
rename to polystar_cv/__init__.py
diff --git a/common/config/settings.toml b/polystar_cv/config/settings.toml
similarity index 67%
rename from common/config/settings.toml
rename to polystar_cv/config/settings.toml
index 9e84f3bb1f4390635761e6a4b5dfef9b874e46b7..c81c609731b41f3cdeaab76d868056e0641be6ef 100644
--- a/common/config/settings.toml
+++ b/polystar_cv/config/settings.toml
@@ -3,6 +3,8 @@ CAMERA_WIDTH = 1920
 CAMERA_HEIGHT = 1080
 CAMERA_HORIZONTAL_FOV = 120
 
+MODEL_NAME = 'robots/TRT_ssd_mobilenet_v2_roco.bin'
+
 [development]
 
 [production]
diff --git a/common/polystar/common/communication/__init__.py b/polystar_cv/polystar/__init__.py
similarity index 100%
rename from common/polystar/common/communication/__init__.py
rename to polystar_cv/polystar/__init__.py
diff --git a/common/polystar/common/filters/__init__.py b/polystar_cv/polystar/common/__init__.py
similarity index 100%
rename from common/polystar/common/filters/__init__.py
rename to polystar_cv/polystar/common/__init__.py
diff --git a/common/polystar/common/frame_generators/__init__.py b/polystar_cv/polystar/common/communication/__init__.py
similarity index 100%
rename from common/polystar/common/frame_generators/__init__.py
rename to polystar_cv/polystar/common/communication/__init__.py
diff --git a/common/polystar/common/communication/file_descriptor_target_sender.py b/polystar_cv/polystar/common/communication/file_descriptor_target_sender.py
similarity index 100%
rename from common/polystar/common/communication/file_descriptor_target_sender.py
rename to polystar_cv/polystar/common/communication/file_descriptor_target_sender.py
diff --git a/common/polystar/common/communication/print_target_sender.py b/polystar_cv/polystar/common/communication/print_target_sender.py
similarity index 100%
rename from common/polystar/common/communication/print_target_sender.py
rename to polystar_cv/polystar/common/communication/print_target_sender.py
diff --git a/common/polystar/common/communication/target_sender_abc.py b/polystar_cv/polystar/common/communication/target_sender_abc.py
similarity index 100%
rename from common/polystar/common/communication/target_sender_abc.py
rename to polystar_cv/polystar/common/communication/target_sender_abc.py
diff --git a/common/polystar/common/communication/usb_target_sender.py b/polystar_cv/polystar/common/communication/usb_target_sender.py
similarity index 100%
rename from common/polystar/common/communication/usb_target_sender.py
rename to polystar_cv/polystar/common/communication/usb_target_sender.py
diff --git a/common/polystar/common/constants.py b/polystar_cv/polystar/common/constants.py
similarity index 100%
rename from common/polystar/common/constants.py
rename to polystar_cv/polystar/common/constants.py
diff --git a/common/polystar/common/dependency_injection.py b/polystar_cv/polystar/common/dependency_injection.py
similarity index 80%
rename from common/polystar/common/dependency_injection.py
rename to polystar_cv/polystar/common/dependency_injection.py
index fd22dcb3a7723e8cb922fd8325b907eec27b58bd..88eca3aadbaef5d55dbce6cb2492254d271eb408 100644
--- a/common/polystar/common/dependency_injection.py
+++ b/polystar_cv/polystar/common/dependency_injection.py
@@ -1,16 +1,16 @@
+from dataclasses import dataclass
 from math import pi
 
-from dataclasses import dataclass
 from dynaconf import LazySettings
-from injector import Module, provider, singleton, multiprovider, Injector
+from injector import Injector, Module, multiprovider, provider, singleton
 
 from polystar.common.constants import LABEL_MAP_PATH
 from polystar.common.models.camera import Camera
 from polystar.common.models.label_map import LabelMap
-from polystar.robots_at_robots.globals import settings
+from polystar.common.settings import settings
 
 
-def make_common_injector() -> Injector:
+def make_injector() -> Injector:
     return Injector(modules=[CommonModule(settings)])
 
 
diff --git a/common/polystar/common/image_pipeline/__init__.py b/polystar_cv/polystar/common/filters/__init__.py
similarity index 100%
rename from common/polystar/common/image_pipeline/__init__.py
rename to polystar_cv/polystar/common/filters/__init__.py
diff --git a/common/polystar/common/filters/exclude_filter.py b/polystar_cv/polystar/common/filters/exclude_filter.py
similarity index 100%
rename from common/polystar/common/filters/exclude_filter.py
rename to polystar_cv/polystar/common/filters/exclude_filter.py
diff --git a/common/polystar/common/filters/filter_abc.py b/polystar_cv/polystar/common/filters/filter_abc.py
similarity index 100%
rename from common/polystar/common/filters/filter_abc.py
rename to polystar_cv/polystar/common/filters/filter_abc.py
diff --git a/common/polystar/common/filters/keep_filter.py b/polystar_cv/polystar/common/filters/keep_filter.py
similarity index 100%
rename from common/polystar/common/filters/keep_filter.py
rename to polystar_cv/polystar/common/filters/keep_filter.py
diff --git a/common/polystar/common/filters/pass_through_filter.py b/polystar_cv/polystar/common/filters/pass_through_filter.py
similarity index 100%
rename from common/polystar/common/filters/pass_through_filter.py
rename to polystar_cv/polystar/common/filters/pass_through_filter.py
diff --git a/common/polystar/common/image_pipeline/featurizers/__init__.py b/polystar_cv/polystar/common/frame_generators/__init__.py
similarity index 100%
rename from common/polystar/common/image_pipeline/featurizers/__init__.py
rename to polystar_cv/polystar/common/frame_generators/__init__.py
diff --git a/common/polystar/common/frame_generators/camera_frame_generator.py b/polystar_cv/polystar/common/frame_generators/camera_frame_generator.py
similarity index 100%
rename from common/polystar/common/frame_generators/camera_frame_generator.py
rename to polystar_cv/polystar/common/frame_generators/camera_frame_generator.py
diff --git a/common/polystar/common/frame_generators/cv2_frame_generator_abc.py b/polystar_cv/polystar/common/frame_generators/cv2_frame_generator_abc.py
similarity index 100%
rename from common/polystar/common/frame_generators/cv2_frame_generator_abc.py
rename to polystar_cv/polystar/common/frame_generators/cv2_frame_generator_abc.py
diff --git a/common/polystar/common/frame_generators/fps_video_frame_generator.py b/polystar_cv/polystar/common/frame_generators/fps_video_frame_generator.py
similarity index 100%
rename from common/polystar/common/frame_generators/fps_video_frame_generator.py
rename to polystar_cv/polystar/common/frame_generators/fps_video_frame_generator.py
diff --git a/common/polystar/common/frame_generators/frames_generator_abc.py b/polystar_cv/polystar/common/frame_generators/frames_generator_abc.py
similarity index 100%
rename from common/polystar/common/frame_generators/frames_generator_abc.py
rename to polystar_cv/polystar/common/frame_generators/frames_generator_abc.py
diff --git a/common/polystar/common/frame_generators/video_frame_generator.py b/polystar_cv/polystar/common/frame_generators/video_frame_generator.py
similarity index 100%
rename from common/polystar/common/frame_generators/video_frame_generator.py
rename to polystar_cv/polystar/common/frame_generators/video_frame_generator.py
diff --git a/common/polystar/common/image_pipeline/preprocessors/__init__.py b/polystar_cv/polystar/common/models/__init__.py
similarity index 100%
rename from common/polystar/common/image_pipeline/preprocessors/__init__.py
rename to polystar_cv/polystar/common/models/__init__.py
diff --git a/common/polystar/common/models/box.py b/polystar_cv/polystar/common/models/box.py
similarity index 100%
rename from common/polystar/common/models/box.py
rename to polystar_cv/polystar/common/models/box.py
diff --git a/common/polystar/common/models/camera.py b/polystar_cv/polystar/common/models/camera.py
similarity index 100%
rename from common/polystar/common/models/camera.py
rename to polystar_cv/polystar/common/models/camera.py
diff --git a/common/polystar/common/models/image.py b/polystar_cv/polystar/common/models/image.py
similarity index 82%
rename from common/polystar/common/models/image.py
rename to polystar_cv/polystar/common/models/image.py
index 29a0b13b3f7af5e2d689098932e252af79f446a4..2653d6e7a02e5ac4660f82ec2dd7e8391689c2e9 100644
--- a/common/polystar/common/models/image.py
+++ b/polystar_cv/polystar/common/models/image.py
@@ -5,6 +5,8 @@ from typing import Iterable, List
 import cv2
 import numpy as np
 
+from polystar.common.constants import PROJECT_DIR
+
 Image = np.ndarray
 
 
@@ -20,6 +22,13 @@ class FileImage:
     def __array__(self) -> np.ndarray:
         return self.image
 
+    def __getstate__(self) -> str:
+        return str(self.path.relative_to(PROJECT_DIR))
+
+    def __setstate__(self, rel_path: str):
+        self.path = PROJECT_DIR / rel_path
+        self.image = load_image(self.path)
+
 
 def load_image(image_path: Path, conversion: int = cv2.COLOR_BGR2RGB) -> Image:
     return cv2.cvtColor(cv2.imread(str(image_path), cv2.IMREAD_UNCHANGED), conversion)
diff --git a/common/polystar/common/models/image_annotation.py b/polystar_cv/polystar/common/models/image_annotation.py
similarity index 100%
rename from common/polystar/common/models/image_annotation.py
rename to polystar_cv/polystar/common/models/image_annotation.py
diff --git a/common/polystar/common/models/label_map.py b/polystar_cv/polystar/common/models/label_map.py
similarity index 100%
rename from common/polystar/common/models/label_map.py
rename to polystar_cv/polystar/common/models/label_map.py
diff --git a/common/polystar/common/models/object.py b/polystar_cv/polystar/common/models/object.py
similarity index 81%
rename from common/polystar/common/models/object.py
rename to polystar_cv/polystar/common/models/object.py
index 06727be6f2c591a8a17eb04b85bb1b2faca50286..68860b6d5fc1ccdef9bfea31a77b131faa60ec18 100644
--- a/common/polystar/common/models/object.py
+++ b/polystar_cv/polystar/common/models/object.py
@@ -11,39 +11,41 @@ ArmorNumber = NewType("ArmorNumber", int)
 
 
 class ArmorColor(NoCaseEnum):
-    Grey = auto()
-    Blue = auto()
-    Red = auto()
+    GREY = auto()
+    BLUE = auto()
+    RED = auto()
 
-    Unknown = auto()
+    UNKNOWN = auto()
 
     def __str__(self):
         return self.name.lower()
 
     @property
     def short(self) -> str:
-        return self.name[0] if self != ArmorColor.Unknown else "?"
+        return self.name[0] if self != ArmorColor.UNKNOWN else "?"
 
 
 class ArmorDigit(NoCaseEnum):  # CHANGING
     # Those have real numbers
-    HERO = 1
-    ENGINEER = 2
-    STANDARD_1 = 3
-    STANDARD_2 = 4
-    STANDARD_3 = 5
+    HERO = auto()
+    # ENGINEER = 2
+    STANDARD_1 = auto()
+    STANDARD_2 = auto()
+    # STANDARD_3 = 5
+
     # Those have symbols
-    OUTPOST = auto()
-    BASE = auto()
-    SENTRY = auto()
+    # OUTPOST = auto()
+    # BASE = auto()
+    # SENTRY = auto()
 
     UNKNOWN = auto()
     OUTDATED = auto()  # Old labelisation
 
     def __str__(self) -> str:
-        if self.value <= 5:
-            return f"{self.value} ({self.name.title()})"
-        return self.name.title()
+        # if self.value <= 5:
+        #     return f"{self.value} ({self.name.title()})"
+        # return self.name.title()
+        return f"{self.value + (self.value >= 2)} ({self.name.title()})"  # hacky, but a number is missing (2)
 
     @property
     def short(self) -> str:
diff --git a/common/polystar/common/models/tf_model.py b/polystar_cv/polystar/common/models/tf_model.py
similarity index 100%
rename from common/polystar/common/models/tf_model.py
rename to polystar_cv/polystar/common/models/tf_model.py
diff --git a/common/polystar/common/models/trt_model.py b/polystar_cv/polystar/common/models/trt_model.py
similarity index 100%
rename from common/polystar/common/models/trt_model.py
rename to polystar_cv/polystar/common/models/trt_model.py
diff --git a/common/polystar/common/models/__init__.py b/polystar_cv/polystar/common/pipeline/__init__.py
similarity index 100%
rename from common/polystar/common/models/__init__.py
rename to polystar_cv/polystar/common/pipeline/__init__.py
diff --git a/common/polystar/common/pipeline/__init__.py b/polystar_cv/polystar/common/pipeline/classification/__init__.py
similarity index 100%
rename from common/polystar/common/pipeline/__init__.py
rename to polystar_cv/polystar/common/pipeline/classification/__init__.py
diff --git a/common/polystar/common/pipeline/classification/classification_pipeline.py b/polystar_cv/polystar/common/pipeline/classification/classification_pipeline.py
similarity index 81%
rename from common/polystar/common/pipeline/classification/classification_pipeline.py
rename to polystar_cv/polystar/common/pipeline/classification/classification_pipeline.py
index 56086c97eaa11a2d85369f04b1e2920fcda610dc..c85c7d666a7149fbcb5e83c5178aa9ba583e4591 100644
--- a/common/polystar/common/pipeline/classification/classification_pipeline.py
+++ b/polystar_cv/polystar/common/pipeline/classification/classification_pipeline.py
@@ -1,6 +1,6 @@
 from abc import ABC
 from enum import IntEnum
-from typing import ClassVar, Generic, List, Sequence, Tuple, TypeVar
+from typing import ClassVar, Generic, List, Sequence, Tuple, Type, TypeVar
 
 from numpy import asarray, ndarray, pad
 
@@ -12,11 +12,19 @@ EnumT = TypeVar("EnumT", bound=IntEnum)
 
 
 class ClassificationPipeline(Pipeline, Generic[IT, EnumT], ABC):
-    enum: ClassVar[EnumT]
+    enum: ClassVar[Type[EnumT]]
+
+    classes: ClassVar[List[EnumT]]
+    n_classes: ClassVar[int]
+
+    def __init_subclass__(cls):
+        if hasattr(cls, "enum"):
+            cls.classes = [klass for klass in cls.enum if klass.name not in {"OUTDATED", "UNKNOWN"}]
+            cls.n_classes = len(cls.classes)
 
     def __init__(self, steps: List[Tuple[str, PipeABC]]):
         super().__init__(steps)
-        self.classifier.n_classes = len(self.enum)
+        self.classifier.n_classes = self.n_classes
 
     @property
     def classifier(self) -> ClassifierABC:
@@ -41,20 +49,13 @@ class ClassificationPipeline(Pipeline, Generic[IT, EnumT], ABC):
     def predict_proba_and_classes(self, x: Sequence[IT]) -> Tuple[ndarray, List[EnumT]]:
         proba = asarray(self.predict_proba(x))
         indices = proba.argmax(axis=1)
-        classes = [self.classes_[i] for i in indices]
+        classes = [self.classes[i] for i in indices]
         return proba, classes
 
     def score(self, x: Sequence[IT], y: List[EnumT], **score_params) -> float:
         """It is needed to have a proper CV"""
         return super().score(x, _labels_to_indices(y), **score_params)
 
-    @property
-    def classes_(self) -> List[EnumT]:
-        return list(self.enum)
-
-    def __init_subclass__(cls, **kwargs):
-        assert hasattr(cls, "enum"), f"You need to provide an `enum` ClassVar for {cls.__name__}"
-
 
 def _labels_to_indices(labels: List[EnumT]) -> ndarray:
     return asarray([label.value - 1 for label in labels])
diff --git a/common/polystar/common/pipeline/classification/classifier_abc.py b/polystar_cv/polystar/common/pipeline/classification/classifier_abc.py
similarity index 100%
rename from common/polystar/common/pipeline/classification/classifier_abc.py
rename to polystar_cv/polystar/common/pipeline/classification/classifier_abc.py
diff --git a/polystar_cv/polystar/common/pipeline/classification/keras_classification_pipeline.py b/polystar_cv/polystar/common/pipeline/classification/keras_classification_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..388e33dcbad7083144dcd902bf98e50c3c644330
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/classification/keras_classification_pipeline.py
@@ -0,0 +1,179 @@
+from os.path import join
+from typing import Callable, Dict, List, Sequence, Tuple, Union
+
+from hypertune import HyperTune
+from tensorflow.python.keras.callbacks import Callback, EarlyStopping, TensorBoard
+from tensorflow.python.keras.losses import CategoricalCrossentropy
+from tensorflow.python.keras.metrics import categorical_accuracy
+from tensorflow.python.keras.models import Model
+from tensorflow.python.keras.optimizer_v2.adam import Adam
+
+from polystar.common.pipeline.classification.classification_pipeline import ClassificationPipeline
+from polystar.common.pipeline.keras.classifier import KerasClassifier
+from polystar.common.pipeline.keras.cnn import make_cnn_model
+from polystar.common.pipeline.keras.compilation_parameters import KerasCompilationParameters
+from polystar.common.pipeline.keras.data_preparator import KerasDataPreparator
+from polystar.common.pipeline.keras.distillation import DistillationLoss, DistillationMetric, Distiller
+from polystar.common.pipeline.keras.trainer import KerasTrainer
+from polystar.common.pipeline.keras.transfer_learning import make_transfer_learning_model
+from polystar.common.pipeline.preprocessors.normalise import Normalise
+from polystar.common.pipeline.preprocessors.resize import Resize
+
+
+class KerasClassificationPipeline(ClassificationPipeline):
+    @classmethod
+    def from_model(cls, model: Model, trainer: KerasTrainer, input_shape: Tuple[int, int], name: str):
+        return cls.from_pipes(
+            [Resize(input_shape), Normalise(), KerasClassifier(model=model, trainer=trainer)], name=name
+        )
+
+    @classmethod
+    def from_transfer_learning(
+        cls,
+        logs_dir: str,
+        input_size: int,
+        model_factory: Callable[..., Model],
+        dropout: float,
+        dense_size: int,
+        lr: float,
+        verbose: int = 0,
+        name: str = None,
+    ):
+        input_shape = (input_size, input_size)
+        name = name or f"{model_factory.__name__} ({input_size}) - lr {lr:.1e} - drop {dropout:.1%} - {dense_size}"
+        return cls.from_model(
+            model=make_transfer_learning_model(
+                input_shape=input_shape,
+                n_classes=cls.n_classes,
+                model_factory=model_factory,
+                dropout=dropout,
+                dense_size=dense_size,
+            ),
+            trainer=make_classification_trainer(
+                lr=lr, logs_dir=logs_dir, name=name, verbose=verbose, batch_size=32, steps_per_epoch=100
+            ),
+            name=name,
+            input_shape=input_shape,
+        )
+
+    @classmethod
+    def from_custom_cnn(
+        cls,
+        logs_dir: str,
+        input_size: int,
+        conv_blocks: Sequence[Sequence[int]],
+        dropout: float,
+        dense_size: int,
+        lr: float,
+        verbose: int = 0,
+        name: str = None,
+        batch_size: int = 32,
+        steps_per_epoch: Union[str, int] = 100,
+    ) -> ClassificationPipeline:
+        name = name or (
+            f"cnn - ({input_size}) - lr {lr:.1e} - drop {dropout:.1%} - "
+            + " ".join("_".join(map(str, sizes)) for sizes in conv_blocks)
+            + f" - {dense_size}"
+        )
+        input_shape = (input_size, input_size)
+        return cls.from_model(
+            make_cnn_model(
+                input_shape=input_shape,
+                conv_blocks=conv_blocks,
+                dense_size=dense_size,
+                output_size=cls.n_classes,
+                dropout=dropout,
+            ),
+            trainer=make_classification_trainer(
+                lr=lr,
+                logs_dir=logs_dir,
+                name=name,
+                verbose=verbose,
+                batch_size=batch_size,
+                steps_per_epoch=steps_per_epoch,
+            ),
+            name=name,
+            input_shape=input_shape,
+        )
+
+    @classmethod
+    def from_distillation(
+        cls,
+        teacher_pipeline: ClassificationPipeline,
+        logs_dir: str,
+        conv_blocks: Sequence[Sequence[int]],
+        dropout: float,
+        dense_size: int,
+        lr: float,
+        temperature: float,
+        verbose: int = 0,
+        name: str = None,
+    ):
+        input_shape: Tuple[int, int] = teacher_pipeline.named_steps["Resize"].size
+        name = name or (
+            f"distiled - temp {temperature:.1e}"
+            f" - cnn - ({input_shape[0]}) - lr {lr:.1e} - drop {dropout:.1%} - "
+            + " ".join("_".join(map(str, sizes)) for sizes in conv_blocks)
+            + f" - {dense_size}"
+        )
+        return cls.from_model(
+            model=make_cnn_model(
+                input_shape, conv_blocks=conv_blocks, dense_size=dense_size, output_size=cls.n_classes, dropout=dropout,
+            ),
+            trainer=KerasTrainer(
+                model_preparator=Distiller(temperature=temperature, teacher_model=teacher_pipeline.classifier.model),
+                compilation_parameters=KerasCompilationParameters(
+                    loss=DistillationLoss(temperature=temperature, n_classes=cls.n_classes),
+                    metrics=[DistillationMetric(categorical_accuracy, n_classes=cls.n_classes)],
+                    optimizer=Adam(lr),
+                ),
+                callbacks=make_classification_callbacks(join(logs_dir, name)),
+                verbose=verbose,
+            ),
+            name=name,
+            input_shape=input_shape,
+        )
+
+
+def make_classification_callbacks(log_dir: str) -> List[Callback]:
+    return [
+        EarlyStopping(verbose=0, patience=7, restore_best_weights=True, monitor="val_categorical_accuracy"),
+        TensorBoard(log_dir=log_dir),
+        # HyperTuneClassificationCallback(),
+    ]
+
+
+def make_classification_trainer(
+    lr: float, logs_dir: str, name: str, verbose: int, batch_size: int, steps_per_epoch: Union[str, int]
+) -> KerasTrainer:
+    return KerasTrainer(
+        compilation_parameters=KerasCompilationParameters(
+            loss=CategoricalCrossentropy(from_logits=False), metrics=[categorical_accuracy], optimizer=Adam(lr)
+        ),
+        data_preparator=KerasDataPreparator(batch_size=batch_size, steps=steps_per_epoch),
+        callbacks=make_classification_callbacks(join(logs_dir, name)),
+        verbose=verbose,
+    )
+
+
+class HyperTuneClassificationCallback(Callback):
+    def __init__(self):
+        super().__init__()
+        self.hpt = HyperTune()
+        self.best_accuracy_epoch = (0, -1)
+
+    def on_epoch_end(self, epoch: int, logs: Dict = None):
+        accuracy = logs["val_categorical_accuracy"]
+        self._report(accuracy, epoch)
+        self.best_accuracy_epoch = max(self.best_accuracy_epoch, (accuracy, epoch))
+
+    def on_train_begin(self, logs=None):
+        self.best_accuracy_epoch = (0, -1)
+
+    def on_train_end(self, logs=None):
+        self._report(*self.best_accuracy_epoch)
+
+    def _report(self, accuracy: float, epoch: int):
+        self.hpt.report_hyperparameter_tuning_metric(
+            hyperparameter_metric_tag="val_accuracy", metric_value=accuracy, global_step=epoch
+        )
diff --git a/common/polystar/common/pipeline/classification/random_model.py b/polystar_cv/polystar/common/pipeline/classification/random_model.py
similarity index 100%
rename from common/polystar/common/pipeline/classification/random_model.py
rename to polystar_cv/polystar/common/pipeline/classification/random_model.py
diff --git a/common/polystar/common/pipeline/classification/rule_based_classifier.py b/polystar_cv/polystar/common/pipeline/classification/rule_based_classifier.py
similarity index 100%
rename from common/polystar/common/pipeline/classification/rule_based_classifier.py
rename to polystar_cv/polystar/common/pipeline/classification/rule_based_classifier.py
diff --git a/common/polystar/common/pipeline/concat.py b/polystar_cv/polystar/common/pipeline/concat.py
similarity index 100%
rename from common/polystar/common/pipeline/concat.py
rename to polystar_cv/polystar/common/pipeline/concat.py
diff --git a/common/polystar/common/pipeline/classification/__init__.py b/polystar_cv/polystar/common/pipeline/featurizers/__init__.py
similarity index 100%
rename from common/polystar/common/pipeline/classification/__init__.py
rename to polystar_cv/polystar/common/pipeline/featurizers/__init__.py
diff --git a/common/polystar/common/image_pipeline/featurizers/histogram_2d.py b/polystar_cv/polystar/common/pipeline/featurizers/histogram_2d.py
similarity index 51%
rename from common/polystar/common/image_pipeline/featurizers/histogram_2d.py
rename to polystar_cv/polystar/common/pipeline/featurizers/histogram_2d.py
index adbf0e4845db408f936e3976779d11fefa2fc3cc..d20d6ca2bea58516fa54c7d848fe134bc23ebc95 100644
--- a/common/polystar/common/image_pipeline/featurizers/histogram_2d.py
+++ b/polystar_cv/polystar/common/pipeline/featurizers/histogram_2d.py
@@ -12,12 +12,15 @@ class Histogram2D(PipeABC):
     bins: int = 8
 
     def transform_single(self, image: Image) -> ndarray:
-        return array([self._channel_hist(image, channel) for channel in range(3)]).ravel()
-
-    def _channel_hist(self, image: Image, channel: int) -> ndarray:
-        hist = cv2.calcHist([image], [channel], None, [self.bins], [0, 256], accumulate=False).ravel()
-        return hist / hist.sum()
+        return array(
+            [calculate_normalized_channel_histogram(image, channel, self.bins) for channel in range(3)]
+        ).ravel()
 
     @property
     def name(self) -> str:
         return "hist"
+
+
+def calculate_normalized_channel_histogram(image: Image, channel: int, bins: int) -> ndarray:
+    hist = cv2.calcHist([image], [channel], None, [bins], [0, 256], accumulate=False).ravel()
+    return hist / hist.sum()
diff --git a/polystar_cv/polystar/common/pipeline/featurizers/histogram_blocs_2d.py b/polystar_cv/polystar/common/pipeline/featurizers/histogram_blocs_2d.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcef9d04ba80d397609e5936c2c6b5ccb37e08e2
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/featurizers/histogram_blocs_2d.py
@@ -0,0 +1,30 @@
+from dataclasses import dataclass
+from itertools import chain
+from typing import Iterable
+
+from numpy import array_split
+from numpy.core._multiarray_umath import array, ndarray
+
+from polystar.common.models.image import Image
+from polystar.common.pipeline.featurizers.histogram_2d import calculate_normalized_channel_histogram
+from polystar.common.pipeline.pipe_abc import PipeABC
+
+
+@dataclass
+class HistogramBlocs2D(PipeABC):
+    bins: int = 8
+    rows: int = 2
+    cols: int = 3
+
+    def transform_single(self, image: Image) -> ndarray:
+        return array(
+            [
+                calculate_normalized_channel_histogram(bloc, channel, self.bins)
+                for channel in range(3)
+                for bloc in _split_images_in_blocs(image, self.rows, self.cols)
+            ]
+        ).ravel()
+
+
+def _split_images_in_blocs(image: Image, n_rows: int, n_cols: int) -> Iterable[Image]:
+    return chain.from_iterable(array_split(column, n_rows, axis=0) for column in array_split(image, n_cols, axis=1))
diff --git a/common/polystar/common/target_pipeline/__init__.py b/polystar_cv/polystar/common/pipeline/keras/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/__init__.py
rename to polystar_cv/polystar/common/pipeline/keras/__init__.py
diff --git a/polystar_cv/polystar/common/pipeline/keras/classifier.py b/polystar_cv/polystar/common/pipeline/keras/classifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..8568a878692d293ec2423d74573ddd66600ff7e7
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/keras/classifier.py
@@ -0,0 +1,54 @@
+from copy import copy
+from tempfile import NamedTemporaryFile
+from typing import Dict, List, Optional, Sequence
+
+from numpy import asarray
+from tensorflow.python.keras.models import Model, load_model
+from tensorflow.python.keras.utils.np_utils import to_categorical
+
+from polystar.common.models.image import Image
+from polystar.common.pipeline.classification.classifier_abc import ClassifierABC
+from polystar.common.pipeline.keras.trainer import KerasTrainer
+from polystar.common.utils.registry import registry
+
+
+@registry.register()
+class KerasClassifier(ClassifierABC):
+    def __init__(self, model: Model, trainer: KerasTrainer):
+        self.model = model
+        self.trainer: Optional[KerasTrainer] = trainer
+
+    def fit(self, images: List[Image], labels: List[int], validation_size: int) -> "KerasClassifier":
+        assert self.trainable, "You can't train an un-pickled classifier"
+        images = asarray(images)
+        labels = to_categorical(asarray(labels), self.n_classes)
+        train_images, train_labels = images[:-validation_size], labels[:-validation_size]
+        val_images, val_labels = images[-validation_size:], labels[-validation_size:]
+
+        self.trainer.train(self.model, train_images, train_labels, val_images, val_labels)
+
+        return self
+
+    def predict_proba(self, examples: List[Image]) -> Sequence[float]:
+        return self.model.predict(asarray(examples))
+
+    def __getstate__(self) -> Dict:
+        with NamedTemporaryFile(suffix=".hdf5", delete=True) as fd:
+            self.model.save(fd.name, overwrite=True, include_optimizer=False)
+            model_str = fd.read()
+        state = copy(self.__dict__)
+        state.pop("model")
+        state.pop("trainer")
+        return {**state, "model_str": model_str}
+
+    def __setstate__(self, state: Dict):
+        self.__dict__.update(state)
+        with NamedTemporaryFile(suffix=".hdf5", delete=True) as fd:
+            fd.write(state.pop("model_str"))
+            fd.flush()
+            self.model = load_model(fd.name)
+        self.trainer = None
+
+    @property
+    def trainable(self) -> bool:
+        return self.trainer is not None
diff --git a/polystar_cv/polystar/common/pipeline/keras/cnn.py b/polystar_cv/polystar/common/pipeline/keras/cnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e3cecfaacf29b9229d9e62827015c3cf509f250
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/keras/cnn.py
@@ -0,0 +1,27 @@
+from typing import Sequence, Tuple
+
+from tensorflow.python.keras import Input, Sequential
+from tensorflow.python.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Softmax
+
+
+def make_cnn_model(
+    input_shape: Tuple[int, int],
+    conv_blocks: Sequence[Sequence[int]],
+    dense_size: int,
+    output_size: int,
+    dropout: float,
+) -> Sequential:
+    model = Sequential()
+    model.add(Input((*input_shape, 3)))
+
+    for conv_sizes in conv_blocks:
+        for size in conv_sizes:
+            model.add(Conv2D(size, (3, 3), activation="relu"))
+        model.add(MaxPooling2D())
+
+    model.add(Flatten())
+    model.add(Dense(dense_size))
+    model.add(Dropout(dropout))
+    model.add(Dense(output_size))
+    model.add(Softmax())
+    return model
diff --git a/polystar_cv/polystar/common/pipeline/keras/compilation_parameters.py b/polystar_cv/polystar/common/pipeline/keras/compilation_parameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bca53cc4457f9d8d0d216353a4c6bdda44b88d6
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/keras/compilation_parameters.py
@@ -0,0 +1,14 @@
+from dataclasses import dataclass
+from typing import Callable, Dict, List, Optional, Union
+
+from tensorflow.python.keras.losses import Loss
+from tensorflow.python.keras.metrics import Metric
+from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
+
+
@dataclass
class KerasCompilationParameters:
    """Keyword arguments forwarded verbatim to keras `Model.compile`."""

    # Optimizer instance or keras string alias (e.g. "adam").
    optimizer: Union[str, OptimizerV2]
    # Loss instance, callable, or keras string alias.
    loss: Union[str, Callable, Loss]
    # Metrics reported during training and evaluation.
    metrics: List[Union[str, Callable, Metric]]
    # Optional per-output loss weighting (multi-output models only).
    loss_weights: Optional[Dict[str, float]] = None
diff --git a/polystar_cv/polystar/common/pipeline/keras/data_preparator.py b/polystar_cv/polystar/common/pipeline/keras/data_preparator.py
new file mode 100644
index 0000000000000000000000000000000000000000..1120ac0b580862fe88666e752077b403083633bd
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/keras/data_preparator.py
@@ -0,0 +1,15 @@
+from typing import Any, Tuple, Union
+
+from numpy.core._multiarray_umath import ndarray
+from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
+
+
class KerasDataPreparator:
    """Wraps training arrays in a shuffled generator for keras `Model.fit`.

    Defaults make the class constructible with no arguments, so it can serve
    as a dataclass `default_factory` (see KerasTrainer).
    """

    def __init__(self, batch_size: int = 32, steps: Union[str, int] = "auto"):
        self.steps = steps
        self.batch_size = batch_size

    def prepare_training_data(self, images: ndarray, labels: ndarray) -> Tuple[Any, int]:
        """Return a (generator, steps_per_epoch) pair for `Model.fit`."""
        train_datagen = ImageDataGenerator()
        if isinstance(self.steps, int):
            steps = self.steps
        else:
            # Bug fix: true division returned a float, but steps_per_epoch must
            # be an int. Ceil-divide so the final partial batch counts as a step.
            steps = -(-len(images) // self.batch_size)
        return train_datagen.flow(images, labels, batch_size=self.batch_size, shuffle=True), steps
diff --git a/polystar_cv/polystar/common/pipeline/keras/distillation.py b/polystar_cv/polystar/common/pipeline/keras/distillation.py
new file mode 100644
index 0000000000000000000000000000000000000000..79b42c630feaedb9e76f591d4748e54efac8922a
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/keras/distillation.py
@@ -0,0 +1,58 @@
+from typing import Callable
+
+from tensorflow.python.keras import Input, Model, Sequential
+from tensorflow.python.keras.layers import Softmax, concatenate
+from tensorflow.python.keras.losses import KLDivergence
+from tensorflow.python.keras.models import Model
+from tensorflow.python.ops.nn_ops import softmax
+
+from polystar.common.pipeline.keras.model_preparator import KerasModelPreparator
+
+
class DistillationLoss(KLDivergence):
    """KL-divergence between temperature-softened teacher and student distributions.

    Expects `y_pred` to be the concatenation [teacher_logits, student_logits]
    along axis 1, as produced by `Distiller.prepare_model`; `y_true` is ignored
    because the teacher's output is the target.
    """

    def __init__(self, temperature: float, n_classes: int):
        super().__init__()
        self.n_classes = n_classes
        self.temperature = temperature

    def __call__(self, y_true, y_pred, sample_weight=None):
        # Split the concatenated logits back into teacher / student halves.
        teacher_logits, student_logits = y_pred[:, : self.n_classes], y_pred[:, self.n_classes :]
        # Soften both distributions with the same temperature before the KL term.
        return super().__call__(
            softmax(teacher_logits / self.temperature, axis=1),
            softmax(student_logits / self.temperature, axis=1),
            sample_weight=sample_weight,
        )
+
+
class DistillationMetric:
    """Adapts a plain metric so it scores only the student half of a distillation output."""

    def __init__(self, metric: Callable, n_classes: int):
        self.n_classes = n_classes
        self.metric = metric
        # Keras reads __name__ to label the metric in training logs.
        self.__name__ = metric.__name__

    def __call__(self, y_true, y_pred):
        # y_pred is [teacher_logits, student_logits]; the teacher part is dropped.
        student_logits = y_pred[:, self.n_classes :]
        return self.metric(y_true, student_logits)
+
+
class Distiller(KerasModelPreparator):
    """Wraps a student model so it outputs [teacher_logits, student_logits].

    The returned model concatenates the frozen teacher's pre-softmax output with
    the student's pre-softmax output, in the order DistillationLoss slices them.
    """

    def __init__(
        self, teacher_model: Model, temperature: float,
    ):
        self.teacher_model = teacher_model
        self.temperature = temperature
        # The teacher must end with an explicit Softmax layer, which is stripped
        # below so raw logits are exposed.
        assert isinstance(teacher_model.layers[-1], Softmax)

    def prepare_model(self, model: Model) -> Model:
        # The student must likewise end with an explicit Softmax layer.
        assert isinstance(model.layers[-1], Softmax)

        # Freeze the teacher: only the student's weights are trained.
        self.teacher_model.trainable = False

        inputs = Input(shape=model.input.shape[1:])

        # [:-1] drops each model's final Softmax; teacher logits come first,
        # student logits second (relied upon by DistillationLoss/DistillationMetric).
        return Model(
            inputs=inputs,
            outputs=concatenate(
                [Sequential(self.teacher_model.layers[:-1])(inputs), Sequential(model.layers[:-1])(inputs)]
            ),
        )
diff --git a/polystar_cv/polystar/common/pipeline/keras/model_preparator.py b/polystar_cv/polystar/common/pipeline/keras/model_preparator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e41e03fab23f261e8b1e119bad019c95d21205fd
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/keras/model_preparator.py
@@ -0,0 +1,6 @@
+from tensorflow.python.keras.models import Model
+
+
class KerasModelPreparator:
    """No-op base class: hook for transforming a model before compilation."""

    def prepare_model(self, model: Model) -> Model:
        # Identity by default; subclasses override (e.g. Distiller wraps the model).
        return model
diff --git a/polystar_cv/polystar/common/pipeline/keras/trainer.py b/polystar_cv/polystar/common/pipeline/keras/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..9db4ba7716ccb2efb09e03440cb0400ff4de40e3
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/keras/trainer.py
@@ -0,0 +1,41 @@
+from dataclasses import dataclass, field
+from typing import List
+
+from numpy.core._multiarray_umath import ndarray
+from tensorflow.python.keras.callbacks import Callback
+from tensorflow.python.keras.models import Model
+
+from polystar.common.pipeline.keras.compilation_parameters import KerasCompilationParameters
+from polystar.common.pipeline.keras.data_preparator import KerasDataPreparator
+from polystar.common.pipeline.keras.model_preparator import KerasModelPreparator
+
+
@dataclass
class KerasTrainer:
    """Prepares, compiles and fits a Keras model on image/label arrays."""

    compilation_parameters: KerasCompilationParameters
    callbacks: List[Callback]
    # Bug fix: default_factory must be a zero-argument callable, but
    # KerasDataPreparator.__init__ requires batch_size and steps — the bare
    # class reference raised TypeError whenever the default was used.
    data_preparator: KerasDataPreparator = field(
        default_factory=lambda: KerasDataPreparator(batch_size=32, steps="auto")
    )
    model_preparator: KerasModelPreparator = field(default_factory=KerasModelPreparator)
    max_epochs: int = 300
    verbose: int = 0

    def train(
        self,
        model: Model,
        train_images: ndarray,
        train_labels: ndarray,
        validation_images: ndarray,
        validation_labels: ndarray,
    ):
        """Compile `model` in place and fit it on the prepared training data."""
        model = self.model_preparator.prepare_model(model)
        # __dict__ (not dataclasses.asdict) on purpose: asdict would deep-copy
        # the optimizer/loss/metric objects.
        model.compile(**self.compilation_parameters.__dict__)
        train_data, steps = self.data_preparator.prepare_training_data(train_images, train_labels)

        model.fit(
            x=train_data,
            validation_data=(validation_images, validation_labels),
            steps_per_epoch=steps,
            epochs=self.max_epochs,
            callbacks=self.callbacks,
            verbose=self.verbose,
        )
diff --git a/polystar_cv/polystar/common/pipeline/keras/transfer_learning.py b/polystar_cv/polystar/common/pipeline/keras/transfer_learning.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6a7b4747e2186f9f61672538359bd51e2fdd291
--- /dev/null
+++ b/polystar_cv/polystar/common/pipeline/keras/transfer_learning.py
@@ -0,0 +1,24 @@
+from typing import Callable, Tuple
+
+from tensorflow.python.keras import Input, Model, Sequential
+from tensorflow.python.keras.layers import Dense, Dropout, Flatten, Softmax
+from tensorflow.python.keras.models import Model
+
+
def make_transfer_learning_model(
    input_shape: Tuple[int, int], n_classes: int, model_factory: Callable[..., Model], dropout: float, dense_size: int,
) -> Sequential:
    """Stack a pretrained (imagenet) backbone with a fresh dense classification head."""
    full_shape = (*input_shape, 3)
    backbone: Model = model_factory(weights="imagenet", input_shape=full_shape, include_top=False)

    head = Sequential()
    head.add(Input(full_shape))
    head.add(backbone)
    head.add(Flatten())
    head.add(Dense(dense_size, activation="relu"))
    head.add(Dropout(dropout))
    head.add(Dense(n_classes))
    head.add(Softmax())
    return head
diff --git a/common/polystar/common/pipeline/pipe_abc.py b/polystar_cv/polystar/common/pipeline/pipe_abc.py
similarity index 100%
rename from common/polystar/common/pipeline/pipe_abc.py
rename to polystar_cv/polystar/common/pipeline/pipe_abc.py
diff --git a/common/polystar/common/pipeline/pipeline.py b/polystar_cv/polystar/common/pipeline/pipeline.py
similarity index 81%
rename from common/polystar/common/pipeline/pipeline.py
rename to polystar_cv/polystar/common/pipeline/pipeline.py
index 52ed2e2380d8933b3b6ca8ab861d2e37742924e2..e09d1305f9aa1ec5e0054ac788b174949c8d3c57 100644
--- a/common/polystar/common/pipeline/pipeline.py
+++ b/polystar_cv/polystar/common/pipeline/pipeline.py
@@ -6,10 +6,12 @@ from polystar.common.pipeline.classification.classifier_abc import ClassifierABC
 from polystar.common.pipeline.pipe_abc import PipeABC, get_pipes_names_without_repetitions
 from polystar.common.utils.named_mixin import NamedMixin
 
+Pipes = List[Union[PipeABC, ClassifierABC]]
+
 
 class Pipeline(Pipeline, NamedMixin):
     @classmethod
-    def from_pipes(cls, pipes: List[Union[PipeABC, ClassifierABC]], name: str = None) -> "Pipeline":
+    def from_pipes(cls, pipes: Pipes, name: str = None) -> "Pipeline":
         names = get_pipes_names_without_repetitions(pipes)
         rv = cls(list(zip(names, pipes)))
         rv.name = name or "-".join(names)
diff --git a/common/polystar/common/target_pipeline/armors_descriptors/__init__.py b/polystar_cv/polystar/common/pipeline/preprocessors/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/armors_descriptors/__init__.py
rename to polystar_cv/polystar/common/pipeline/preprocessors/__init__.py
diff --git a/common/polystar/common/image_pipeline/preprocessors/normalise.py b/polystar_cv/polystar/common/pipeline/preprocessors/normalise.py
similarity index 74%
rename from common/polystar/common/image_pipeline/preprocessors/normalise.py
rename to polystar_cv/polystar/common/pipeline/preprocessors/normalise.py
index a00c8d0d31d6445b476e88ad5fea20eb7bf09e3d..0f840293630b85c8e8ab9261e2d05e827339ed1b 100644
--- a/common/polystar/common/image_pipeline/preprocessors/normalise.py
+++ b/polystar_cv/polystar/common/pipeline/preprocessors/normalise.py
@@ -1,7 +1,9 @@
 from polystar.common.models.image import Image
 from polystar.common.pipeline.pipe_abc import PipeABC
+from polystar.common.utils.registry import registry
 
 
@registry.register()
class Normalise(PipeABC):
    """Scale pixel values by 1/255 (maps 8-bit images into [0, 1] floats)."""

    def transform_single(self, image: Image) -> Image:
        # True division promotes the integer image to float.
        return image / 255
diff --git a/common/polystar/common/image_pipeline/preprocessors/resize.py b/polystar_cv/polystar/common/pipeline/preprocessors/resize.py
similarity index 82%
rename from common/polystar/common/image_pipeline/preprocessors/resize.py
rename to polystar_cv/polystar/common/pipeline/preprocessors/resize.py
index 6afbc2b112b787659de43c8575d61e806d969ae7..c239e0a2be3a6bfe66d011b1d97cf862f9cdc707 100644
--- a/common/polystar/common/image_pipeline/preprocessors/resize.py
+++ b/polystar_cv/polystar/common/pipeline/preprocessors/resize.py
@@ -4,8 +4,10 @@ from cv2.cv2 import resize
 
 from polystar.common.models.image import Image
 from polystar.common.pipeline.pipe_abc import PipeABC
+from polystar.common.utils.registry import registry
 
 
+@registry.register()
 class Resize(PipeABC):
     def __init__(self, size: Tuple[int, int]):
         self.size = size
diff --git a/common/polystar/common/image_pipeline/preprocessors/rgb_to_hsv.py b/polystar_cv/polystar/common/pipeline/preprocessors/rgb_to_hsv.py
similarity index 100%
rename from common/polystar/common/image_pipeline/preprocessors/rgb_to_hsv.py
rename to polystar_cv/polystar/common/pipeline/preprocessors/rgb_to_hsv.py
diff --git a/common/polystar/common/settings.py b/polystar_cv/polystar/common/settings.py
similarity index 69%
rename from common/polystar/common/settings.py
rename to polystar_cv/polystar/common/settings.py
index 943fa5fb1d23ba5a280356f49e659b37138272c7..23b13f2a08ec612b40073c131cdc3e172147f516 100644
--- a/common/polystar/common/settings.py
+++ b/polystar_cv/polystar/common/settings.py
@@ -17,12 +17,15 @@ class Settings(LazySettings):
 
 
 def _config_file_for_project(project_name: str) -> Path:
-    return PROJECT_DIR / project_name / "config" / "settings.toml"
+    return PROJECT_DIR / "config" / "settings.toml"
 
 
-def make_settings(project_name: str) -> LazySettings:
+def make_settings() -> LazySettings:
     return LazySettings(
         SILENT_ERRORS_FOR_DYNACONF=False,
-        SETTINGS_FILE_FOR_DYNACONF=f"{_config_file_for_project('common')},{_config_file_for_project(project_name)}",
+        SETTINGS_FILE_FOR_DYNACONF=f"{PROJECT_DIR  / 'config' / 'settings.toml'}",
         ENV_SWITCHER_FOR_DYNACONF="POLYSTAR_ENV",
     )
+
+
+settings = make_settings()
diff --git a/common/polystar/common/target_pipeline/detected_objects/__init__.py b/polystar_cv/polystar/common/target_pipeline/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/detected_objects/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/__init__.py
diff --git a/common/polystar/common/target_pipeline/object_selectors/__init__.py b/polystar_cv/polystar/common/target_pipeline/armors_descriptors/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/object_selectors/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/armors_descriptors/__init__.py
diff --git a/common/polystar/common/target_pipeline/armors_descriptors/armors_color_descriptor.py b/polystar_cv/polystar/common/target_pipeline/armors_descriptors/armors_color_descriptor.py
similarity index 100%
rename from common/polystar/common/target_pipeline/armors_descriptors/armors_color_descriptor.py
rename to polystar_cv/polystar/common/target_pipeline/armors_descriptors/armors_color_descriptor.py
diff --git a/common/polystar/common/target_pipeline/armors_descriptors/armors_descriptor_abc.py b/polystar_cv/polystar/common/target_pipeline/armors_descriptors/armors_descriptor_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/armors_descriptors/armors_descriptor_abc.py
rename to polystar_cv/polystar/common/target_pipeline/armors_descriptors/armors_descriptor_abc.py
diff --git a/common/polystar/common/target_pipeline/debug_pipeline.py b/polystar_cv/polystar/common/target_pipeline/debug_pipeline.py
similarity index 100%
rename from common/polystar/common/target_pipeline/debug_pipeline.py
rename to polystar_cv/polystar/common/target_pipeline/debug_pipeline.py
diff --git a/common/polystar/common/target_pipeline/objects_detectors/__init__.py b/polystar_cv/polystar/common/target_pipeline/detected_objects/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_detectors/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/detected_objects/__init__.py
diff --git a/common/polystar/common/target_pipeline/detected_objects/detected_armor.py b/polystar_cv/polystar/common/target_pipeline/detected_objects/detected_armor.py
similarity index 97%
rename from common/polystar/common/target_pipeline/detected_objects/detected_armor.py
rename to polystar_cv/polystar/common/target_pipeline/detected_objects/detected_armor.py
index 613385b307eff37a5ae5f2484bda366e2f3e96e5..3bfaf2b654f30647057b271891f414db70970413 100644
--- a/common/polystar/common/target_pipeline/detected_objects/detected_armor.py
+++ b/polystar_cv/polystar/common/target_pipeline/detected_objects/detected_armor.py
@@ -27,7 +27,7 @@ class DetectedArmor(DetectedObject):
             self._color = ArmorColor(self.colors_proba.argmax() + 1)
             return self._color
 
-        return ArmorColor.Unknown
+        return ArmorColor.UNKNOWN
 
     @property
     def digit(self) -> ArmorDigit:
diff --git a/common/polystar/common/target_pipeline/detected_objects/detected_object.py b/polystar_cv/polystar/common/target_pipeline/detected_objects/detected_object.py
similarity index 100%
rename from common/polystar/common/target_pipeline/detected_objects/detected_object.py
rename to polystar_cv/polystar/common/target_pipeline/detected_objects/detected_object.py
diff --git a/common/polystar/common/target_pipeline/detected_objects/detected_objects_factory.py b/polystar_cv/polystar/common/target_pipeline/detected_objects/detected_objects_factory.py
similarity index 100%
rename from common/polystar/common/target_pipeline/detected_objects/detected_objects_factory.py
rename to polystar_cv/polystar/common/target_pipeline/detected_objects/detected_objects_factory.py
diff --git a/common/polystar/common/target_pipeline/detected_objects/detected_robot.py b/polystar_cv/polystar/common/target_pipeline/detected_objects/detected_robot.py
similarity index 100%
rename from common/polystar/common/target_pipeline/detected_objects/detected_robot.py
rename to polystar_cv/polystar/common/target_pipeline/detected_objects/detected_robot.py
diff --git a/common/polystar/common/target_pipeline/objects_linker/__init__.py b/polystar_cv/polystar/common/target_pipeline/object_selectors/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_linker/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/object_selectors/__init__.py
diff --git a/common/polystar/common/target_pipeline/object_selectors/closest_object_selector.py b/polystar_cv/polystar/common/target_pipeline/object_selectors/closest_object_selector.py
similarity index 100%
rename from common/polystar/common/target_pipeline/object_selectors/closest_object_selector.py
rename to polystar_cv/polystar/common/target_pipeline/object_selectors/closest_object_selector.py
diff --git a/common/polystar/common/target_pipeline/object_selectors/object_selector_abc.py b/polystar_cv/polystar/common/target_pipeline/object_selectors/object_selector_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/object_selectors/object_selector_abc.py
rename to polystar_cv/polystar/common/target_pipeline/object_selectors/object_selector_abc.py
diff --git a/common/polystar/common/target_pipeline/object_selectors/scored_object_selector_abc.py b/polystar_cv/polystar/common/target_pipeline/object_selectors/scored_object_selector_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/object_selectors/scored_object_selector_abc.py
rename to polystar_cv/polystar/common/target_pipeline/object_selectors/scored_object_selector_abc.py
diff --git a/common/polystar/common/target_pipeline/objects_trackers/__init__.py b/polystar_cv/polystar/common/target_pipeline/objects_detectors/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_trackers/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/objects_detectors/__init__.py
diff --git a/common/polystar/common/target_pipeline/objects_detectors/objects_detector_abc.py b/polystar_cv/polystar/common/target_pipeline/objects_detectors/objects_detector_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_detectors/objects_detector_abc.py
rename to polystar_cv/polystar/common/target_pipeline/objects_detectors/objects_detector_abc.py
diff --git a/common/polystar/common/target_pipeline/objects_detectors/tf_model_objects_detector.py b/polystar_cv/polystar/common/target_pipeline/objects_detectors/tf_model_objects_detector.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_detectors/tf_model_objects_detector.py
rename to polystar_cv/polystar/common/target_pipeline/objects_detectors/tf_model_objects_detector.py
diff --git a/common/polystar/common/target_pipeline/objects_detectors/trt_model_object_detector.py b/polystar_cv/polystar/common/target_pipeline/objects_detectors/trt_model_object_detector.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_detectors/trt_model_object_detector.py
rename to polystar_cv/polystar/common/target_pipeline/objects_detectors/trt_model_object_detector.py
diff --git a/common/polystar/common/target_pipeline/objects_validators/__init__.py b/polystar_cv/polystar/common/target_pipeline/objects_linker/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_validators/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/objects_linker/__init__.py
diff --git a/common/polystar/common/target_pipeline/objects_linker/objects_linker_abs.py b/polystar_cv/polystar/common/target_pipeline/objects_linker/objects_linker_abs.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_linker/objects_linker_abs.py
rename to polystar_cv/polystar/common/target_pipeline/objects_linker/objects_linker_abs.py
diff --git a/common/polystar/common/target_pipeline/objects_linker/simple_objects_linker.py b/polystar_cv/polystar/common/target_pipeline/objects_linker/simple_objects_linker.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_linker/simple_objects_linker.py
rename to polystar_cv/polystar/common/target_pipeline/objects_linker/simple_objects_linker.py
diff --git a/common/polystar/common/target_pipeline/target_factories/__init__.py b/polystar_cv/polystar/common/target_pipeline/objects_trackers/__init__.py
similarity index 100%
rename from common/polystar/common/target_pipeline/target_factories/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/objects_trackers/__init__.py
diff --git a/common/polystar/common/target_pipeline/objects_trackers/object_track.py b/polystar_cv/polystar/common/target_pipeline/objects_trackers/object_track.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_trackers/object_track.py
rename to polystar_cv/polystar/common/target_pipeline/objects_trackers/object_track.py
diff --git a/common/polystar/common/target_pipeline/objects_trackers/objects_tracker_abc.py b/polystar_cv/polystar/common/target_pipeline/objects_trackers/objects_tracker_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_trackers/objects_tracker_abc.py
rename to polystar_cv/polystar/common/target_pipeline/objects_trackers/objects_tracker_abc.py
diff --git a/common/polystar/common/utils/__init__.py b/polystar_cv/polystar/common/target_pipeline/objects_validators/__init__.py
similarity index 100%
rename from common/polystar/common/utils/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/objects_validators/__init__.py
diff --git a/common/polystar/common/target_pipeline/objects_validators/confidence_object_validator.py b/polystar_cv/polystar/common/target_pipeline/objects_validators/confidence_object_validator.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_validators/confidence_object_validator.py
rename to polystar_cv/polystar/common/target_pipeline/objects_validators/confidence_object_validator.py
diff --git a/common/polystar/common/target_pipeline/objects_validators/contains_box_validator.py b/polystar_cv/polystar/common/target_pipeline/objects_validators/contains_box_validator.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_validators/contains_box_validator.py
rename to polystar_cv/polystar/common/target_pipeline/objects_validators/contains_box_validator.py
diff --git a/common/polystar/common/target_pipeline/objects_validators/in_box_validator.py b/polystar_cv/polystar/common/target_pipeline/objects_validators/in_box_validator.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_validators/in_box_validator.py
rename to polystar_cv/polystar/common/target_pipeline/objects_validators/in_box_validator.py
diff --git a/common/polystar/common/target_pipeline/objects_validators/negation_validator.py b/polystar_cv/polystar/common/target_pipeline/objects_validators/negation_validator.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_validators/negation_validator.py
rename to polystar_cv/polystar/common/target_pipeline/objects_validators/negation_validator.py
diff --git a/common/polystar/common/target_pipeline/objects_validators/objects_validator_abc.py b/polystar_cv/polystar/common/target_pipeline/objects_validators/objects_validator_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_validators/objects_validator_abc.py
rename to polystar_cv/polystar/common/target_pipeline/objects_validators/objects_validator_abc.py
diff --git a/common/polystar/common/target_pipeline/objects_validators/robot_color_validator.py b/polystar_cv/polystar/common/target_pipeline/objects_validators/robot_color_validator.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_validators/robot_color_validator.py
rename to polystar_cv/polystar/common/target_pipeline/objects_validators/robot_color_validator.py
diff --git a/common/polystar/common/target_pipeline/objects_validators/type_object_validator.py b/polystar_cv/polystar/common/target_pipeline/objects_validators/type_object_validator.py
similarity index 100%
rename from common/polystar/common/target_pipeline/objects_validators/type_object_validator.py
rename to polystar_cv/polystar/common/target_pipeline/objects_validators/type_object_validator.py
diff --git a/common/polystar/common/target_pipeline/target_abc.py b/polystar_cv/polystar/common/target_pipeline/target_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/target_abc.py
rename to polystar_cv/polystar/common/target_pipeline/target_abc.py
diff --git a/common/polystar/common/view/__init__.py b/polystar_cv/polystar/common/target_pipeline/target_factories/__init__.py
similarity index 100%
rename from common/polystar/common/view/__init__.py
rename to polystar_cv/polystar/common/target_pipeline/target_factories/__init__.py
diff --git a/common/polystar/common/target_pipeline/target_factories/ratio_simple_target_factory.py b/polystar_cv/polystar/common/target_pipeline/target_factories/ratio_simple_target_factory.py
similarity index 100%
rename from common/polystar/common/target_pipeline/target_factories/ratio_simple_target_factory.py
rename to polystar_cv/polystar/common/target_pipeline/target_factories/ratio_simple_target_factory.py
diff --git a/common/polystar/common/target_pipeline/target_factories/ratio_target_factory_abc.py b/polystar_cv/polystar/common/target_pipeline/target_factories/ratio_target_factory_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/target_factories/ratio_target_factory_abc.py
rename to polystar_cv/polystar/common/target_pipeline/target_factories/ratio_target_factory_abc.py
diff --git a/common/polystar/common/target_pipeline/target_factories/target_factory_abc.py b/polystar_cv/polystar/common/target_pipeline/target_factories/target_factory_abc.py
similarity index 100%
rename from common/polystar/common/target_pipeline/target_factories/target_factory_abc.py
rename to polystar_cv/polystar/common/target_pipeline/target_factories/target_factory_abc.py
diff --git a/common/polystar/common/target_pipeline/target_pipeline.py b/polystar_cv/polystar/common/target_pipeline/target_pipeline.py
similarity index 100%
rename from common/polystar/common/target_pipeline/target_pipeline.py
rename to polystar_cv/polystar/common/target_pipeline/target_pipeline.py
diff --git a/common/polystar/common/target_pipeline/tracking_target_pipeline.py b/polystar_cv/polystar/common/target_pipeline/tracking_target_pipeline.py
similarity index 100%
rename from common/polystar/common/target_pipeline/tracking_target_pipeline.py
rename to polystar_cv/polystar/common/target_pipeline/tracking_target_pipeline.py
diff --git a/common/research/common/__init__.py b/polystar_cv/polystar/common/utils/__init__.py
similarity index 100%
rename from common/research/common/__init__.py
rename to polystar_cv/polystar/common/utils/__init__.py
diff --git a/common/polystar/common/utils/dataframe.py b/polystar_cv/polystar/common/utils/dataframe.py
similarity index 100%
rename from common/polystar/common/utils/dataframe.py
rename to polystar_cv/polystar/common/utils/dataframe.py
diff --git a/common/polystar/common/utils/git.py b/polystar_cv/polystar/common/utils/git.py
similarity index 100%
rename from common/polystar/common/utils/git.py
rename to polystar_cv/polystar/common/utils/git.py
diff --git a/common/polystar/common/utils/iterable_utils.py b/polystar_cv/polystar/common/utils/iterable_utils.py
similarity index 100%
rename from common/polystar/common/utils/iterable_utils.py
rename to polystar_cv/polystar/common/utils/iterable_utils.py
diff --git a/common/research/common/dataset/__init__.py b/polystar_cv/polystar/common/utils/logs.py
similarity index 100%
rename from common/research/common/dataset/__init__.py
rename to polystar_cv/polystar/common/utils/logs.py
diff --git a/common/polystar/common/utils/markdown.py b/polystar_cv/polystar/common/utils/markdown.py
similarity index 94%
rename from common/polystar/common/utils/markdown.py
rename to polystar_cv/polystar/common/utils/markdown.py
index 791bef04474c1a2b08962ed043cffd8c19fdbaf4..77530fe4c289f7b72572546efbeee730a2e3c6c6 100644
--- a/common/polystar/common/utils/markdown.py
+++ b/polystar_cv/polystar/common/utils/markdown.py
@@ -3,6 +3,7 @@ from typing import Any, Iterable, TextIO
 
 from markdown.core import markdown
 from matplotlib.figure import Figure
+from matplotlib.pyplot import close
 from pandas import DataFrame
 from tabulate import tabulate
 from xhtml2pdf.document import pisaDocument
@@ -40,9 +41,10 @@ class MarkdownFile:
         self.paragraph(f"![{alt}]({str(relative_path).replace(' ', '%20')})")
         return self
 
-    def figure(self, figure: Figure, name: str, alt: str = "img"):
-        name = name.replace(" ", "_")
+    def figure(self, figure: Figure, name: str, alt: str = "img", close_after: bool = True):
+        name = name.replace(" ", "_").replace("%", "p")
         figure.savefig(self.markdown_path.parent / name)
+        close(figure)
         return self.image(name, alt)
 
     def table(self, data: DataFrame) -> "MarkdownFile":
diff --git a/common/polystar/common/utils/misc.py b/polystar_cv/polystar/common/utils/misc.py
similarity index 100%
rename from common/polystar/common/utils/misc.py
rename to polystar_cv/polystar/common/utils/misc.py
diff --git a/common/polystar/common/utils/named_mixin.py b/polystar_cv/polystar/common/utils/named_mixin.py
similarity index 100%
rename from common/polystar/common/utils/named_mixin.py
rename to polystar_cv/polystar/common/utils/named_mixin.py
diff --git a/common/polystar/common/utils/no_case_enum.py b/polystar_cv/polystar/common/utils/no_case_enum.py
similarity index 72%
rename from common/polystar/common/utils/no_case_enum.py
rename to polystar_cv/polystar/common/utils/no_case_enum.py
index b89a03f504613253cf10cf0c3f95facf8084d2a2..b55371d6c680a75fb95de701ce779917bc336c80 100644
--- a/common/polystar/common/utils/no_case_enum.py
+++ b/polystar_cv/polystar/common/utils/no_case_enum.py
@@ -4,4 +4,4 @@ from enum import IntEnum
 class NoCaseEnum(IntEnum):
     @classmethod
     def _missing_(cls, key):
-        return cls[key.capitalize()]
+        return cls[key.upper()]
diff --git a/polystar_cv/polystar/common/utils/registry.py b/polystar_cv/polystar/common/utils/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3eb717765cce5030ff5c394be76ff06d23f7c69
--- /dev/null
+++ b/polystar_cv/polystar/common/utils/registry.py
@@ -0,0 +1,18 @@
+from itertools import chain
+from typing import Dict, Sequence, Type
+
+from polystar.common.utils.singleton import Singleton
+
+
class Registry(Dict[str, Type], Singleton):
    """Global name -> class mapping, filled by the @registry.register() decorator."""

    def register(self, previous_names: Sequence[str] = ()):
        """Class decorator: record the class under its own name plus any legacy names."""

        def decorator(class_: Type):
            for name in [class_.__name__, *previous_names]:
                # Duplicate registrations are programming errors, caught eagerly.
                assert name not in self, f"{name} is already registered"
                self[name] = class_
            return class_

        return decorator


# Single module-wide instance (Registry is also a Singleton).
registry = Registry()
diff --git a/polystar_cv/polystar/common/utils/serialization.py b/polystar_cv/polystar/common/utils/serialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7aff565aa7b14072beab802b41eae37ca224726
--- /dev/null
+++ b/polystar_cv/polystar/common/utils/serialization.py
@@ -0,0 +1,25 @@
+import logging
+import pickle
+from pathlib import Path
+from typing import Any, Type
+
+from polystar.common.utils.registry import registry
+
+
+class UnpicklerWithRegistry(pickle.Unpickler):
+    def find_class(self, module: str, name: str) -> Type:
+        try:
+            return registry[name]
+        except KeyError:
+            return super().find_class(module, name)
+
+
+def pkl_load(file_path: Path):
+    with file_path.with_suffix(".pkl").open("rb") as f:
+        return UnpicklerWithRegistry(f).load()
+
+
+def pkl_dump(obj: Any, file_path: Path):
+    file_path_with_suffix = file_path.with_suffix(".pkl")
+    file_path_with_suffix.write_bytes(pickle.dumps(obj))
+    logging.info(f"{obj} saved at {file_path_with_suffix}")
diff --git a/polystar_cv/polystar/common/utils/singleton.py b/polystar_cv/polystar/common/utils/singleton.py
new file mode 100644
index 0000000000000000000000000000000000000000..488b92cac10a200128666e59e6f5f3d8a0121214
--- /dev/null
+++ b/polystar_cv/polystar/common/utils/singleton.py
@@ -0,0 +1,16 @@
+class SingletonMetaclass(type):
+    _instances = {}
+
+    def __call__(cls, *args, **kwargs):
+        if cls not in cls._instances:
+            cls._instances[cls] = super(SingletonMetaclass, cls).__call__(*args, **kwargs)
+        return cls._instances[cls]
+
+
+class Singleton:
+    _instance = None
+
+    def __new__(cls, *args, **kwargs):
+        if not isinstance(cls._instance, cls):
+            cls._instance = object.__new__(cls, *args, **kwargs)
+        return cls._instance
diff --git a/common/polystar/common/utils/str_utils.py b/polystar_cv/polystar/common/utils/str_utils.py
similarity index 100%
rename from common/polystar/common/utils/str_utils.py
rename to polystar_cv/polystar/common/utils/str_utils.py
diff --git a/common/polystar/common/utils/tensorflow.py b/polystar_cv/polystar/common/utils/tensorflow.py
similarity index 100%
rename from common/polystar/common/utils/tensorflow.py
rename to polystar_cv/polystar/common/utils/tensorflow.py
diff --git a/common/polystar/common/utils/time.py b/polystar_cv/polystar/common/utils/time.py
similarity index 100%
rename from common/polystar/common/utils/time.py
rename to polystar_cv/polystar/common/utils/time.py
diff --git a/common/polystar/common/utils/tqdm.py b/polystar_cv/polystar/common/utils/tqdm.py
similarity index 100%
rename from common/polystar/common/utils/tqdm.py
rename to polystar_cv/polystar/common/utils/tqdm.py
diff --git a/common/polystar/common/utils/working_directory.py b/polystar_cv/polystar/common/utils/working_directory.py
similarity index 100%
rename from common/polystar/common/utils/working_directory.py
rename to polystar_cv/polystar/common/utils/working_directory.py
diff --git a/common/research/common/dataset/cleaning/__init__.py b/polystar_cv/polystar/common/view/__init__.py
similarity index 100%
rename from common/research/common/dataset/cleaning/__init__.py
rename to polystar_cv/polystar/common/view/__init__.py
diff --git a/common/polystar/common/view/cv2_results_viewer.py b/polystar_cv/polystar/common/view/cv2_results_viewer.py
similarity index 100%
rename from common/polystar/common/view/cv2_results_viewer.py
rename to polystar_cv/polystar/common/view/cv2_results_viewer.py
diff --git a/common/polystar/common/view/plt_results_viewer.py b/polystar_cv/polystar/common/view/plt_results_viewer.py
similarity index 100%
rename from common/polystar/common/view/plt_results_viewer.py
rename to polystar_cv/polystar/common/view/plt_results_viewer.py
diff --git a/common/polystar/common/view/results_viewer_abc.py b/polystar_cv/polystar/common/view/results_viewer_abc.py
similarity index 100%
rename from common/polystar/common/view/results_viewer_abc.py
rename to polystar_cv/polystar/common/view/results_viewer_abc.py
diff --git a/common/research/common/dataset/improvement/__init__.py b/polystar_cv/research/__init__.py
similarity index 100%
rename from common/research/common/dataset/improvement/__init__.py
rename to polystar_cv/research/__init__.py
diff --git a/common/research/common/dataset/perturbations/__init__.py b/polystar_cv/research/common/__init__.py
similarity index 100%
rename from common/research/common/dataset/perturbations/__init__.py
rename to polystar_cv/research/common/__init__.py
diff --git a/common/research/common/constants.py b/polystar_cv/research/common/constants.py
similarity index 100%
rename from common/research/common/constants.py
rename to polystar_cv/research/common/constants.py
diff --git a/common/research/common/dataset/perturbations/image_modifiers/__init__.py b/polystar_cv/research/common/dataset/__init__.py
similarity index 100%
rename from common/research/common/dataset/perturbations/image_modifiers/__init__.py
rename to polystar_cv/research/common/dataset/__init__.py
diff --git a/common/research/common/dataset/twitch/__init__.py b/polystar_cv/research/common/dataset/cleaning/__init__.py
similarity index 100%
rename from common/research/common/dataset/twitch/__init__.py
rename to polystar_cv/research/common/dataset/cleaning/__init__.py
diff --git a/common/research/common/dataset/cleaning/dataset_changes.py b/polystar_cv/research/common/dataset/cleaning/dataset_changes.py
similarity index 77%
rename from common/research/common/dataset/cleaning/dataset_changes.py
rename to polystar_cv/research/common/dataset/cleaning/dataset_changes.py
index 3375476bd78b5556d16e0664b5c298bb65877a1d..bdeb4741876923230fc2f1f823bf1c6ddbec0062 100644
--- a/common/research/common/dataset/cleaning/dataset_changes.py
+++ b/polystar_cv/research/common/dataset/cleaning/dataset_changes.py
@@ -1,4 +1,5 @@
 import json
+from contextlib import suppress
 from pathlib import Path
 from typing import Dict, List, Set
 
@@ -6,6 +7,7 @@ from more_itertools import flatten
 
 from polystar.common.utils.git import get_git_username
 from polystar.common.utils.time import create_time_id
+from research.common.gcloud.gcloud_storage import GCStorages
 
 INVALIDATED_KEY: str = "invalidated"
 
@@ -13,6 +15,8 @@ INVALIDATED_KEY: str = "invalidated"
 class DatasetChanges:
     def __init__(self, dataset_directory: Path):
         self.changes_file: Path = dataset_directory / ".changes"
+        with suppress(FileNotFoundError):
+            GCStorages.DEV.download_file_if_missing(self.changes_file)
 
     @property
     def invalidated(self) -> Set[str]:
@@ -30,3 +34,7 @@ class DatasetChanges:
         changes[INVALIDATED_KEY][entry_id] = names
         self.changes_file.write_text(json.dumps(changes, indent=2))
         print(f"changes saved, see entry {entry_id} in file://{self.changes_file}")
+        self.upload()
+
+    def upload(self):
+        GCStorages.DEV.upload_file(self.changes_file)
diff --git a/common/research/common/dataset/cleaning/dataset_cleaner_app.py b/polystar_cv/research/common/dataset/cleaning/dataset_cleaner_app.py
similarity index 100%
rename from common/research/common/dataset/cleaning/dataset_cleaner_app.py
rename to polystar_cv/research/common/dataset/cleaning/dataset_cleaner_app.py
diff --git a/common/research/common/datasets/__init__.py b/polystar_cv/research/common/dataset/improvement/__init__.py
similarity index 100%
rename from common/research/common/datasets/__init__.py
rename to polystar_cv/research/common/dataset/improvement/__init__.py
diff --git a/common/research/common/dataset/improvement/zoom.py b/polystar_cv/research/common/dataset/improvement/zoom.py
similarity index 100%
rename from common/research/common/dataset/improvement/zoom.py
rename to polystar_cv/research/common/dataset/improvement/zoom.py
diff --git a/common/research/common/datasets/roco/__init__.py b/polystar_cv/research/common/dataset/perturbations/__init__.py
similarity index 100%
rename from common/research/common/datasets/roco/__init__.py
rename to polystar_cv/research/common/dataset/perturbations/__init__.py
diff --git a/common/research/common/dataset/perturbations/examples/.gitignore b/polystar_cv/research/common/dataset/perturbations/examples/.gitignore
similarity index 100%
rename from common/research/common/dataset/perturbations/examples/.gitignore
rename to polystar_cv/research/common/dataset/perturbations/examples/.gitignore
diff --git a/common/research/common/dataset/perturbations/examples/test.png b/polystar_cv/research/common/dataset/perturbations/examples/test.png
similarity index 100%
rename from common/research/common/dataset/perturbations/examples/test.png
rename to polystar_cv/research/common/dataset/perturbations/examples/test.png
diff --git a/common/research/common/datasets/roco/zoo/__init__.py b/polystar_cv/research/common/dataset/perturbations/image_modifiers/__init__.py
similarity index 100%
rename from common/research/common/datasets/roco/zoo/__init__.py
rename to polystar_cv/research/common/dataset/perturbations/image_modifiers/__init__.py
diff --git a/common/research/common/dataset/perturbations/image_modifiers/brightness.py b/polystar_cv/research/common/dataset/perturbations/image_modifiers/brightness.py
similarity index 100%
rename from common/research/common/dataset/perturbations/image_modifiers/brightness.py
rename to polystar_cv/research/common/dataset/perturbations/image_modifiers/brightness.py
diff --git a/common/research/common/dataset/perturbations/image_modifiers/contrast.py b/polystar_cv/research/common/dataset/perturbations/image_modifiers/contrast.py
similarity index 100%
rename from common/research/common/dataset/perturbations/image_modifiers/contrast.py
rename to polystar_cv/research/common/dataset/perturbations/image_modifiers/contrast.py
diff --git a/common/research/common/dataset/perturbations/image_modifiers/gaussian_blur.py b/polystar_cv/research/common/dataset/perturbations/image_modifiers/gaussian_blur.py
similarity index 100%
rename from common/research/common/dataset/perturbations/image_modifiers/gaussian_blur.py
rename to polystar_cv/research/common/dataset/perturbations/image_modifiers/gaussian_blur.py
diff --git a/common/research/common/dataset/perturbations/image_modifiers/gaussian_noise.py b/polystar_cv/research/common/dataset/perturbations/image_modifiers/gaussian_noise.py
similarity index 100%
rename from common/research/common/dataset/perturbations/image_modifiers/gaussian_noise.py
rename to polystar_cv/research/common/dataset/perturbations/image_modifiers/gaussian_noise.py
diff --git a/common/research/common/dataset/perturbations/image_modifiers/horizontal_blur.py b/polystar_cv/research/common/dataset/perturbations/image_modifiers/horizontal_blur.py
similarity index 100%
rename from common/research/common/dataset/perturbations/image_modifiers/horizontal_blur.py
rename to polystar_cv/research/common/dataset/perturbations/image_modifiers/horizontal_blur.py
diff --git a/common/research/common/dataset/perturbations/image_modifiers/image_modifier_abc.py b/polystar_cv/research/common/dataset/perturbations/image_modifiers/image_modifier_abc.py
similarity index 100%
rename from common/research/common/dataset/perturbations/image_modifiers/image_modifier_abc.py
rename to polystar_cv/research/common/dataset/perturbations/image_modifiers/image_modifier_abc.py
diff --git a/common/research/common/dataset/perturbations/image_modifiers/saturation.py b/polystar_cv/research/common/dataset/perturbations/image_modifiers/saturation.py
similarity index 100%
rename from common/research/common/dataset/perturbations/image_modifiers/saturation.py
rename to polystar_cv/research/common/dataset/perturbations/image_modifiers/saturation.py
diff --git a/common/research/common/dataset/perturbations/perturbator.py b/polystar_cv/research/common/dataset/perturbations/perturbator.py
similarity index 100%
rename from common/research/common/dataset/perturbations/perturbator.py
rename to polystar_cv/research/common/dataset/perturbations/perturbator.py
diff --git a/common/research/common/dataset/perturbations/utils.py b/polystar_cv/research/common/dataset/perturbations/utils.py
similarity index 100%
rename from common/research/common/dataset/perturbations/utils.py
rename to polystar_cv/research/common/dataset/perturbations/utils.py
diff --git a/common/research/common/dataset/tensorflow_record.py b/polystar_cv/research/common/dataset/tensorflow_record.py
similarity index 100%
rename from common/research/common/dataset/tensorflow_record.py
rename to polystar_cv/research/common/dataset/tensorflow_record.py
diff --git a/common/research/common/scripts/__init__.py b/polystar_cv/research/common/dataset/twitch/__init__.py
similarity index 100%
rename from common/research/common/scripts/__init__.py
rename to polystar_cv/research/common/dataset/twitch/__init__.py
diff --git a/common/research/common/dataset/twitch/aerial_view_detector.py b/polystar_cv/research/common/dataset/twitch/aerial_view_detector.py
similarity index 100%
rename from common/research/common/dataset/twitch/aerial_view_detector.py
rename to polystar_cv/research/common/dataset/twitch/aerial_view_detector.py
diff --git a/common/research/common/dataset/twitch/mask_aerial.jpg b/polystar_cv/research/common/dataset/twitch/mask_aerial.jpg
similarity index 100%
rename from common/research/common/dataset/twitch/mask_aerial.jpg
rename to polystar_cv/research/common/dataset/twitch/mask_aerial.jpg
diff --git a/common/research/common/dataset/twitch/mask_detector.py b/polystar_cv/research/common/dataset/twitch/mask_detector.py
similarity index 100%
rename from common/research/common/dataset/twitch/mask_detector.py
rename to polystar_cv/research/common/dataset/twitch/mask_detector.py
diff --git a/common/research/common/dataset/twitch/mask_robot_view.jpg b/polystar_cv/research/common/dataset/twitch/mask_robot_view.jpg
similarity index 100%
rename from common/research/common/dataset/twitch/mask_robot_view.jpg
rename to polystar_cv/research/common/dataset/twitch/mask_robot_view.jpg
diff --git a/common/research/common/dataset/twitch/robots_views_extractor.py b/polystar_cv/research/common/dataset/twitch/robots_views_extractor.py
similarity index 100%
rename from common/research/common/dataset/twitch/robots_views_extractor.py
rename to polystar_cv/research/common/dataset/twitch/robots_views_extractor.py
diff --git a/common/research/common/dataset/upload.py b/polystar_cv/research/common/dataset/upload.py
similarity index 70%
rename from common/research/common/dataset/upload.py
rename to polystar_cv/research/common/dataset/upload.py
index cc857b165a9579481658f4a309b335e9a55e45f7..5cbc1684c40e46b21a98c4d8d0bebf289cde8e0f 100644
--- a/common/research/common/dataset/upload.py
+++ b/polystar_cv/research/common/dataset/upload.py
@@ -2,6 +2,7 @@ import logging
 
 from tqdm import tqdm
 
+from research.common.dataset.cleaning.dataset_changes import DatasetChanges
 from research.common.datasets.roco.roco_dataset_builder import ROCODatasetBuilder
 from research.common.datasets.roco.roco_datasets import ROCODatasets
 from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
@@ -13,6 +14,11 @@ def upload_all_digit_datasets(roco_datasets: ROCODatasets):
         upload_digit_dataset(roco_dataset)
 
 
+def upload_all_color_datasets(roco_datasets: ROCODatasets):
+    for roco_dataset in tqdm(roco_datasets, desc="Uploading datasets"):
+        upload_color_dataset(roco_dataset)
+
+
 def upload_digit_dataset(roco_dataset: ROCODatasetBuilder):
     _upload_armor_dataset(roco_dataset, "digits")
 
@@ -23,9 +29,11 @@ def upload_color_dataset(roco_dataset: ROCODatasetBuilder):
 
 def _upload_armor_dataset(roco_dataset: ROCODatasetBuilder, name: str):
     GCStorages.DEV.upload_directory(roco_dataset.main_dir / name, extensions_to_exclude={".changes"})
+    DatasetChanges(roco_dataset.main_dir / name).upload()
 
 
 if __name__ == "__main__":
     logging.getLogger().setLevel("INFO")
 
-    upload_all_digit_datasets(ROCODatasetsZoo.DJI)
+    upload_all_digit_datasets(ROCODatasetsZoo.TWITCH)
+    upload_digit_dataset(ROCODatasetsZoo.DJI.FINAL)
diff --git a/common/tests/common/integration_tests/__init__.py b/polystar_cv/research/common/datasets/__init__.py
similarity index 100%
rename from common/tests/common/integration_tests/__init__.py
rename to polystar_cv/research/common/datasets/__init__.py
diff --git a/common/research/common/datasets/dataset.py b/polystar_cv/research/common/datasets/dataset.py
similarity index 100%
rename from common/research/common/datasets/dataset.py
rename to polystar_cv/research/common/datasets/dataset.py
diff --git a/common/research/common/datasets/dataset_builder.py b/polystar_cv/research/common/datasets/dataset_builder.py
similarity index 100%
rename from common/research/common/datasets/dataset_builder.py
rename to polystar_cv/research/common/datasets/dataset_builder.py
diff --git a/common/research/common/datasets/filter_dataset.py b/polystar_cv/research/common/datasets/filter_dataset.py
similarity index 100%
rename from common/research/common/datasets/filter_dataset.py
rename to polystar_cv/research/common/datasets/filter_dataset.py
diff --git a/common/research/common/datasets/image_dataset.py b/polystar_cv/research/common/datasets/image_dataset.py
similarity index 100%
rename from common/research/common/datasets/image_dataset.py
rename to polystar_cv/research/common/datasets/image_dataset.py
diff --git a/common/research/common/datasets/image_file_dataset_builder.py b/polystar_cv/research/common/datasets/image_file_dataset_builder.py
similarity index 100%
rename from common/research/common/datasets/image_file_dataset_builder.py
rename to polystar_cv/research/common/datasets/image_file_dataset_builder.py
diff --git a/common/research/common/datasets/iterator_dataset.py b/polystar_cv/research/common/datasets/iterator_dataset.py
similarity index 100%
rename from common/research/common/datasets/iterator_dataset.py
rename to polystar_cv/research/common/datasets/iterator_dataset.py
diff --git a/common/research/common/datasets/lazy_dataset.py b/polystar_cv/research/common/datasets/lazy_dataset.py
similarity index 100%
rename from common/research/common/datasets/lazy_dataset.py
rename to polystar_cv/research/common/datasets/lazy_dataset.py
diff --git a/common/tests/common/integration_tests/datasets/__init__.py b/polystar_cv/research/common/datasets/roco/__init__.py
similarity index 100%
rename from common/tests/common/integration_tests/datasets/__init__.py
rename to polystar_cv/research/common/datasets/roco/__init__.py
diff --git a/common/research/common/datasets/roco/roco_annotation.py b/polystar_cv/research/common/datasets/roco/roco_annotation.py
similarity index 100%
rename from common/research/common/datasets/roco/roco_annotation.py
rename to polystar_cv/research/common/datasets/roco/roco_annotation.py
diff --git a/common/research/common/datasets/roco/roco_dataset.py b/polystar_cv/research/common/datasets/roco/roco_dataset.py
similarity index 100%
rename from common/research/common/datasets/roco/roco_dataset.py
rename to polystar_cv/research/common/datasets/roco/roco_dataset.py
diff --git a/common/research/common/datasets/roco/roco_dataset_builder.py b/polystar_cv/research/common/datasets/roco/roco_dataset_builder.py
similarity index 100%
rename from common/research/common/datasets/roco/roco_dataset_builder.py
rename to polystar_cv/research/common/datasets/roco/roco_dataset_builder.py
diff --git a/common/research/common/datasets/roco/roco_dataset_descriptor.py b/polystar_cv/research/common/datasets/roco/roco_dataset_descriptor.py
similarity index 100%
rename from common/research/common/datasets/roco/roco_dataset_descriptor.py
rename to polystar_cv/research/common/datasets/roco/roco_dataset_descriptor.py
diff --git a/common/research/common/datasets/roco/roco_datasets.py b/polystar_cv/research/common/datasets/roco/roco_datasets.py
similarity index 95%
rename from common/research/common/datasets/roco/roco_datasets.py
rename to polystar_cv/research/common/datasets/roco/roco_datasets.py
index d824c699bbf58640f958c0fc997e4bf741650b50..dbecdcdec9add9f5939986cdcc3e4d8955f19e1d 100644
--- a/common/research/common/datasets/roco/roco_datasets.py
+++ b/polystar_cv/research/common/datasets/roco/roco_datasets.py
@@ -19,6 +19,9 @@ class ROCODatasetsMeta(type):
     def __iter__(cls) -> Iterator[ROCODatasetBuilder]:
         return (cls._make_builder_from_name(name) for name in dir(cls) if _is_builder_name(cls, name))
 
+    def __len__(cls):
+        return sum(_is_builder_name(cls, name) for name in dir(cls))
+
     def union(cls) -> UnionLazyDataset[Path, ROCOAnnotation]:
         return UnionLazyDataset(cls, cls.name)
 
diff --git a/common/tests/common/unittests/__init__.py b/polystar_cv/research/common/datasets/roco/zoo/__init__.py
similarity index 100%
rename from common/tests/common/unittests/__init__.py
rename to polystar_cv/research/common/datasets/roco/zoo/__init__.py
diff --git a/common/research/common/datasets/roco/zoo/dji.py b/polystar_cv/research/common/datasets/roco/zoo/dji.py
similarity index 100%
rename from common/research/common/datasets/roco/zoo/dji.py
rename to polystar_cv/research/common/datasets/roco/zoo/dji.py
diff --git a/common/research/common/datasets/roco/zoo/dji_zoomed.py b/polystar_cv/research/common/datasets/roco/zoo/dji_zoomed.py
similarity index 100%
rename from common/research/common/datasets/roco/zoo/dji_zoomed.py
rename to polystar_cv/research/common/datasets/roco/zoo/dji_zoomed.py
diff --git a/common/research/common/datasets/roco/zoo/roco_dataset_zoo.py b/polystar_cv/research/common/datasets/roco/zoo/roco_dataset_zoo.py
similarity index 100%
rename from common/research/common/datasets/roco/zoo/roco_dataset_zoo.py
rename to polystar_cv/research/common/datasets/roco/zoo/roco_dataset_zoo.py
diff --git a/common/research/common/datasets/roco/zoo/twitch.py b/polystar_cv/research/common/datasets/roco/zoo/twitch.py
similarity index 100%
rename from common/research/common/datasets/roco/zoo/twitch.py
rename to polystar_cv/research/common/datasets/roco/zoo/twitch.py
diff --git a/common/research/common/datasets/slice_dataset.py b/polystar_cv/research/common/datasets/slice_dataset.py
similarity index 100%
rename from common/research/common/datasets/slice_dataset.py
rename to polystar_cv/research/common/datasets/slice_dataset.py
diff --git a/common/research/common/datasets/transform_dataset.py b/polystar_cv/research/common/datasets/transform_dataset.py
similarity index 100%
rename from common/research/common/datasets/transform_dataset.py
rename to polystar_cv/research/common/datasets/transform_dataset.py
diff --git a/common/research/common/datasets/union_dataset.py b/polystar_cv/research/common/datasets/union_dataset.py
similarity index 100%
rename from common/research/common/datasets/union_dataset.py
rename to polystar_cv/research/common/datasets/union_dataset.py
diff --git a/common/research/common/gcloud/gcloud_storage.py b/polystar_cv/research/common/gcloud/gcloud_storage.py
similarity index 58%
rename from common/research/common/gcloud/gcloud_storage.py
rename to polystar_cv/research/common/gcloud/gcloud_storage.py
index d83de5bb1bd812b681df8acec1cbe9f95787d613..4b47e20c6a6b14eb8bc401896d7de09941554498 100644
--- a/common/research/common/gcloud/gcloud_storage.py
+++ b/polystar_cv/research/common/gcloud/gcloud_storage.py
@@ -3,11 +3,12 @@ import shutil
 import tarfile
 from contextlib import contextmanager
 from enum import Enum
+from io import FileIO
 from pathlib import Path, PurePath
 from tempfile import TemporaryDirectory
 from typing import Iterable, Optional
 
-from google.cloud.storage import Blob, Bucket, Client
+from google.cloud.storage import Bucket, Client
 
 from polystar.common.constants import PROJECT_DIR
 
@@ -18,14 +19,19 @@ EXTENSIONS_TO_EXCLUDE = (".changes",)
 class GCStorage:
     def __init__(self, bucket_name: str):
         self.bucket_name = bucket_name
-        self.client: Optional[Client] = None
+        self._client: Optional[Client] = None
         self._bucket: Optional[Bucket] = None
-        self.url = f"https://console.cloud.google.com/storage/browser/{bucket_name}"
+        self.bucket_url = f"https://console.cloud.google.com/storage/browser/{bucket_name}"
+        self.storage_url = f"https://storage.cloud.google.com/{bucket_name}"
 
     def upload_file(self, local_path: Path, remote_path: Optional[PurePath] = None):
-        blob = self._make_remote_blob(local_path, remote_path)
+        remote_path = _make_remote_path(local_path, remote_path)
+        blob = self.bucket.blob(str(remote_path), chunk_size=10 * 1024 * 1024)
         blob.upload_from_filename(str(local_path), timeout=60 * 5)
-        logger.info(f"File file:///{local_path} uploaded")
+        logger.info(
+            f"File {local_path.name} uploaded to {self.bucket_url}/{remote_path.parent}. "
+            f"Download link: {self.storage_url}/{remote_path}"
+        )
 
     def upload_directory(self, local_path: Path, extensions_to_exclude: Iterable[str] = EXTENSIONS_TO_EXCLUDE):
         extensions_to_exclude = set(extensions_to_exclude)
@@ -35,14 +41,18 @@ class GCStorage:
                 tar.add(
                     str(local_path),
                     arcname="",
-                    exclude=lambda name: any(name.endswith(ext) for ext in extensions_to_exclude),
+                    filter=lambda f: None if any(f.name.endswith(ext) for ext in extensions_to_exclude) else f,
                 )
             return self.upload_file(tar_path, _make_remote_path(local_path.with_suffix(".tar.gz")))
 
     def download_file(self, local_path: Path, remote_path: Optional[PurePath] = None):
-        blob = self._make_remote_blob(local_path, remote_path)
+        local_path.parent.mkdir(exist_ok=True, parents=True)
+        remote_path = _make_remote_path(local_path, remote_path)
+        blob = self.bucket.get_blob(str(remote_path))
+        if blob is None:
+            raise FileNotFoundError(f"{remote_path} is not on {self.bucket_url}")
         blob.download_to_filename(str(local_path), timeout=60 * 5)
-        logger.info(f"File file:///{local_path} downloaded")
+        logger.info(f"File {local_path.name} downloaded to file:///{local_path}")
 
     def download_file_if_missing(self, local_path: Path, remote_path: Optional[PurePath] = None):
         if not local_path.exists():
@@ -69,7 +79,7 @@ class GCStorage:
         self.download_directory(local_path)
 
     @contextmanager
-    def open(self, local_path: Path, mode: str):
+    def open(self, local_path: Path, mode: str) -> FileIO:
         if "r" in mode:
             self.download_file_if_missing(local_path)
             with local_path.open(mode) as f:
@@ -82,23 +92,38 @@ class GCStorage:
         else:
             raise ValueError(f"mode {mode} is not supported")
 
-    def _make_remote_blob(self, local_path: Path, remote_path: Optional[PurePath]) -> Blob:
-        if remote_path is None:
-            remote_path = _make_remote_path(local_path)
+    @staticmethod
+    def open_from_str(remote_path: str, mode: str):
+        assert remote_path.startswith("gs://")
+        remote_path = remote_path[5:]
+        bucket_name, relative_path = remote_path.split("/", maxsplit=1)
+        return GCStorage(bucket_name).open(Path(PROJECT_DIR / relative_path), mode)
+
+    def glob(self, local_path: Path, remote_path: Optional[PurePath] = None, extension: str = None) -> Iterable[Path]:
+        remote_path = _make_remote_path(local_path, remote_path)
+        blobs = self.client.list_blobs(self.bucket, prefix=str(remote_path),)
+        if extension is None:
+            return (PROJECT_DIR / b.name for b in blobs)
+        for blob in blobs:
+            if blob.name.endswith(extension):
+                yield PROJECT_DIR / blob.name
 
-        return self.bucket.blob(str(remote_path), chunk_size=10 * 1024 * 1024)
+    @property
+    def client(self) -> Client:
+        if self._client is None:
+            self._client = Client()
+        return self._client
 
     @property
     def bucket(self) -> Bucket:
         if self._bucket is not None:
             return self._bucket
-        self.client = Client()
         self._bucket = self.client.bucket(self.bucket_name)
         return self._bucket
 
 
-def _make_remote_path(local_path: Path):
-    return local_path.relative_to(PROJECT_DIR)
+def _make_remote_path(local_path: Path, remote_path: Optional[PurePath] = None) -> PurePath:
+    return remote_path or local_path.relative_to(PROJECT_DIR)
 
 
 class GCStorages(GCStorage, Enum):
diff --git a/common/tests/common/unittests/object_validators/__init__.py b/polystar_cv/research/common/scripts/__init__.py
similarity index 100%
rename from common/tests/common/unittests/object_validators/__init__.py
rename to polystar_cv/research/common/scripts/__init__.py
diff --git a/common/research/common/scripts/construct_dataset_from_manual_annotation.py b/polystar_cv/research/common/scripts/construct_dataset_from_manual_annotation.py
similarity index 100%
rename from common/research/common/scripts/construct_dataset_from_manual_annotation.py
rename to polystar_cv/research/common/scripts/construct_dataset_from_manual_annotation.py
diff --git a/common/research/common/scripts/construct_twith_datasets_from_manual_annotation.py b/polystar_cv/research/common/scripts/construct_twith_datasets_from_manual_annotation.py
similarity index 100%
rename from common/research/common/scripts/construct_twith_datasets_from_manual_annotation.py
rename to polystar_cv/research/common/scripts/construct_twith_datasets_from_manual_annotation.py
diff --git a/common/research/common/scripts/correct_annotations.py b/polystar_cv/research/common/scripts/correct_annotations.py
similarity index 100%
rename from common/research/common/scripts/correct_annotations.py
rename to polystar_cv/research/common/scripts/correct_annotations.py
diff --git a/common/research/common/scripts/create_tensorflow_records.py b/polystar_cv/research/common/scripts/create_tensorflow_records.py
similarity index 100%
rename from common/research/common/scripts/create_tensorflow_records.py
rename to polystar_cv/research/common/scripts/create_tensorflow_records.py
diff --git a/common/research/common/scripts/extract_robots_views_from_video.py b/polystar_cv/research/common/scripts/extract_robots_views_from_video.py
similarity index 100%
rename from common/research/common/scripts/extract_robots_views_from_video.py
rename to polystar_cv/research/common/scripts/extract_robots_views_from_video.py
diff --git a/common/research/common/scripts/improve_roco_by_zooming.py b/polystar_cv/research/common/scripts/improve_roco_by_zooming.py
similarity index 100%
rename from common/research/common/scripts/improve_roco_by_zooming.py
rename to polystar_cv/research/common/scripts/improve_roco_by_zooming.py
diff --git a/common/research/common/scripts/make_twitch_chunks_to_annotate.py b/polystar_cv/research/common/scripts/make_twitch_chunks_to_annotate.py
similarity index 100%
rename from common/research/common/scripts/make_twitch_chunks_to_annotate.py
rename to polystar_cv/research/common/scripts/make_twitch_chunks_to_annotate.py
diff --git a/common/research/common/scripts/move_aerial_views.py b/polystar_cv/research/common/scripts/move_aerial_views.py
similarity index 100%
rename from common/research/common/scripts/move_aerial_views.py
rename to polystar_cv/research/common/scripts/move_aerial_views.py
diff --git a/common/research/common/scripts/visualize_dataset.py b/polystar_cv/research/common/scripts/visualize_dataset.py
similarity index 100%
rename from common/research/common/scripts/visualize_dataset.py
rename to polystar_cv/research/common/scripts/visualize_dataset.py
diff --git a/polystar_cv/research/common/utils/experiment_dir.py b/polystar_cv/research/common/utils/experiment_dir.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0a1c57a5242b3a894456c80880634d4dcda3f79
--- /dev/null
+++ b/polystar_cv/research/common/utils/experiment_dir.py
@@ -0,0 +1,15 @@
+from pathlib import Path
+
+from polystar.common.utils.time import create_time_id
+from research.common.constants import EVALUATION_DIR
+
+
def prompt_experiment_dir(project_name: str) -> Path:
    """Interactively ask for an experiment name, then create and return its directory."""
    name = input(f"Experiment name for {project_name}: ")
    return make_experiment_dir(project_name, name)
+
+
def make_experiment_dir(project_name: str, experiment_name: str) -> Path:
    """Create (if needed) a time-stamped experiment directory under EVALUATION_DIR and return it."""
    path = EVALUATION_DIR / project_name / f"{create_time_id()}_{experiment_name}"
    path.mkdir(parents=True, exist_ok=True)
    return path
diff --git a/robots-at-robots/polystar/robots_at_robots/__init__.py b/polystar_cv/research/robots/__init__.py
similarity index 100%
rename from robots-at-robots/polystar/robots_at_robots/__init__.py
rename to polystar_cv/research/robots/__init__.py
diff --git a/robots-at-robots/research/robots_at_robots/__init__.py b/polystar_cv/research/robots/armor_color/__init__.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/__init__.py
rename to polystar_cv/research/robots/armor_color/__init__.py
diff --git a/polystar_cv/research/robots/armor_color/benchmarker.py b/polystar_cv/research/robots/armor_color/benchmarker.py
new file mode 100644
index 0000000000000000000000000000000000000000..79f0a91577c29be7636f18ca76a17aa6bf3699a7
--- /dev/null
+++ b/polystar_cv/research/robots/armor_color/benchmarker.py
@@ -0,0 +1,16 @@
+from pathlib import Path
+
+from research.robots.armor_color.datasets import make_armor_color_datasets
+from research.robots.armor_color.pipeline import ArmorColorPipeline
+from research.robots.evaluation.benchmarker import Benchmarker
+
+
def make_armor_color_benchmarker(report_dir: Path, include_dji: bool = True) -> Benchmarker:
    """Build a Benchmarker over the standard armor-color dataset split.

    Args:
        report_dir: directory where the benchmark report is written.
        include_dji: whether the DJI competition datasets are added to the
            training pool (forwarded to the dataset factory).
    """
    # Bug fix: include_dji was accepted but never forwarded, so callers passing
    # include_dji=False still trained on the DJI datasets.
    train_datasets, validation_datasets, test_datasets = make_armor_color_datasets(include_dji=include_dji)
    return Benchmarker(
        report_dir=report_dir,
        classes=ArmorColorPipeline.classes,
        train_datasets=train_datasets,
        validation_datasets=validation_datasets,
        test_datasets=test_datasets,
    )
diff --git a/polystar_cv/research/robots/armor_color/datasets.py b/polystar_cv/research/robots/armor_color/datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c53b9ef1e4cfc6ddc93ba8c53ebf07a97194e97
--- /dev/null
+++ b/polystar_cv/research/robots/armor_color/datasets.py
@@ -0,0 +1,49 @@
+from typing import List, Tuple
+
+from polystar.common.models.object import Armor, ArmorColor
+from research.common.datasets.image_dataset import FileImageDataset
+from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
+from research.robots.dataset.armor_value_dataset_generator import ArmorValueDatasetGenerator
+from research.robots.dataset.armor_value_target_factory import ArmorValueTargetFactory
+
+
class ArmorColorTargetFactory(ArmorValueTargetFactory[ArmorColor]):
    """Maps directory labels and armor annotations to their ArmorColor target."""

    def from_str(self, label: str) -> ArmorColor:
        # Value lookup on ArmorColor — assumes `label` matches one of its values;
        # TODO confirm how unknown labels should be handled.
        return ArmorColor(label)

    def from_armor(self, armor: Armor) -> ArmorColor:
        # The annotation already carries the color; no conversion needed.
        return armor.color
+
+
def make_armor_color_dataset_generator() -> ArmorValueDatasetGenerator[ArmorColor]:
    """Build the dataset generator that produces armor-color classification samples."""
    target_factory = ArmorColorTargetFactory()
    return ArmorValueDatasetGenerator("colors", target_factory)
+
+
def make_armor_color_datasets(
    include_dji: bool = True,
) -> Tuple[List[FileImageDataset], List[FileImageDataset], List[FileImageDataset]]:
    """Return the (train, validation, test) armor-color datasets.

    The training pool is four Twitch ROCO datasets, optionally extended with
    the four DJI competition datasets when `include_dji` is True; validation
    and test each hold out two Twitch datasets.
    """
    generator = make_armor_color_dataset_generator()

    twitch = ROCODatasetsZoo.TWITCH
    train_roco_datasets = [
        twitch.T470150052,
        twitch.T470152730,
        twitch.T470153081,
        twitch.T470158483,
    ]
    if include_dji:
        dji = ROCODatasetsZoo.DJI
        train_roco_datasets += [dji.FINAL, dji.CENTRAL_CHINA, dji.NORTH_CHINA, dji.SOUTH_CHINA]

    train_datasets, validation_datasets, test_datasets = generator.from_roco_datasets(
        train_roco_datasets,
        [twitch.T470149568, twitch.T470152289],
        [twitch.T470152838, twitch.T470151286],
    )

    return train_datasets, validation_datasets, test_datasets
diff --git a/polystar_cv/research/robots/armor_color/pipeline.py b/polystar_cv/research/robots/armor_color/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..50cd4416cf6a59d782ff7c726b4c05680eb8800c
--- /dev/null
+++ b/polystar_cv/research/robots/armor_color/pipeline.py
@@ -0,0 +1,11 @@
+from polystar.common.models.object import ArmorColor
+from polystar.common.pipeline.classification.classification_pipeline import ClassificationPipeline
+from polystar.common.pipeline.classification.keras_classification_pipeline import KerasClassificationPipeline
+
+
class ArmorColorPipeline(ClassificationPipeline):
    """Classification pipeline whose prediction classes are the ArmorColor values."""

    enum = ArmorColor
+
+
class ArmorColorKerasPipeline(ArmorColorPipeline, KerasClassificationPipeline):
    """Keras-backed variant of the armor-color classification pipeline."""

    pass
diff --git a/robots-at-robots/research/robots_at_robots/armor_color/__init__.py b/polystar_cv/research/robots/armor_color/scripts/__init__.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/armor_color/__init__.py
rename to polystar_cv/research/robots/armor_color/scripts/__init__.py
diff --git a/polystar_cv/research/robots/armor_color/scripts/benchmark.py b/polystar_cv/research/robots/armor_color/scripts/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..b76be32957b62a7a0f1c912d3ca6e03afe416e6b
--- /dev/null
+++ b/polystar_cv/research/robots/armor_color/scripts/benchmark.py
@@ -0,0 +1,64 @@
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+
+from nptyping import Array
+from sklearn.linear_model import LogisticRegression
+
+from polystar.common.models.image import Image
+from polystar.common.models.object import ArmorColor
+from polystar.common.pipeline.classification.random_model import RandomClassifier
+from polystar.common.pipeline.classification.rule_based_classifier import RuleBasedClassifierABC
+from polystar.common.pipeline.featurizers.histogram_2d import Histogram2D
+from polystar.common.pipeline.featurizers.histogram_blocs_2d import HistogramBlocs2D
+from polystar.common.pipeline.pipe_abc import PipeABC
+from polystar.common.pipeline.preprocessors.rgb_to_hsv import RGB2HSV
+from research.common.utils.experiment_dir import prompt_experiment_dir
+from research.robots.armor_color.benchmarker import make_armor_color_benchmarker
+from research.robots.armor_color.pipeline import ArmorColorPipeline
+
+
@dataclass
class MeanChannels(PipeABC):
    """Pipe reducing an image to its per-channel mean values."""

    def transform_single(self, image: Image) -> Array[float, float, float]:
        # Mean over the first two axes -> one value per channel
        # (assumes HWC layout — TODO confirm against Image's convention).
        return image.mean(axis=(0, 1))
+
+
class RedBlueComparisonClassifier(RuleBasedClassifierABC):
    """Rule-based baseline: picks RED or BLUE by comparing the mean red and blue channels."""

    def predict_single(self, features: Array[float, float, float]) -> ArmorColor:
        # features comes from MeanChannels: index 0 is red, index 2 is blue.
        if features[0] >= features[2]:
            return ArmorColor.RED
        return ArmorColor.BLUE
+
+
if __name__ == "__main__":
    logging.getLogger().setLevel("INFO")
    logging.info("Benchmarking")

    # Create the report directory for this run (name prompted interactively).
    _report_dir: Path = prompt_experiment_dir("armor-color")
    # DJI datasets are excluded from training for this benchmark.
    _benchmarker = make_armor_color_benchmarker(_report_dir, include_dji=False)

    # Candidate pipelines: rule-based and random baselines, plus
    # histogram + logistic-regression variants in RGB and HSV spaces.
    _pipelines = [
        ArmorColorPipeline.from_pipes([MeanChannels(), RedBlueComparisonClassifier()], name="rb-comparison"),
        ArmorColorPipeline.from_pipes([RandomClassifier()], name="random"),
        ArmorColorPipeline.from_pipes(
            [RGB2HSV(), Histogram2D(), LogisticRegression(max_iter=200)], name="hsv-hist-lr",
        ),
        ArmorColorPipeline.from_pipes(
            [RGB2HSV(), HistogramBlocs2D(rows=1, cols=3), LogisticRegression(max_iter=200)], name="hsv-hist-blocs-lr",
        ),
        ArmorColorPipeline.from_pipes([Histogram2D(), LogisticRegression(max_iter=200)], name="rgb-hist-lr"),
        # ArmorColorKerasPipeline.from_custom_cnn(
        #     logs_dir=str(_report_dir),
        #     input_size=16,
        #     conv_blocks=((32, 32), (64, 64)),
        #     dropout=0.5,
        #     dense_size=64,
        #     lr=7.2e-4,
        #     name="cnn",
        #     batch_size=128,
        #     steps_per_epoch="auto",
        # ),
    ]

    _benchmarker.benchmark(_pipelines)
diff --git a/polystar_cv/research/robots/armor_color/scripts/hyper_tune_cnn.py b/polystar_cv/research/robots/armor_color/scripts/hyper_tune_cnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..656459f420b4a01828b87799b6bff8d30b2fe5c0
--- /dev/null
+++ b/polystar_cv/research/robots/armor_color/scripts/hyper_tune_cnn.py
@@ -0,0 +1,35 @@
+import logging
+import warnings
+from pathlib import Path
+
+from optuna import Trial
+
+from research.common.utils.experiment_dir import make_experiment_dir
+from research.robots.armor_color.benchmarker import make_armor_color_benchmarker
+from research.robots.armor_color.pipeline import ArmorColorKerasPipeline
+from research.robots.evaluation.hyper_tuner import HyperTuner
+
+
def cnn_pipeline_factory(report_dir: Path, trial: Trial) -> ArmorColorKerasPipeline:
    """Build a CNN color pipeline from an Optuna trial's sampled hyperparameters.

    Tunes dropout (uniform in [0, 0.99]), learning rate (log-uniform in
    [1e-5, 1e-1]) and dense-layer size (powers of two from 8 to 1024);
    architecture and batch size are fixed.
    """
    return ArmorColorKerasPipeline.from_custom_cnn(
        input_size=32,
        conv_blocks=((32, 32), (64, 64)),
        logs_dir=str(report_dir),  # training logs go into the trial's report dir
        dropout=trial.suggest_uniform("dropout", 0, 0.99),
        lr=trial.suggest_loguniform("lr", 1e-5, 1e-1),
        # dense_size_log2 in [3, 10] -> dense sizes 8..1024
        dense_size=2 ** round(trial.suggest_discrete_uniform("dense_size_log2", 3, 10, 1)),
        batch_size=64,
        steps_per_epoch="auto",
        verbose=0,
    )
+
+
if __name__ == "__main__":
    logging.getLogger().setLevel("INFO")
    logging.getLogger("tensorflow").setLevel("ERROR")  # silence TensorFlow's verbose logging
    warnings.filterwarnings("ignore")

    logging.info("Hyperparameter tuning for CNN pipeline on color task")
    # NOTE(review): DJI datasets are excluded here — presumably to keep tuning
    # runs fast; confirm this matches the final training setup.
    HyperTuner(make_armor_color_benchmarker(make_experiment_dir("armor-color", "cnn_tuning"), include_dji=False)).tune(
        cnn_pipeline_factory, n_trials=50
    )
diff --git a/robots-at-robots/research/robots_at_robots/armor_digit/__init__.py b/polystar_cv/research/robots/armor_digit/__init__.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/armor_digit/__init__.py
rename to polystar_cv/research/robots/armor_digit/__init__.py
diff --git a/polystar_cv/research/robots/armor_digit/armor_digit_dataset.py b/polystar_cv/research/robots/armor_digit/armor_digit_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7f743707185a711e00379a4b7e529adb4aa18cd
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/armor_digit_dataset.py
@@ -0,0 +1,62 @@
+from itertools import islice
+from typing import List, Set, Tuple
+
+from polystar.common.filters.exclude_filter import ExcludeFilter
+from polystar.common.models.object import Armor, ArmorDigit
+from research.common.datasets.image_dataset import FileImageDataset
+from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
+from research.robots.dataset.armor_value_dataset_generator import ArmorValueDatasetGenerator
+from research.robots.dataset.armor_value_target_factory import ArmorValueTargetFactory
+
+VALID_NUMBERS_2021: Set[int] = {1, 3, 4}  # University League
+
+
def make_armor_digit_dataset_generator() -> ArmorValueDatasetGenerator[ArmorDigit]:
    """Build the digit-dataset generator, filtering out OUTDATED digit labels."""
    outdated_filter = ExcludeFilter({ArmorDigit.OUTDATED})
    return ArmorValueDatasetGenerator("digits", ArmorDigitTargetFactory(), outdated_filter)
+
+
def default_armor_digit_datasets() -> Tuple[List[FileImageDataset], List[FileImageDataset], List[FileImageDataset]]:
    """Return the default (train, validation, test) armor-digit dataset split.

    Training combines four Twitch datasets with the four DJI competition
    datasets; validation and test each hold out two Twitch datasets.
    """
    # Cleanup: removed a stale commented-out experiment that appended a
    # skipped/capped DJI.FINAL subset to the training sets.
    digit_dataset_generator = make_armor_digit_dataset_generator()
    train_datasets, validation_datasets, test_datasets = digit_dataset_generator.from_roco_datasets(
        [
            ROCODatasetsZoo.TWITCH.T470150052,
            ROCODatasetsZoo.TWITCH.T470152730,
            ROCODatasetsZoo.TWITCH.T470153081,
            ROCODatasetsZoo.TWITCH.T470158483,
            ROCODatasetsZoo.DJI.FINAL,
            ROCODatasetsZoo.DJI.CENTRAL_CHINA,
            ROCODatasetsZoo.DJI.NORTH_CHINA,
            ROCODatasetsZoo.DJI.SOUTH_CHINA,
        ],
        [ROCODatasetsZoo.TWITCH.T470149568, ROCODatasetsZoo.TWITCH.T470152289],
        [ROCODatasetsZoo.TWITCH.T470152838, ROCODatasetsZoo.TWITCH.T470151286],
    )
    return train_datasets, validation_datasets, test_datasets
+
+
class ArmorDigitTargetFactory(ArmorValueTargetFactory[ArmorDigit]):
    """Maps directory labels and armor annotations to their ArmorDigit target."""

    def from_str(self, label: str) -> ArmorDigit:
        n = int(label)

        if n in VALID_NUMBERS_2021:  # CHANGING
            # Digit 2 is absent from the enum, so numbers >= 3 are shifted down by one.
            return ArmorDigit(n - (n >= 3))  # hacky, but digit 2 is absent

        return ArmorDigit.OUTDATED

    def from_armor(self, armor: Armor) -> ArmorDigit:
        # NOTE(review): unlike from_str, no ">= 3" shift is applied here and
        # numbers outside VALID_NUMBERS_2021 are not mapped to OUTDATED —
        # confirm this asymmetry is intentional.
        return ArmorDigit(armor.number) if armor.number else ArmorDigit.UNKNOWN
+
+
if __name__ == "__main__":
    # Quick manual inspection: print a slice of generated digit samples.
    _roco_dataset_builder = ROCODatasetsZoo.DJI.CENTRAL_CHINA
    _armor_digit_dataset = make_armor_digit_dataset_generator().from_roco_dataset(_roco_dataset_builder)

    # Only samples 20..29 — enough to eyeball the generator's output.
    for p, c, _name in islice(_armor_digit_dataset, 20, 30):
        print(p, c, _name)
diff --git a/polystar_cv/research/robots/armor_digit/digit_benchmarker.py b/polystar_cv/research/robots/armor_digit/digit_benchmarker.py
new file mode 100644
index 0000000000000000000000000000000000000000..96473beca94287c2fc2c31fbd83fc9438aed6495
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/digit_benchmarker.py
@@ -0,0 +1,16 @@
+from pathlib import Path
+
+from research.robots.armor_digit.armor_digit_dataset import default_armor_digit_datasets
+from research.robots.armor_digit.pipeline import ArmorDigitPipeline
+from research.robots.evaluation.benchmarker import Benchmarker
+
+
def make_default_digit_benchmarker(report_dir: Path) -> Benchmarker:
    """Build a Benchmarker over the default armor-digit dataset split."""
    train_datasets, validation_datasets, test_datasets = default_armor_digit_datasets()
    return Benchmarker(
        report_dir=report_dir,
        classes=ArmorDigitPipeline.classes,
        train_datasets=train_datasets,
        validation_datasets=validation_datasets,
        test_datasets=test_datasets,
    )
diff --git a/robots-at-robots/research/robots_at_robots/dataset/__init__.py b/polystar_cv/research/robots/armor_digit/gcloud/__init__.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/dataset/__init__.py
rename to polystar_cv/research/robots/armor_digit/gcloud/__init__.py
diff --git a/polystar_cv/research/robots/armor_digit/gcloud/gather_performances.py b/polystar_cv/research/robots/armor_digit/gcloud/gather_performances.py
new file mode 100644
index 0000000000000000000000000000000000000000..67e0b93b7be8a13f33f96d183483e923d37c3ef0
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/gcloud/gather_performances.py
@@ -0,0 +1,41 @@
+import logging
+import pickle
+from pathlib import Path
+from typing import List
+
+from polystar.common.models.object import ArmorDigit
+from polystar.common.utils.iterable_utils import flatten
+from research.common.constants import EVALUATION_DIR
+from research.common.gcloud.gcloud_storage import GCStorages
+from research.robots.evaluation.metrics.f1 import F1Metric
+from research.robots.evaluation.performance import ClassificationPerformances
+from research.robots.evaluation.reporter import ImagePipelineEvaluationReporter
+
+
def load_performances(performances_paths: List[Path]) -> ClassificationPerformances:
    """Load and merge the pickled performances stored in the given files.

    SECURITY NOTE: pickle.loads executes arbitrary code embedded in the file;
    only load performance files from trusted storage.
    """
    return ClassificationPerformances(flatten(pickle.loads(perf_path.read_bytes()) for perf_path in performances_paths))
+
+
def gather_performances(task_name: str, job_id: str):
    """Download a job's pickled performances from GCS and write its evaluation report.

    The report is written under EVALUATION_DIR / task_name / job_id, next to the
    downloaded performance files.
    """
    logging.info(f"gathering performances for {job_id} on task {task_name}")
    experiment_dir = EVALUATION_DIR / task_name / job_id
    performances_paths = download_performances(experiment_dir)
    performances = load_performances(performances_paths)
    # DRY: reuse experiment_dir instead of rebuilding the same path a second time.
    ImagePipelineEvaluationReporter(
        report_dir=experiment_dir, classes=list(ArmorDigit), other_metrics=[F1Metric()]
    ).report(performances)
+
+
def download_performances(experiment_dir: Path) -> List[Path]:
    """Locate the experiment's .pkl performance files in GCS and download any missing locally.

    Returns the list of (local) paths to the performance files.
    """
    performances_paths = list(GCStorages.DEV.glob(experiment_dir, extension=".pkl"))
    logging.info(f"Found {len(performances_paths)} performances")
    for performance_path in performances_paths:
        # Skips files already present on disk.
        GCStorages.DEV.download_file_if_missing(performance_path)
    return performances_paths
+
+
if __name__ == "__main__":
    logging.getLogger().setLevel("INFO")

    # Gather and report two past gcloud training runs (CNN and VGG16).
    gather_performances("armor-digit", "cnn_20201220_224525")
    gather_performances("armor-digit", "vgg16_20201220_224417")
diff --git a/polystar_cv/research/robots/armor_digit/gcloud/hptuning_config.yaml b/polystar_cv/research/robots/armor_digit/gcloud/hptuning_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0a9fd9399e4fe497718c0d45c09b6c42f8ec098c
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/gcloud/hptuning_config.yaml
@@ -0,0 +1,33 @@
# AI Platform hyperparameter-tuning configuration for the armor-digit task.
trainingInput:
  pythonVersion: "3.7"
  runtimeVersion: "2.3"
  scaleTier: BASIC_GPU
  region: europe-west6

  hyperparameters:
    # Maximize the val_accuracy metric reported by the training module.
    goal: MAXIMIZE
    hyperparameterMetricTag: val_accuracy
    maxTrials: 50
    maxParallelTrials: 5
    params:
      - parameterName: lr
        type: DOUBLE
        minValue: 0.00001
        maxValue: 0.1
        scaleType: UNIT_LOG_SCALE  # learning rate searched on a log scale
      - parameterName: dropout
        type: DOUBLE
        minValue: 0
        maxValue: .99
      - parameterName: dense-size
        type: DISCRETE
        discreteValues:  # powers of two, 16..2048
          - 16
          - 32
          - 64
          - 128
          - 256
          - 512
          - 1024
          - 2048
        scaleType: UNIT_LOG_SCALE
diff --git a/polystar_cv/research/robots/armor_digit/gcloud/train.py b/polystar_cv/research/robots/armor_digit/gcloud/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..fecfaa7ceaadaa6c516e7130d6a086f7dc22bc42
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/gcloud/train.py
@@ -0,0 +1,19 @@
+import pickle
+from os.path import join
+
+from research.common.gcloud.gcloud_storage import GCStorage
+from research.robots.armor_digit.armor_digit_dataset import default_armor_digit_datasets
+from research.robots.armor_digit.pipeline import ArmorDigitPipeline
+from research.robots.evaluation.evaluator import ImageClassificationPipelineEvaluator
+from research.robots.evaluation.trainer import ImageClassificationPipelineTrainer
+
+
def train_evaluate_digit_pipeline(pipeline: ArmorDigitPipeline, job_dir: str):
    """Train the pipeline on the default digit datasets, then pickle its evaluation to GCS."""
    train_datasets, val_datasets, test_datasets = default_armor_digit_datasets()
    trainer = ImageClassificationPipelineTrainer(train_datasets, val_datasets)
    evaluator = ImageClassificationPipelineEvaluator(train_datasets, val_datasets, test_datasets)

    trainer.train_pipeline(pipeline)

    # Each pipeline gets its own perfs.pkl under the job directory.
    destination = join(job_dir, pipeline.name, "perfs.pkl")
    with GCStorage.open_from_str(destination, "wb") as f:
        pickle.dump(evaluator.evaluate_pipeline(pipeline), f)
diff --git a/polystar_cv/research/robots/armor_digit/gcloud/train_cnn.py b/polystar_cv/research/robots/armor_digit/gcloud/train_cnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9c74378a8731a08b2103951b740ab54d8cd3c2c
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/gcloud/train_cnn.py
@@ -0,0 +1,29 @@
+import logging
+import warnings
+from argparse import ArgumentParser
+
+from research.robots.armor_digit.gcloud.train import train_evaluate_digit_pipeline
+from research.robots.armor_digit.pipeline import ArmorDigitKerasPipeline
+
if __name__ == "__main__":
    logging.getLogger().setLevel("INFO")
    logging.getLogger("tensorflow").setLevel("ERROR")  # silence TensorFlow's verbose logging
    warnings.filterwarnings("ignore")

    # Hyperparameters and the job dir are passed on the command line by
    # AI Platform (see hptuning_config.yaml and trainer.sh).
    _parser = ArgumentParser()
    _parser.add_argument("--job-dir", type=str, required=True)
    _parser.add_argument("--lr", type=float, required=True)
    _parser.add_argument("--dropout", type=float, required=True)
    _parser.add_argument("--dense-size", type=int, required=True)
    _args = _parser.parse_args()

    # Fixed architecture; only lr, dropout and dense size are tuned.
    _pipeline = ArmorDigitKerasPipeline.from_custom_cnn(
        input_size=32,
        conv_blocks=((32, 32), (64, 64)),
        logs_dir=_args.job_dir,
        lr=_args.lr,
        dense_size=_args.dense_size,
        dropout=_args.dropout,
    )

    train_evaluate_digit_pipeline(_pipeline, _args.job_dir)
diff --git a/polystar_cv/research/robots/armor_digit/gcloud/train_vgg16.py b/polystar_cv/research/robots/armor_digit/gcloud/train_vgg16.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8cc89e716520a185d7277dc8ca93f484308a4a7
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/gcloud/train_vgg16.py
@@ -0,0 +1,31 @@
+import logging
+import warnings
+from argparse import ArgumentParser
+
+from tensorflow.python.keras.applications.vgg16 import VGG16
+
+from research.robots.armor_digit.gcloud.train import train_evaluate_digit_pipeline
+from research.robots.armor_digit.pipeline import ArmorDigitKerasPipeline
+
if __name__ == "__main__":
    logging.getLogger().setLevel("INFO")
    logging.getLogger("tensorflow").setLevel("ERROR")  # silence TensorFlow's verbose logging
    warnings.filterwarnings("ignore")

    # Hyperparameters and the job dir are passed on the command line by AI Platform.
    _parser = ArgumentParser()
    _parser.add_argument("--job-dir", type=str, required=True)
    _parser.add_argument("--lr", type=float, required=True)
    _parser.add_argument("--dropout", type=float, required=True)
    _parser.add_argument("--dense-size", type=int, required=True)
    _args = _parser.parse_args()

    # Transfer learning with a VGG16 backbone at 32x32 input.
    _pipeline = ArmorDigitKerasPipeline.from_transfer_learning(
        model_factory=VGG16,
        logs_dir=_args.job_dir,
        input_size=32,
        lr=_args.lr,
        dense_size=_args.dense_size,
        dropout=_args.dropout,
    )

    train_evaluate_digit_pipeline(_pipeline, _args.job_dir)
diff --git a/polystar_cv/research/robots/armor_digit/gcloud/train_xception.py b/polystar_cv/research/robots/armor_digit/gcloud/train_xception.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3ce07baabdf61945b4987e47f54ecb9f40a98ee
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/gcloud/train_xception.py
@@ -0,0 +1,31 @@
+import logging
+import warnings
+from argparse import ArgumentParser
+
+from tensorflow.python.keras.applications.xception import Xception
+
+from research.robots.armor_digit.gcloud.train import train_evaluate_digit_pipeline
+from research.robots.armor_digit.pipeline import ArmorDigitKerasPipeline
+
if __name__ == "__main__":
    logging.getLogger().setLevel("INFO")
    logging.getLogger("tensorflow").setLevel("ERROR")  # silence TensorFlow's verbose logging
    warnings.filterwarnings("ignore")

    # Hyperparameters and the job dir are passed on the command line by AI Platform.
    _parser = ArgumentParser()
    _parser.add_argument("--job-dir", type=str, required=True)
    _parser.add_argument("--lr", type=float, required=True)
    _parser.add_argument("--dropout", type=float, required=True)
    _parser.add_argument("--dense-size", type=int, required=True)
    _args = _parser.parse_args()

    # Transfer learning with an Xception backbone. NOTE(review): input_size=72
    # is larger than the other scripts' 32 — presumably to satisfy Xception's
    # minimum input size; confirm.
    _pipeline = ArmorDigitKerasPipeline.from_transfer_learning(
        model_factory=Xception,
        logs_dir=_args.job_dir,
        input_size=72,
        lr=_args.lr,
        dense_size=_args.dense_size,
        dropout=_args.dropout,
    )

    train_evaluate_digit_pipeline(_pipeline, _args.job_dir)
diff --git a/polystar_cv/research/robots/armor_digit/gcloud/trainer.sh b/polystar_cv/research/robots/armor_digit/gcloud/trainer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d4fe0ec8086ea274c3e799301a1fdb94f36be500
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/gcloud/trainer.sh
@@ -0,0 +1,30 @@
#!/bin/bash
# Submit an armor-digit hyperparameter-tuning job to Google AI Platform,
# then follow it with TensorBoard.
set -e

# Move to the repository root (this script lives 5 directories deep).
cd ../../../../../

# 1. Params
task_name="armor-digit"

read -rp "Experiment name:" experiment_name
# Job ids must be unique: suffix the experiment name with a timestamp.
job_id=${experiment_name}_$(date +'%Y%m%d_%H%M%S')
job_dir="gs://poly-cv-dev/experiments/$task_name/$job_id"
# Normalize the git user name into a gcloud-compatible label value (lowercase, no spaces).
author=$(git config user.name | tr " " - | tr '[:upper:]' '[:lower:]')
echo Running job "$job_id" for task "$task_name" by "$author"

# 2. build source (the wheel is uploaded as the job's package)
poetry build -f wheel

# 3. start job
gcloud ai-platform jobs submit training "${job_id}" \
    --config polystar_cv/research/robots/armor_digit/gcloud/hptuning_config.yaml \
    --job-dir="${job_dir}" \
    --packages ./dist/polystar_cv-0.2.0-py3-none-any.whl \
    --module-name=research.robots.armor_digit.gcloud.train_cnn \
    --labels task=${task_name},author="${author}"

# 4. logs
echo "logs:  https://console.cloud.google.com/logs/query;query=resource.labels.job_id%3D%22${job_id}%22?project=polystar-cv"
echo "job:   https://console.cloud.google.com/ai-platform/jobs/${job_id}/charts/cpu?project=polystar-cv"

# Blocks here, streaming training metrics written to the job dir.
tensorboard --logdir="${job_dir}"
diff --git a/polystar_cv/research/robots/armor_digit/pipeline.py b/polystar_cv/research/robots/armor_digit/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..24467ba3e894ca2f0c193f64330c5bb5a3855140
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/pipeline.py
@@ -0,0 +1,13 @@
+from polystar.common.models.object import ArmorDigit
+from polystar.common.pipeline.classification.classification_pipeline import ClassificationPipeline
+from polystar.common.pipeline.classification.keras_classification_pipeline import KerasClassificationPipeline
+from polystar.common.utils.registry import registry
+
+
class ArmorDigitPipeline(ClassificationPipeline):
    """Classification pipeline whose prediction classes are the ArmorDigit values."""

    enum = ArmorDigit
+
+
# Registered so the class can be looked up by name through the project registry.
@registry.register()
class ArmorDigitKerasPipeline(ArmorDigitPipeline, KerasClassificationPipeline):
    """Keras-backed variant of the armor-digit classification pipeline."""

    pass
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/__init__.py b/polystar_cv/research/robots/armor_digit/scripts/__init__.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/evaluation/__init__.py
rename to polystar_cv/research/robots/armor_digit/scripts/__init__.py
diff --git a/polystar_cv/research/robots/armor_digit/scripts/benchmark.py b/polystar_cv/research/robots/armor_digit/scripts/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..2dde34dbd1f6dd7fd69493ad98cd86361cd41bae
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/scripts/benchmark.py
@@ -0,0 +1,56 @@
+import logging
+import warnings
+from pathlib import Path
+
+from polystar.common.constants import PROJECT_DIR
+from polystar.common.pipeline.classification.random_model import RandomClassifier
+from polystar.common.utils.serialization import pkl_load
+from research.common.utils.experiment_dir import prompt_experiment_dir
+from research.robots.armor_digit.digit_benchmarker import make_default_digit_benchmarker
+from research.robots.armor_digit.pipeline import ArmorDigitKerasPipeline, ArmorDigitPipeline
+
if __name__ == "__main__":
    logging.getLogger().setLevel("INFO")
    logging.getLogger("tensorflow").setLevel("ERROR")  # silence TensorFlow's verbose logging
    warnings.filterwarnings("ignore")

    _report_dir: Path = prompt_experiment_dir("armor-digit")

    logging.info(f"Running benchmarking {_report_dir.name}")

    _benchmarker = make_default_digit_benchmarker(report_dir=_report_dir)

    _random_pipeline = ArmorDigitPipeline.from_pipes([RandomClassifier()], name="random")
    _cnn_pipeline = ArmorDigitKerasPipeline.from_custom_cnn(
        input_size=32,
        conv_blocks=((32, 32), (64, 64)),
        logs_dir=str(_report_dir),
        dropout=0.66,
        lr=0.00078,
        dense_size=1024,
        name="cnn",
    )
    # _vgg16_pipeline = ArmorDigitKerasPipeline.from_transfer_learning(
    #     input_size=32, logs_dir=_report_dir, dropout=0, lr=0.00021, dense_size=64, model_factory=VGG16
    # )

    # Pre-trained VGG16 transfer-learning pipeline loaded from a pickled artifact.
    # SECURITY NOTE: pkl_load unpickles the file — only load trusted artifacts.
    _vgg16_pipeline = pkl_load(
        PROJECT_DIR / "pipelines/armor-digit/20201225_131957_vgg16/VGG16 (32) - lr 2.1e-04 - drop 0.pkl"
    )
    _vgg16_pipeline.name = "vgg16_tl"

    # CNN distilled from the VGG16 teacher (knowledge distillation).
    _distiled_vgg16_into_cnn_pipeline = ArmorDigitKerasPipeline.from_distillation(
        teacher_pipeline=_vgg16_pipeline,
        conv_blocks=((32, 32), (64, 64)),
        logs_dir=_report_dir,  # NOTE(review): other calls pass str(_report_dir); confirm a Path is accepted
        dropout=0.63,
        lr=0.000776,
        dense_size=1024,
        temperature=41.2,
        name="cnn_kd",
    )

    # The VGG16 pipeline is already trained, so it is passed separately.
    _benchmarker.benchmark(
        pipelines=[_random_pipeline, _distiled_vgg16_into_cnn_pipeline, _cnn_pipeline],
        trained_pipelines=[_vgg16_pipeline],
    )
diff --git a/robots-at-robots/research/robots_at_robots/armor_digit/clean_datasets.py b/polystar_cv/research/robots/armor_digit/scripts/clean_datasets.py
similarity index 62%
rename from robots-at-robots/research/robots_at_robots/armor_digit/clean_datasets.py
rename to polystar_cv/research/robots/armor_digit/scripts/clean_datasets.py
index f78124e882254b00be7314e9ba499c95edaa610e..39a1f9c2f5979008d8d7cd48e112d3bac9970845 100644
--- a/robots-at-robots/research/robots_at_robots/armor_digit/clean_datasets.py
+++ b/polystar_cv/research/robots/armor_digit/scripts/clean_datasets.py
@@ -1,6 +1,6 @@
 from research.common.dataset.cleaning.dataset_cleaner_app import DatasetCleanerApp
 from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
-from research.robots_at_robots.armor_digit.armor_digit_dataset import make_armor_digit_dataset_generator
+from research.robots.armor_digit.armor_digit_dataset import make_armor_digit_dataset_generator
 
 if __name__ == "__main__":
     # _roco_dataset = ROCODatasetsZoo.TWITCH.T470149568
@@ -16,22 +16,7 @@ if __name__ == "__main__":
     _roco_dataset = ROCODatasetsZoo.DJI.FINAL
 
     _armor_digit_dataset = (
-        make_armor_digit_dataset_generator()
-        .from_roco_dataset(_roco_dataset)
-        .skip(
-            (1009 - 117)
-            + (1000 - 86)
-            + (1000 - 121)
-            + (1000 - 138)
-            + (1000 - 137)
-            + (1000 - 154)
-            + (1000 - 180)
-            + (1000 - 160)
-            + (1000 - 193)
-            + (1000 - 80)
-            + (1000 - 154)
-        )
-        .cap(1000)
+        make_armor_digit_dataset_generator().from_roco_dataset(_roco_dataset).skip(2133 + 1764 + 1436).cap(1000)
     )
 
     DatasetCleanerApp(_armor_digit_dataset, invalidate_key="u", validate_key="h").run()
diff --git a/polystar_cv/research/robots/armor_digit/scripts/hyper_tune_cnn.py b/polystar_cv/research/robots/armor_digit/scripts/hyper_tune_cnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..26ce661280132d49c5b1977e1f1f030a461ea811
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/scripts/hyper_tune_cnn.py
@@ -0,0 +1,32 @@
+import logging
+import warnings
+from pathlib import Path
+
+from optuna import Trial
+
+from research.common.utils.experiment_dir import make_experiment_dir
+from research.robots.armor_digit.digit_benchmarker import make_default_digit_benchmarker
+from research.robots.armor_digit.pipeline import ArmorDigitKerasPipeline, ArmorDigitPipeline
+from research.robots.evaluation.hyper_tuner import HyperTuner
+
+
def cnn_pipeline_factory(report_dir: Path, trial: Trial) -> ArmorDigitPipeline:
    """Build a CNN digit pipeline from an Optuna trial's sampled hyperparameters.

    Tunes dropout (uniform in [0, 0.99]), learning rate (log-uniform in
    [1e-5, 1e-1]) and dense-layer size (powers of two from 8 to 1024).
    """
    return ArmorDigitKerasPipeline.from_custom_cnn(
        input_size=32,
        conv_blocks=((32, 32), (64, 64)),
        logs_dir=str(report_dir),  # training logs go into the trial's report dir
        dropout=trial.suggest_uniform("dropout", 0, 0.99),
        lr=trial.suggest_loguniform("lr", 1e-5, 1e-1),
        # dense_size_log2 in [3, 10] -> dense sizes 8..1024
        dense_size=2 ** round(trial.suggest_discrete_uniform("dense_size_log2", 3, 10, 1)),
    )
+
+
if __name__ == "__main__":
    logging.getLogger().setLevel("INFO")
    logging.getLogger("tensorflow").setLevel("ERROR")  # silence TensorFlow's verbose logging
    warnings.filterwarnings("ignore")

    logging.info("Hyperparameter tuning for CNN pipeline on digit task")
    # 50 Optuna trials; each trial is trained/evaluated through the default digit benchmarker.
    HyperTuner(make_default_digit_benchmarker(make_experiment_dir("armor-digit", "cnn_tuning"))).tune(
        cnn_pipeline_factory, n_trials=50
    )
diff --git a/polystar_cv/research/robots/armor_digit/scripts/hyper_tune_distiled_vgg16_into_cnn.py b/polystar_cv/research/robots/armor_digit/scripts/hyper_tune_distiled_vgg16_into_cnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b035e72dad631fa6437c8e269c0c1cfe6195136
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/scripts/hyper_tune_distiled_vgg16_into_cnn.py
@@ -0,0 +1,39 @@
+import logging
+import warnings
+from pathlib import Path
+
+from optuna import Trial
+
+from polystar.common.constants import PROJECT_DIR
+from polystar.common.utils.serialization import pkl_load
+from research.common.utils.experiment_dir import make_experiment_dir
+from research.robots.armor_digit.digit_benchmarker import make_default_digit_benchmarker
+from research.robots.armor_digit.pipeline import ArmorDigitKerasPipeline, ArmorDigitPipeline
+from research.robots.evaluation.hyper_tuner import HyperTuner
+
+
+class DistilledPipelineFactory:
+    def __init__(self, teacher_name: str):
+        self.teacher: ArmorDigitKerasPipeline = pkl_load(PROJECT_DIR / "pipelines/armor-digit" / teacher_name)
+
+    def __call__(self, report_dir: Path, trial: Trial) -> ArmorDigitPipeline:
+        return ArmorDigitKerasPipeline.from_distillation(
+            teacher_pipeline=self.teacher,
+            conv_blocks=((32, 32), (64, 64)),
+            logs_dir=str(report_dir),
+            dropout=trial.suggest_uniform("dropout", 0, 0.99),
+            lr=trial.suggest_loguniform("lr", 5e-4, 1e-3),
+            dense_size=1024,  # 2 ** round(trial.suggest_discrete_uniform("dense_size_log2", 3, 10, 1)),
+            temperature=trial.suggest_loguniform("temperature", 1, 100),
+        )
+
+
+if __name__ == "__main__":
+    logging.getLogger().setLevel("INFO")
+    logging.getLogger("tensorflow").setLevel("ERROR")
+    warnings.filterwarnings("ignore")
+
+    logging.info("Hyperparameter tuning for VGG16 distillation into CNN pipeline on digit task")
+    HyperTuner(make_default_digit_benchmarker(make_experiment_dir("armor-digit", "distillation_tuning"))).tune(
+        DistilledPipelineFactory("20201225_131957_vgg16/VGG16 (32) - lr 2.1e-04 - drop 0.pkl"), n_trials=50
+    )
diff --git a/polystar_cv/research/robots/armor_digit/scripts/train_vgg16.py b/polystar_cv/research/robots/armor_digit/scripts/train_vgg16.py
new file mode 100644
index 0000000000000000000000000000000000000000..650aabcf1f444a83ec2f79483c14bd1d7c8d0697
--- /dev/null
+++ b/polystar_cv/research/robots/armor_digit/scripts/train_vgg16.py
@@ -0,0 +1,37 @@
+import logging
+import warnings
+
+from tensorflow.python.keras.applications.vgg16 import VGG16
+
+from polystar.common.constants import PROJECT_DIR
+from polystar.common.utils.serialization import pkl_dump
+from polystar.common.utils.time import create_time_id
+from research.robots.armor_digit.digit_benchmarker import make_default_digit_benchmarker
+from research.robots.armor_digit.pipeline import ArmorDigitKerasPipeline
+
+PIPELINES_DIR = PROJECT_DIR / "pipelines"
+
+if __name__ == "__main__":
+    logging.getLogger().setLevel("INFO")
+    logging.getLogger("tensorflow").setLevel("ERROR")
+    warnings.filterwarnings("ignore")
+    logging.info("Training vgg16")
+
+    _training_dir = PIPELINES_DIR / "armor-digit" / f"{create_time_id()}_vgg16_full_dset"
+
+    _vgg16_pipeline = ArmorDigitKerasPipeline.from_transfer_learning(
+        input_size=32,
+        logs_dir=str(_training_dir),
+        dropout=0,
+        lr=0.00021,
+        dense_size=64,
+        model_factory=VGG16,
+        verbose=1,
+    )
+
+    logging.info(f"Run `tensorboard --logdir={_training_dir}` for realtime logs")
+
+    _benchmarker = make_default_digit_benchmarker(_training_dir)
+    _benchmarker.benchmark([_vgg16_pipeline])
+
+    pkl_dump(_vgg16_pipeline, _training_dir / _vgg16_pipeline.name)
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/metrics/__init__.py b/polystar_cv/research/robots/dataset/__init__.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/evaluation/metrics/__init__.py
rename to polystar_cv/research/robots/dataset/__init__.py
diff --git a/robots-at-robots/research/robots_at_robots/dataset/armor_dataset_factory.py b/polystar_cv/research/robots/dataset/armor_dataset_factory.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/dataset/armor_dataset_factory.py
rename to polystar_cv/research/robots/dataset/armor_dataset_factory.py
diff --git a/robots-at-robots/research/robots_at_robots/dataset/armor_value_dataset_cache.py b/polystar_cv/research/robots/dataset/armor_value_dataset_cache.py
similarity index 73%
rename from robots-at-robots/research/robots_at_robots/dataset/armor_value_dataset_cache.py
rename to polystar_cv/research/robots/dataset/armor_value_dataset_cache.py
index 68f1b15057edf05447b2e6ffc0ced3394ffb457f..b826aa26800c1486dab607f7aeb9bf299444238c 100644
--- a/robots-at-robots/research/robots_at_robots/dataset/armor_value_dataset_cache.py
+++ b/polystar_cv/research/robots/dataset/armor_value_dataset_cache.py
@@ -3,6 +3,8 @@ from pathlib import Path
 from shutil import rmtree
 from typing import ClassVar, Generic, Optional
 
+from google.cloud.exceptions import Forbidden
+
 from polystar.common.models.image import Image, save_image
 from polystar.common.utils.misc import identity
 from polystar.common.utils.time import create_time_id
@@ -10,8 +12,9 @@ from polystar.common.utils.tqdm import smart_tqdm
 from research.common.datasets.lazy_dataset import LazyDataset, TargetT
 from research.common.datasets.roco.roco_dataset_builder import ROCODatasetBuilder
 from research.common.datasets.transform_dataset import TransformDataset
-from research.robots_at_robots.dataset.armor_dataset_factory import ArmorDataset
-from research.robots_at_robots.dataset.armor_value_target_factory import ArmorValueTargetFactory
+from research.common.gcloud.gcloud_storage import GCStorages
+from research.robots.dataset.armor_dataset_factory import ArmorDataset
+from research.robots.dataset.armor_value_target_factory import ArmorValueTargetFactory
 
 
 class ArmorValueDatasetCache(Generic[TargetT]):
@@ -30,11 +33,23 @@ class ArmorValueDatasetCache(Generic[TargetT]):
         self.roco_dataset_builder = roco_dataset_builder
         self.lock_file = cache_dir / ".lock"
 
-    def generate_if_needed(self):
+        self.cache_dir.mkdir(parents=True, exist_ok=True)
+
+    def generate_or_download_if_needed(self):
         cause = self._get_generation_cause()
         if cause is None:
             return
         self._clean_cache_dir()
+        try:
+            GCStorages.DEV.download_directory(self.cache_dir)
+            cause = self._get_generation_cause()
+            if cause is None:
+                return
+            self._clean_cache_dir()
+        except FileNotFoundError:
+            cause += " and not on gcloud"
+        except Forbidden:
+            pass
         self.save(self._generate(), cause)
 
     def _clean_cache_dir(self):
@@ -44,7 +59,7 @@ class ArmorValueDatasetCache(Generic[TargetT]):
     def save(self, dataset: LazyDataset[Image, TargetT], cause: str):
         desc = f"Generating dataset {self.dataset_name} (cause: {cause})"
         for img, target, name in smart_tqdm(dataset, desc=desc, unit="img"):
-            save_image(img, self.cache_dir / f"{name}-{target}.jpg")
+            save_image(img, self.cache_dir / f"{name}-{str(target)}.jpg")
         self.lock_file.write_text(json.dumps({"version": self.VERSION, "date": create_time_id()}))
 
     def _generate(self) -> LazyDataset[Image, TargetT]:
diff --git a/robots-at-robots/research/robots_at_robots/dataset/armor_value_dataset_generator.py b/polystar_cv/research/robots/dataset/armor_value_dataset_generator.py
similarity index 70%
rename from robots-at-robots/research/robots_at_robots/dataset/armor_value_dataset_generator.py
rename to polystar_cv/research/robots/dataset/armor_value_dataset_generator.py
index 4aafd34e781d3ff32eac9a774a7da97e8b3fb448..a296d9c814af80914d5ba8e8ddc787221559fb41 100644
--- a/robots-at-robots/research/robots_at_robots/dataset/armor_value_dataset_generator.py
+++ b/polystar_cv/research/robots/dataset/armor_value_dataset_generator.py
@@ -1,5 +1,5 @@
 from pathlib import Path
-from typing import Generic, List
+from typing import Generic, Iterable, List
 
 from polystar.common.filters.exclude_filter import ExcludeFilter
 from polystar.common.filters.filter_abc import FilterABC
@@ -9,8 +9,8 @@ from research.common.datasets.image_dataset import FileImageDataset
 from research.common.datasets.image_file_dataset_builder import DirectoryDatasetBuilder
 from research.common.datasets.lazy_dataset import TargetT
 from research.common.datasets.roco.roco_dataset_builder import ROCODatasetBuilder
-from research.robots_at_robots.dataset.armor_value_dataset_cache import ArmorValueDatasetCache
-from research.robots_at_robots.dataset.armor_value_target_factory import ArmorValueTargetFactory
+from research.robots.dataset.armor_value_dataset_cache import ArmorValueDatasetCache
+from research.robots.dataset.armor_value_target_factory import ArmorValueTargetFactory
 
 
 class ExcludeFilesFilter(ExcludeFilter[Path]):
@@ -30,14 +30,21 @@ class ArmorValueDatasetGenerator(Generic[TargetT]):
         self.targets_filter = targets_filter or PassThroughFilter()
 
     # FIXME signature inconsistency across methods
-    def from_roco_datasets(self, roco_datasets: List[ROCODatasetBuilder]) -> List[FileImageDataset[TargetT]]:
-        return [self.from_roco_dataset(roco_dataset).to_file_images().build() for roco_dataset in roco_datasets]
+    def from_roco_datasets(
+        self, *roco_datasets_list: List[ROCODatasetBuilder]
+    ) -> Iterable[List[FileImageDataset[TargetT]]]:
+        return (
+            [self.from_roco_dataset(roco_dataset).to_file_images().build() for roco_dataset in roco_datasets]
+            for roco_datasets in roco_datasets_list
+        )
 
     def from_roco_dataset(self, roco_dataset_builder: ROCODatasetBuilder) -> DirectoryDatasetBuilder[TargetT]:
         cache_dir = roco_dataset_builder.main_dir / self.task_name
         dataset_name = roco_dataset_builder.name
 
-        ArmorValueDatasetCache(roco_dataset_builder, cache_dir, dataset_name, self.target_factory).generate_if_needed()
+        ArmorValueDatasetCache(
+            roco_dataset_builder, cache_dir, dataset_name, self.target_factory
+        ).generate_or_download_if_needed()
 
         return (
             DirectoryDatasetBuilder(cache_dir, self.target_factory.from_file, dataset_name)
diff --git a/robots-at-robots/research/robots_at_robots/dataset/armor_value_target_factory.py b/polystar_cv/research/robots/dataset/armor_value_target_factory.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/dataset/armor_value_target_factory.py
rename to polystar_cv/research/robots/dataset/armor_value_target_factory.py
diff --git a/robots-at-runes/polystar/robots_at_runes/__init__.py b/polystar_cv/research/robots/demos/__init__.py
similarity index 100%
rename from robots-at-runes/polystar/robots_at_runes/__init__.py
rename to polystar_cv/research/robots/demos/__init__.py
diff --git a/robots-at-robots/research/robots_at_robots/demos/demo_infer.py b/polystar_cv/research/robots/demos/demo_infer.py
similarity index 89%
rename from robots-at-robots/research/robots_at_robots/demos/demo_infer.py
rename to polystar_cv/research/robots/demos/demo_infer.py
index 40c567ff5b4dd61cd8b18325ed1b882b9ad4994c..51c1cc2258d9ec00b2e680ff2dd01dbb618f2ae0 100644
--- a/robots-at-robots/research/robots_at_robots/demos/demo_infer.py
+++ b/polystar_cv/research/robots/demos/demo_infer.py
@@ -1,12 +1,12 @@
+from polystar.common.dependency_injection import make_injector
 from polystar.common.models.label_map import LabelMap
 from polystar.common.target_pipeline.detected_objects.detected_objects_factory import DetectedObjectFactory
 from polystar.common.target_pipeline.objects_detectors.tf_model_objects_detector import TFModelObjectsDetector
 from polystar.common.target_pipeline.objects_validators.confidence_object_validator import ConfidenceObjectValidator
 from polystar.common.utils.tensorflow import patch_tf_v2
 from polystar.common.view.plt_results_viewer import PltResultViewer
-from polystar.robots_at_robots.dependency_injection import make_injector
 from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
-from research.robots_at_robots.demos.utils import load_tf_model
+from research.robots.demos.utils import load_tf_model
 
 if __name__ == "__main__":
     patch_tf_v2()
diff --git a/robots-at-robots/research/robots_at_robots/demos/demo_pipeline.py b/polystar_cv/research/robots/demos/demo_pipeline.py
similarity index 90%
rename from robots-at-robots/research/robots_at_robots/demos/demo_pipeline.py
rename to polystar_cv/research/robots/demos/demo_pipeline.py
index c3a4d34ac4b40d71ae7b4214450be2a5137a0be7..ff5475b56724deabb5acb4d468ae92f763336e97 100644
--- a/robots-at-robots/research/robots_at_robots/demos/demo_pipeline.py
+++ b/polystar_cv/research/robots/demos/demo_pipeline.py
@@ -1,6 +1,7 @@
 import cv2
 
 from polystar.common.communication.print_target_sender import PrintTargetSender
+from polystar.common.dependency_injection import make_injector
 from polystar.common.models.camera import Camera
 from polystar.common.models.label_map import LabelMap
 from polystar.common.target_pipeline.armors_descriptors.armors_color_descriptor import ArmorsColorDescriptor
@@ -14,14 +15,10 @@ from polystar.common.target_pipeline.target_factories.ratio_simple_target_factor
 from polystar.common.target_pipeline.target_pipeline import NoTargetFoundException
 from polystar.common.utils.tensorflow import patch_tf_v2
 from polystar.common.view.plt_results_viewer import PltResultViewer
-from polystar.robots_at_robots.dependency_injection import make_injector
 from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
-from research.robots_at_robots.armor_color.benchmark import (
-    ArmorColorPipeline,
-    MeanChannels,
-    RedBlueComparisonClassifier,
-)
-from research.robots_at_robots.demos.utils import load_tf_model
+from research.robots.armor_color.pipeline import ArmorColorPipeline
+from research.robots.armor_color.scripts.benchmark import MeanChannels, RedBlueComparisonClassifier
+from research.robots.demos.utils import load_tf_model
 
 if __name__ == "__main__":
     patch_tf_v2()
diff --git a/robots-at-robots/research/robots_at_robots/demos/demo_pipeline_camera.py b/polystar_cv/research/robots/demos/demo_pipeline_camera.py
similarity index 97%
rename from robots-at-robots/research/robots_at_robots/demos/demo_pipeline_camera.py
rename to polystar_cv/research/robots/demos/demo_pipeline_camera.py
index 0db0dd13ce38d6a41ed71190fd788a0356679aba..660e09e9620c79f37a6733b34c942da9b916531f 100644
--- a/robots-at-robots/research/robots_at_robots/demos/demo_pipeline_camera.py
+++ b/polystar_cv/research/robots/demos/demo_pipeline_camera.py
@@ -5,6 +5,7 @@ import pycuda.autoinit  # This is needed for initializing CUDA driver
 
 from polystar.common.communication.file_descriptor_target_sender import FileDescriptorTargetSender
 from polystar.common.constants import MODELS_DIR
+from polystar.common.dependency_injection import make_injector
 from polystar.common.frame_generators.camera_frame_generator import CameraFrameGenerator
 from polystar.common.models.camera import Camera
 from polystar.common.models.label_map import LabelMap
@@ -18,7 +19,6 @@ from polystar.common.target_pipeline.objects_validators.type_object_validator im
 from polystar.common.target_pipeline.target_factories.ratio_simple_target_factory import RatioSimpleTargetFactory
 from polystar.common.utils.tensorflow import patch_tf_v2
 from polystar.common.view.cv2_results_viewer import CV2ResultViewer
-from polystar.robots_at_robots.dependency_injection import make_injector
 from polystar.robots_at_robots.globals import settings
 
 [pycuda.autoinit]  # So pycharm won't remove the import
diff --git a/robots-at-robots/research/robots_at_robots/demos/utils.py b/polystar_cv/research/robots/demos/utils.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/demos/utils.py
rename to polystar_cv/research/robots/demos/utils.py
diff --git a/robots-at-runes/research/robots_at_runes/__init__.py b/polystar_cv/research/robots/evaluation/__init__.py
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/__init__.py
rename to polystar_cv/research/robots/evaluation/__init__.py
diff --git a/polystar_cv/research/robots/evaluation/benchmarker.py b/polystar_cv/research/robots/evaluation/benchmarker.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c3123faaefd550caf4097e28e8dbf543a7a9394
--- /dev/null
+++ b/polystar_cv/research/robots/evaluation/benchmarker.py
@@ -0,0 +1,50 @@
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+from typing import List
+
+from polystar.common.pipeline.classification.classification_pipeline import ClassificationPipeline
+from research.common.datasets.image_dataset import FileImageDataset
+from research.robots.evaluation.evaluator import ImageClassificationPipelineEvaluator
+from research.robots.evaluation.metrics.f1 import F1Metric
+from research.robots.evaluation.performance import ClassificationPerformances
+from research.robots.evaluation.reporter import ImagePipelineEvaluationReporter
+from research.robots.evaluation.trainer import ImageClassificationPipelineTrainer
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class Benchmarker:
+    def __init__(
+        self,
+        train_datasets: List[FileImageDataset],
+        validation_datasets: List[FileImageDataset],
+        test_datasets: List[FileImageDataset],
+        classes: List,
+        report_dir: Path,
+    ):
+        report_dir.mkdir(exist_ok=True, parents=True)
+        self.trainer = ImageClassificationPipelineTrainer(train_datasets, validation_datasets)
+        self.evaluator = ImageClassificationPipelineEvaluator(train_datasets, validation_datasets, test_datasets)
+        self.reporter = ImagePipelineEvaluationReporter(
+            report_dir=report_dir, classes=classes, other_metrics=[F1Metric()]
+        )
+        self.performances = ClassificationPerformances()
+        logger.info(f"Run `tensorboard --logdir={report_dir}` for realtime logs when using keras")
+
+    def train_and_evaluate(self, pipeline: ClassificationPipeline) -> ClassificationPerformances:
+        self.trainer.train_pipeline(pipeline)
+        pipeline_performances = self.evaluator.evaluate_pipeline(pipeline)
+        self.performances += pipeline_performances
+        return pipeline_performances
+
+    def benchmark(
+        self, pipelines: List[ClassificationPipeline], trained_pipelines: List[ClassificationPipeline] = None
+    ):
+        self.trainer.train_pipelines(pipelines)
+        self.performances += self.evaluator.evaluate_pipelines(pipelines + (trained_pipelines or []))
+        self.make_report()
+
+    def make_report(self):
+        self.reporter.report(self.performances)
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/image_pipeline_evaluator.py b/polystar_cv/research/robots/evaluation/evaluator.py
similarity index 83%
rename from robots-at-robots/research/robots_at_robots/evaluation/image_pipeline_evaluator.py
rename to polystar_cv/research/robots/evaluation/evaluator.py
index 9f11ae38cb1071e8868ee9c13d38f5a3ec795782..d03c1c9ec6b157c5c0c71b896d2367e6e15f4420 100644
--- a/robots-at-robots/research/robots_at_robots/evaluation/image_pipeline_evaluator.py
+++ b/polystar_cv/research/robots/evaluation/evaluator.py
@@ -9,12 +9,12 @@ from polystar.common.pipeline.classification.classification_pipeline import Clas
 from polystar.common.utils.iterable_utils import flatten
 from research.common.datasets.image_dataset import FileImageDataset
 from research.common.datasets.lazy_dataset import TargetT
-from research.robots_at_robots.evaluation.performance import (
+from research.robots.evaluation.performance import (
     ClassificationPerformance,
     ClassificationPerformances,
     ContextualizedClassificationPerformance,
 )
-from research.robots_at_robots.evaluation.set import Set
+from research.robots.evaluation.set import Set
 
 
 class ImageClassificationPipelineEvaluator(Generic[TargetT]):
@@ -27,11 +27,13 @@ class ImageClassificationPipelineEvaluator(Generic[TargetT]):
         self.set2datasets = {Set.TRAIN: train_datasets, Set.VALIDATION: validation_datasets, Set.TEST: test_datasets}
 
     def evaluate_pipelines(self, pipelines: Iterable[ClassificationPipeline]) -> ClassificationPerformances:
-        return ClassificationPerformances(flatten(self._evaluate_pipeline(pipeline) for pipeline in pipelines))
+        rv = ClassificationPerformances()
+        for pipeline in pipelines:
+            rv += self.evaluate_pipeline(pipeline)
+        return rv
 
-    def _evaluate_pipeline(self, pipeline: ClassificationPipeline) -> Iterable[ContextualizedClassificationPerformance]:
-        for set_ in Set:
-            yield from self._evaluate_pipeline_on_set(pipeline, set_)
+    def evaluate_pipeline(self, pipeline: ClassificationPipeline) -> ClassificationPerformances:
+        return ClassificationPerformances(flatten(self._evaluate_pipeline_on_set(pipeline, set_) for set_ in Set))
 
     def _evaluate_pipeline_on_set(
         self, pipeline: ClassificationPipeline, set_: Set
diff --git a/polystar_cv/research/robots/evaluation/hyper_tuner.py b/polystar_cv/research/robots/evaluation/hyper_tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..4953894b83765eba5674c063f773bcc9af29482b
--- /dev/null
+++ b/polystar_cv/research/robots/evaluation/hyper_tuner.py
@@ -0,0 +1,34 @@
+from pathlib import Path
+from typing import Callable, Optional
+
+from optuna import Trial, create_study
+
+from polystar.common.pipeline.classification.classification_pipeline import ClassificationPipeline
+from polystar.common.utils.serialization import pkl_dump
+from research.robots.evaluation.benchmarker import Benchmarker
+from research.robots.evaluation.metrics.accuracy import AccuracyMetric
+from research.robots.evaluation.metrics.metric_abc import MetricABC
+
+PipelineFactory = Callable[[Path, Trial], ClassificationPipeline]
+
+
+class HyperTuner:
+    def __init__(self, benchmarker: Benchmarker, metric: MetricABC = AccuracyMetric(), report_frequency: int = 5):
+        self.report_frequency = report_frequency
+        self.metric = metric
+        self.benchmarker = benchmarker
+        self._pipeline_factory: Optional[PipelineFactory] = None
+
+    def tune(self, pipeline_factory: PipelineFactory, n_trials: int, minimize: bool = False):
+        self._pipeline_factory = pipeline_factory
+        study = create_study(direction="minimize" if minimize else "maximize")
+        study.optimize(self._objective, n_trials=n_trials, show_progress_bar=True)
+        self.benchmarker.make_report()
+        pkl_dump(study, self.benchmarker.reporter.report_dir / "study")
+
+    def _objective(self, trial: Trial) -> float:
+        pipeline = self._pipeline_factory(self.benchmarker.reporter.report_dir, trial)
+        performances = self.benchmarker.train_and_evaluate(pipeline)
+        if not trial.number % self.report_frequency:
+            self.benchmarker.make_report()
+        return self.metric(performances.validation.collapse())
diff --git a/robots-at-runes/research/robots_at_runes/dataset/__init__.py b/polystar_cv/research/robots/evaluation/metrics/__init__.py
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/dataset/__init__.py
rename to polystar_cv/research/robots/evaluation/metrics/__init__.py
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/metrics/accuracy.py b/polystar_cv/research/robots/evaluation/metrics/accuracy.py
similarity index 59%
rename from robots-at-robots/research/robots_at_robots/evaluation/metrics/accuracy.py
rename to polystar_cv/research/robots/evaluation/metrics/accuracy.py
index ccfe9c73bdda26c7c0624fa3220d30e335a76506..60716f26566175650c28638697ba873a75831350 100644
--- a/robots-at-robots/research/robots_at_robots/evaluation/metrics/accuracy.py
+++ b/polystar_cv/research/robots/evaluation/metrics/accuracy.py
@@ -1,5 +1,5 @@
-from research.robots_at_robots.evaluation.metrics.metric_abc import MetricABC
-from research.robots_at_robots.evaluation.performance import ClassificationPerformance
+from research.robots.evaluation.metrics.metric_abc import MetricABC
+from research.robots.evaluation.performance import ClassificationPerformance
 
 
 class AccuracyMetric(MetricABC):
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/metrics/f1.py b/polystar_cv/research/robots/evaluation/metrics/f1.py
similarity index 79%
rename from robots-at-robots/research/robots_at_robots/evaluation/metrics/f1.py
rename to polystar_cv/research/robots/evaluation/metrics/f1.py
index dd5f48ae0202e7b917f58e7d6f94b1713de2caff..0730c42350a9af016845de0d11351904575845a9 100644
--- a/robots-at-robots/research/robots_at_robots/evaluation/metrics/f1.py
+++ b/polystar_cv/research/robots/evaluation/metrics/f1.py
@@ -2,8 +2,8 @@ from enum import Enum, auto
 
 from sklearn.metrics import f1_score
 
-from research.robots_at_robots.evaluation.metrics.metric_abc import MetricABC
-from research.robots_at_robots.evaluation.performance import ClassificationPerformance
+from research.robots.evaluation.metrics.metric_abc import MetricABC
+from research.robots.evaluation.performance import ClassificationPerformance
 
 
 class F1Strategy(Enum):
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/metrics/metric_abc.py b/polystar_cv/research/robots/evaluation/metrics/metric_abc.py
similarity index 77%
rename from robots-at-robots/research/robots_at_robots/evaluation/metrics/metric_abc.py
rename to polystar_cv/research/robots/evaluation/metrics/metric_abc.py
index f25a0c3f122a311d3495e74a5d02a3d9eff224e2..f9395858e6986d6e4c5c3e4bd85c9eeee3ebda62 100644
--- a/robots-at-robots/research/robots_at_robots/evaluation/metrics/metric_abc.py
+++ b/polystar_cv/research/robots/evaluation/metrics/metric_abc.py
@@ -1,6 +1,6 @@
 from abc import ABC, abstractmethod
 
-from research.robots_at_robots.evaluation.performance import ClassificationPerformance
+from research.robots.evaluation.performance import ClassificationPerformance
 
 
 class MetricABC(ABC):
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/performance.py b/polystar_cv/research/robots/evaluation/performance.py
similarity index 85%
rename from robots-at-robots/research/robots_at_robots/evaluation/performance.py
rename to polystar_cv/research/robots/evaluation/performance.py
index 52c014c9b6348a7ddf6a556843806c2f09b908cb..1521fbd5a52e9b463c93c35ba92a2a98be80564c 100644
--- a/robots-at-robots/research/robots_at_robots/evaluation/performance.py
+++ b/polystar_cv/research/robots/evaluation/performance.py
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from typing import Dict, Iterable, List, Sequence
 
 import numpy as np
@@ -7,7 +7,7 @@ from memoized_property import memoized_property
 from polystar.common.filters.filter_abc import FilterABC
 from polystar.common.models.image import FileImage
 from polystar.common.utils.iterable_utils import flatten, group_by
-from research.robots_at_robots.evaluation.set import Set
+from research.robots.evaluation.set import Set
 
 
 @dataclass
@@ -39,7 +39,7 @@ class ContextualizedClassificationPerformance(ClassificationPerformance):
 
 @dataclass
 class ClassificationPerformances(Iterable[ContextualizedClassificationPerformance]):
-    performances: List[ContextualizedClassificationPerformance]
+    performances: List[ContextualizedClassificationPerformance] = field(default_factory=list)
 
     @property
     def train(self) -> "ClassificationPerformances":
@@ -62,7 +62,7 @@ class ClassificationPerformances(Iterable[ContextualizedClassificationPerformanc
             for name, performances in group_by(self, lambda p: p.pipeline_name).items()
         }
 
-    def merge(self) -> ClassificationPerformance:
+    def collapse(self) -> ClassificationPerformance:
         return ClassificationPerformance(
             examples=flatten(p.examples for p in self),
             labels=np.concatenate([p.labels for p in self]),
@@ -74,6 +74,13 @@ class ClassificationPerformances(Iterable[ContextualizedClassificationPerformanc
     def __iter__(self):
         return iter(self.performances)
 
+    def __len__(self):
+        return len(self.performances)
+
+    def __iadd__(self, other: "ClassificationPerformances"):
+        self.performances.extend(other.performances)
+        return self
+
 
 @dataclass
 class SetClassificationPerformanceFilter(FilterABC[ContextualizedClassificationPerformance]):
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/image_pipeline_evaluation_reporter.py b/polystar_cv/research/robots/evaluation/reporter.py
similarity index 66%
rename from robots-at-robots/research/robots_at_robots/evaluation/image_pipeline_evaluation_reporter.py
rename to polystar_cv/research/robots/evaluation/reporter.py
index 497ed622e714a3986d5f3418bf22f78b726f848e..e7f6d4703331cffbe8a266e46e002eee5c6df30d 100644
--- a/robots-at-robots/research/robots_at_robots/evaluation/image_pipeline_evaluation_reporter.py
+++ b/polystar_cv/research/robots/evaluation/reporter.py
@@ -2,6 +2,7 @@ from collections import Counter
 from dataclasses import InitVar, dataclass, field
 from math import log
 from os.path import relpath
+from pathlib import Path
 from typing import Generic, List, Optional, Tuple
 
 import matplotlib.pyplot as plt
@@ -10,46 +11,49 @@ import seaborn as sns
 from matplotlib.axes import Axes, logging
 from matplotlib.figure import Figure
 from pandas import DataFrame
-from sklearn.metrics import classification_report, confusion_matrix
+from sklearn.metrics import ConfusionMatrixDisplay, classification_report, confusion_matrix
 
 from polystar.common.pipeline.classification.classification_pipeline import EnumT
 from polystar.common.utils.dataframe import Format, format_df_row, format_df_rows, make_formater
-from polystar.common.utils.markdown import MarkdownFile, markdown_to_pdf
-from polystar.common.utils.time import create_time_id
-from research.common.constants import DSET_DIR, EVALUATION_DIR
-from research.robots_at_robots.evaluation.metrics.accuracy import AccuracyMetric
-from research.robots_at_robots.evaluation.metrics.metric_abc import MetricABC
-from research.robots_at_robots.evaluation.performance import ClassificationPerformance, ClassificationPerformances
-from research.robots_at_robots.evaluation.set import Set
+from polystar.common.utils.markdown import MarkdownFile
+from research.common.constants import DSET_DIR
+from research.robots.evaluation.metrics.accuracy import AccuracyMetric
+from research.robots.evaluation.metrics.metric_abc import MetricABC
+from research.robots.evaluation.performance import ClassificationPerformance, ClassificationPerformances
+from research.robots.evaluation.set import Set
+
+logger = logging.getLogger(__name__)
 
 
 @dataclass
 class ImagePipelineEvaluationReporter(Generic[EnumT]):
-    evaluation_project: str
-    experiment_name: str
+    report_dir: Path
     classes: List[EnumT]
     main_metric: MetricABC = field(default_factory=AccuracyMetric)
     other_metrics: InitVar[List[MetricABC]] = None
     _mf: MarkdownFile = field(init=False)
     _performances: ClassificationPerformances = field(init=False)
+    _has_validation: bool = field(init=False)
+    _sorted_pipeline_names: List[str] = field(init=False)
 
     def __post_init__(self, other_metrics: List[MetricABC]):
-        self.report_dir = EVALUATION_DIR / self.evaluation_project / f"{create_time_id()}_{self.experiment_name}"
         self.all_metrics: List[MetricABC] = [self.main_metric] + (other_metrics or [])
 
     def report(self, performances: ClassificationPerformances):
         sns.set()
+
         self._performances = performances
-        report_path = self.report_dir / "report.md"
-        with MarkdownFile(report_path) as self._mf:
+        self._has_validation = bool(self._performances.validation)
+        self._sorted_pipeline_names = self._make_sorted_pipeline_names()
+
+        with MarkdownFile(self.report_dir / "report.md") as self._mf:
 
             self._mf.title(f"Evaluation report")
             self._report_datasets()
             self._report_aggregated_results()
             self._report_pipelines_results()
 
-            logging.info(f"Report generated at file:///{self.report_dir/'report.md'}")
-        markdown_to_pdf(report_path)
+            logger.info(f"Report generated at file:///{self.report_dir/'report.md'}")
 
     def _report_datasets(self):
         self._mf.title("Datasets", level=2)
@@ -57,8 +61,9 @@ class ImagePipelineEvaluationReporter(Generic[EnumT]):
         self._mf.title("Train-val", level=3)
         self._mf.paragraph("Train")
         self._report_dataset(self._performances.train)
-        self._mf.paragraph("Val")
-        self._report_dataset(self._performances.validation)
+        if self._has_validation:
+            self._mf.paragraph("Val")
+            self._report_dataset(self._performances.validation)
 
         self._mf.title("Testing", level=3)
         self._report_dataset(self._performances.test)
@@ -91,40 +96,48 @@ class ImagePipelineEvaluationReporter(Generic[EnumT]):
 
         self._mf.paragraph("On test set:")
         self._mf.table(self._make_aggregated_results_for_set(Set.TEST))
-        self._mf.paragraph("On validation set:")
-        self._mf.table(self._make_aggregated_results_for_set(Set.VALIDATION))
+        if self._has_validation:
+            self._mf.paragraph("On validation set:")
+            self._mf.table(self._make_aggregated_results_for_set(Set.VALIDATION))
         self._mf.paragraph("On train set:")
         self._mf.table(self._make_aggregated_results_for_set(Set.TRAIN))
 
+    def _make_sorted_pipeline_names(self) -> List[str]:
+        pipeline_name2score = {
+            pipeline_name: self.main_metric(
+                (performances.validation if self._has_validation else performances.test).collapse()
+            )
+            for pipeline_name, performances in self._performances.group_by_pipeline().items()
+        }
+        return sorted(pipeline_name2score, key=pipeline_name2score.get, reverse=True)
+
     def _report_pipelines_results(self):
-        for pipeline_name, performances in sorted(
-            self._performances.group_by_pipeline().items(),
-            key=lambda name_perfs: self.main_metric(name_perfs[1].test.merge()),
-            reverse=True,
-        ):
-            self._report_pipeline_results(pipeline_name, performances)
+        pipeline_name2perfs = self._performances.group_by_pipeline()
+        for pipeline_name in self._sorted_pipeline_names:
+            self._report_pipeline_results(pipeline_name, pipeline_name2perfs[pipeline_name])
 
     def _report_pipeline_results(self, pipeline_name: str, performances: ClassificationPerformances):
         self._mf.title(pipeline_name, level=2)
 
         self._mf.title("Test results", level=3)
-        self._report_pipeline_set_results(performances, Set.TEST)
+        self._report_pipeline_set_results(pipeline_name, performances, Set.TEST)
 
-        self._mf.title("Validation results", level=3)
-        self._report_pipeline_set_results(performances, Set.VALIDATION)
+        if self._has_validation:
+            self._mf.title("Validation results", level=3)
+            self._report_pipeline_set_results(pipeline_name, performances, Set.VALIDATION)
 
         self._mf.title("Train results", level=3)
-        self._report_pipeline_set_results(performances, Set.TRAIN)
+        self._report_pipeline_set_results(pipeline_name, performances, Set.TRAIN)
 
-    def _report_pipeline_set_results(self, performances: ClassificationPerformances, set_: Set):
+    def _report_pipeline_set_results(self, pipeline_name: str, performances: ClassificationPerformances, set_: Set):
         performances = performances.on_set(set_)
-        perf = performances.merge()
+        perf = performances.collapse()
 
         self._mf.title("Metrics", level=4)
         self._report_pipeline_set_metrics(performances, perf, set_)
 
         self._mf.title("Confusion Matrix:", level=4)
-        self._report_pipeline_set_confusion_matrix(perf)
+        self._report_pipeline_set_confusion_matrix(pipeline_name, perf, set_)
 
         self._mf.title("25 Mistakes examples", level=4)
         self._report_pipeline_set_mistakes(perf)
@@ -164,12 +177,15 @@ class ImagePipelineEvaluationReporter(Generic[EnumT]):
         format_df_row(df, "support", int)
         self._mf.table(df)
 
-    def _report_pipeline_set_confusion_matrix(self, perf: ClassificationPerformance):
-        self._mf.table(
-            DataFrame(
-                confusion_matrix(perf.labels, perf.predictions), index=perf.unique_labels, columns=perf.unique_labels
-            )
+    def _report_pipeline_set_confusion_matrix(self, pipeline_name: str, perf: ClassificationPerformance, set_: Set):
+        sns.reset_defaults()
+        cm = ConfusionMatrixDisplay(
+            confusion_matrix(perf.labels, perf.predictions, labels=perf.unique_labels),
+            display_labels=perf.unique_labels,
         )
+        cm.plot(cmap=plt.cm.Blues, values_format=".4g")
+        self._mf.figure(cm.figure_, f"{pipeline_name}_{set_}_cm.png")
+        sns.set()
 
     def _report_pipeline_set_mistakes(self, perf: ClassificationPerformance):
         mistakes = perf.mistakes
@@ -211,70 +227,70 @@ class ImagePipelineEvaluationReporter(Generic[EnumT]):
                 }
                 for perf in self._performances
             ]
-        ).sort_values(["set", self.main_metric.name], ascending=[True, False])
+        )
 
         df[f"{self.main_metric.name} "] = list(zip(df[self.main_metric.name], df.support))
         df["time "] = list(zip(df.time, df.support))
 
         return (
-            _cat_pipeline_results(df, f"{self.main_metric.name} ", "{:.1%}", limits=(0, 1)),
-            _cat_pipeline_results(df, "time ", "{:.2e}", log_scale=True),
+            self._cat_pipeline_results(df, f"{self.main_metric.name} ", "{:.1%}", limits=(0, 1)),
+            self._cat_pipeline_results(df, "time ", "{:.2e}", log_scale=True),
         )
 
     def _make_aggregated_results_for_set(self, set_: Set) -> DataFrame:
-        pipeline2performances = self._performances.on_set(set_).group_by_pipeline()
-        pipeline2performance = {
-            pipeline_name: performances.merge() for pipeline_name, performances in pipeline2performances.items()
+        pipeline_name2performances = self._performances.on_set(set_).group_by_pipeline()
+        pipeline_name2performance = {
+            pipeline_name: performances.collapse() for pipeline_name, performances in pipeline_name2performances.items()
         }
-        return (
-            DataFrame(
-                [
-                    {
-                        "pipeline": pipeline_name,
-                        self.main_metric.name: self.main_metric(performance),
-                        "inference time": performance.mean_inference_time,
-                    }
-                    for pipeline_name, performance in pipeline2performance.items()
-                ]
-            )
-            .set_index("pipeline")
-            .sort_values(self.main_metric.name, ascending=False)
+        return DataFrame(
+            [
+                {
+                    "pipeline": pipeline_name,
+                    self.main_metric.name: self.main_metric(pipeline_name2performance[pipeline_name]),
+                    "inference time": pipeline_name2performance[pipeline_name].mean_inference_time,
+                }
+                for pipeline_name in self._sorted_pipeline_names
+            ]
+        ).set_index("pipeline")
+
+    def _cat_pipeline_results(
+        self, df: DataFrame, y: str, fmt: str, limits: Optional[Tuple[float, float]] = None, log_scale: bool = False
+    ) -> Figure:
+        cols = ["test"]
+        if self._has_validation:
+            cols.append("validation")
+        cols.append("train")
+        grid: sns.FacetGrid = sns.catplot(
+            data=df,
+            x="pipeline",
+            y=y,
+            col="set",
+            kind="bar",
+            sharey=True,
+            legend=False,
+            col_order=cols,
+            height=8,
+            estimator=weighted_mean,
+            orient="v",
+            order=self._sorted_pipeline_names,
         )
 
+        fig: Figure = grid.fig
+
+        grid.set_xticklabels(rotation=30, ha="right")
+        _format_axes(fig.get_axes(), fmt, limits=limits, log_scale=log_scale)
+
+        fig.suptitle(y)
+        fig.tight_layout()
+
+        return fig
+
 
 def weighted_mean(x, **kws):
     val, weight = map(np.asarray, zip(*x))
     return (val * weight).sum() / weight.sum()
 
 
-def _cat_pipeline_results(
-    df: DataFrame, y: str, fmt: str, limits: Optional[Tuple[float, float]] = None, log_scale: bool = False
-) -> Figure:
-    grid: sns.FacetGrid = sns.catplot(
-        data=df,
-        x="pipeline",
-        y=y,
-        col="set",
-        kind="bar",
-        sharey=True,
-        legend=False,
-        col_order=["test", "validation", "train"],
-        height=8,
-        estimator=weighted_mean,
-        orient="v",
-    )
-
-    fig: Figure = grid.fig
-
-    grid.set_xticklabels(rotation=30, ha="right")
-    _format_axes(fig.get_axes(), fmt, limits=limits, log_scale=log_scale)
-
-    fig.suptitle(y)
-    fig.tight_layout()
-
-    return fig
-
-
 def bar_plot_with_secondary(
     df: DataFrame,
     title: str,
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/set.py b/polystar_cv/research/robots/evaluation/set.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/evaluation/set.py
rename to polystar_cv/research/robots/evaluation/set.py
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/trainer.py b/polystar_cv/research/robots/evaluation/trainer.py
similarity index 100%
rename from robots-at-robots/research/robots_at_robots/evaluation/trainer.py
rename to polystar_cv/research/robots/evaluation/trainer.py
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/__init__.py b/polystar_cv/research/runes/__init__.py
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/__init__.py
rename to polystar_cv/research/runes/__init__.py
diff --git a/robots-at-runes/research/robots_at_runes/constants.py b/polystar_cv/research/runes/constants.py
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/constants.py
rename to polystar_cv/research/runes/constants.py
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/__init__.py b/polystar_cv/research/runes/dataset/__init__.py
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/__init__.py
rename to polystar_cv/research/runes/dataset/__init__.py
diff --git a/polystar_cv/research/runes/dataset/blend/__init__.py b/polystar_cv/research/runes/dataset/blend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/examples/.gitignore b/polystar_cv/research/runes/dataset/blend/examples/.gitignore
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/examples/.gitignore
rename to polystar_cv/research/runes/dataset/blend/examples/.gitignore
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/examples/back1.jpg b/polystar_cv/research/runes/dataset/blend/examples/back1.jpg
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/examples/back1.jpg
rename to polystar_cv/research/runes/dataset/blend/examples/back1.jpg
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/examples/logo.png b/polystar_cv/research/runes/dataset/blend/examples/logo.png
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/examples/logo.png
rename to polystar_cv/research/runes/dataset/blend/examples/logo.png
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/examples/logo.xml b/polystar_cv/research/runes/dataset/blend/examples/logo.xml
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/examples/logo.xml
rename to polystar_cv/research/runes/dataset/blend/examples/logo.xml
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/image_blender.py b/polystar_cv/research/runes/dataset/blend/image_blender.py
similarity index 88%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/image_blender.py
rename to polystar_cv/research/runes/dataset/blend/image_blender.py
index 976aba5d9e9ebf178655cc3f69b3a3184f554fc0..87eeb6d753c3f7ed7d0c18f5b1bd37ce10afa715 100644
--- a/robots-at-runes/research/robots_at_runes/dataset/blend/image_blender.py
+++ b/polystar_cv/research/runes/dataset/blend/image_blender.py
@@ -6,9 +6,8 @@ import cv2
 import numpy as np
 
 from polystar.common.models.image import Image
-from research.robots_at_runes.dataset.blend.labeled_image_modifiers.labeled_image_modifier_abc import \
-    LabeledImageModifierABC
-from research.robots_at_runes.dataset.labeled_image import LabeledImage, PointOfInterest
+from research.runes.dataset.blend.labeled_image_modifiers.labeled_image_modifier_abc import LabeledImageModifierABC
+from research.runes.dataset.labeled_image import LabeledImage, PointOfInterest
 
 
 @dataclass
@@ -70,8 +69,8 @@ if __name__ == "__main__":
 
     import matplotlib.pyplot as plt
 
-    from research.robots_at_runes.dataset.blend.labeled_image_modifiers.labeled_image_rotator import LabeledImageRotator
-    from research.robots_at_runes.dataset.blend.labeled_image_modifiers.labeled_image_scaler import LabeledImageScaler
+    from research.runes.dataset.blend import LabeledImageScaler
+    from research.runes.dataset.blend.labeled_image_modifiers.labeled_image_rotator import LabeledImageRotator
 
     EXAMPLES_DIR = Path(__file__).parent / "examples"
 
diff --git a/polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/__init__.py b/polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_modifier_abc.py b/polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_modifier_abc.py
similarity index 94%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_modifier_abc.py
rename to polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_modifier_abc.py
index cf6f90b958abc9e2bdcaefb1a513addf4f2478e1..5072c901e1d06bbfceedaa53944b44d2458a6f4e 100644
--- a/robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_modifier_abc.py
+++ b/polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_modifier_abc.py
@@ -2,7 +2,7 @@ from abc import ABC, abstractmethod
 from random import random
 
 from polystar.common.models.image import Image
-from research.robots_at_runes.dataset.labeled_image import LabeledImage, PointOfInterest
+from research.runes.dataset.labeled_image import LabeledImage, PointOfInterest
 
 
 class LabeledImageModifierABC(ABC):
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_rotator.py b/polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_rotator.py
similarity index 85%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_rotator.py
rename to polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_rotator.py
index 6e241c20957c08f66f9d01de926d6b34824c070f..3dd178a53ac2cc215672fc342bf6902a42dad862 100644
--- a/robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_rotator.py
+++ b/polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_rotator.py
@@ -1,12 +1,11 @@
 from dataclasses import dataclass
 
 import numpy as np
-
 from imutils import rotate_bound
+
 from polystar.common.models.image import Image
-from research.robots_at_runes.dataset.blend.labeled_image_modifiers.labeled_image_modifier_abc import \
-    LabeledImageModifierABC
-from research.robots_at_runes.dataset.labeled_image import PointOfInterest
+from research.runes.dataset.blend.labeled_image_modifiers.labeled_image_modifier_abc import LabeledImageModifierABC
+from research.runes.dataset.labeled_image import PointOfInterest
 
 
 @dataclass
diff --git a/robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_scaler.py b/polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_scaler.py
similarity index 80%
rename from robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_scaler.py
rename to polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_scaler.py
index a7dacd8e246e6e92c88c1b81495d3f965c82300b..a5ac388cc44331168c2d4982cf93dde7d8764568 100644
--- a/robots-at-runes/research/robots_at_runes/dataset/blend/labeled_image_modifiers/labeled_image_scaler.py
+++ b/polystar_cv/research/runes/dataset/blend/labeled_image_modifiers/labeled_image_scaler.py
@@ -3,9 +3,8 @@ from dataclasses import dataclass
 import cv2
 
 from polystar.common.models.image import Image
-from research.robots_at_runes.dataset.blend.labeled_image_modifiers.labeled_image_modifier_abc import \
-    LabeledImageModifierABC
-from research.robots_at_runes.dataset.labeled_image import PointOfInterest
+from research.runes.dataset.blend.labeled_image_modifiers.labeled_image_modifier_abc import LabeledImageModifierABC
+from research.runes.dataset.labeled_image import PointOfInterest
 
 
 @dataclass
diff --git a/robots-at-runes/research/robots_at_runes/dataset/dataset_generator.py b/polystar_cv/research/runes/dataset/dataset_generator.py
similarity index 85%
rename from robots-at-runes/research/robots_at_runes/dataset/dataset_generator.py
rename to polystar_cv/research/runes/dataset/dataset_generator.py
index d5a871c2436e5fe02b9085769b9a765fce95104f..9dcf2614c1909d467ea5abc1ce7ce557da44ad95 100644
--- a/robots-at-runes/research/robots_at_runes/dataset/dataset_generator.py
+++ b/polystar_cv/research/runes/dataset/dataset_generator.py
@@ -12,11 +12,11 @@ from research.common.dataset.perturbations.image_modifiers.gaussian_noise import
 from research.common.dataset.perturbations.image_modifiers.horizontal_blur import HorizontalBlurrer
 from research.common.dataset.perturbations.image_modifiers.saturation import SaturationModifier
 from research.common.dataset.perturbations.perturbator import ImagePerturbator
-from research.robots_at_runes.constants import RUNES_DATASET_DIR
-from research.robots_at_runes.dataset.blend.image_blender import ImageBlender
-from research.robots_at_runes.dataset.blend.labeled_image_modifiers.labeled_image_rotator import LabeledImageRotator
-from research.robots_at_runes.dataset.blend.labeled_image_modifiers.labeled_image_scaler import LabeledImageScaler
-from research.robots_at_runes.dataset.labeled_image import load_labeled_images_in_directory
+from research.runes.constants import RUNES_DATASET_DIR
+from research.runes.dataset.blend import LabeledImageScaler
+from research.runes.dataset.blend.image_blender import ImageBlender
+from research.runes.dataset.blend.labeled_image_modifiers.labeled_image_rotator import LabeledImageRotator
+from research.runes.dataset.labeled_image import load_labeled_images_in_directory
 
 
 class DatasetGenerator:
diff --git a/robots-at-runes/research/robots_at_runes/dataset/labeled_image.py b/polystar_cv/research/runes/dataset/labeled_image.py
similarity index 100%
rename from robots-at-runes/research/robots_at_runes/dataset/labeled_image.py
rename to polystar_cv/research/runes/dataset/labeled_image.py
diff --git a/polystar_cv/tests/common/integration_tests/__init__.py b/polystar_cv/tests/common/integration_tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/polystar_cv/tests/common/integration_tests/datasets/__init__.py b/polystar_cv/tests/common/integration_tests/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/common/tests/common/integration_tests/datasets/test_dji_dataset.py b/polystar_cv/tests/common/integration_tests/datasets/test_dji_dataset.py
similarity index 100%
rename from common/tests/common/integration_tests/datasets/test_dji_dataset.py
rename to polystar_cv/tests/common/integration_tests/datasets/test_dji_dataset.py
diff --git a/common/tests/common/integration_tests/datasets/test_dji_zoomed_dataset.py b/polystar_cv/tests/common/integration_tests/datasets/test_dji_zoomed_dataset.py
similarity index 100%
rename from common/tests/common/integration_tests/datasets/test_dji_zoomed_dataset.py
rename to polystar_cv/tests/common/integration_tests/datasets/test_dji_zoomed_dataset.py
diff --git a/common/tests/common/integration_tests/datasets/test_twitch_dataset_v1.py b/polystar_cv/tests/common/integration_tests/datasets/test_twitch_dataset_v1.py
similarity index 100%
rename from common/tests/common/integration_tests/datasets/test_twitch_dataset_v1.py
rename to polystar_cv/tests/common/integration_tests/datasets/test_twitch_dataset_v1.py
diff --git a/polystar_cv/tests/common/unittests/__init__.py b/polystar_cv/tests/common/unittests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/common/tests/common/unittests/datasets/roco/test_directory_dataset_zoo.py b/polystar_cv/tests/common/unittests/datasets/roco/test_directory_dataset_zoo.py
similarity index 100%
rename from common/tests/common/unittests/datasets/roco/test_directory_dataset_zoo.py
rename to polystar_cv/tests/common/unittests/datasets/roco/test_directory_dataset_zoo.py
diff --git a/common/tests/common/unittests/datasets/test_dataset.py b/polystar_cv/tests/common/unittests/datasets/test_dataset.py
similarity index 100%
rename from common/tests/common/unittests/datasets/test_dataset.py
rename to polystar_cv/tests/common/unittests/datasets/test_dataset.py
diff --git a/common/tests/common/unittests/datasets_v3/roco/test_directory_dataset_zoo.py b/polystar_cv/tests/common/unittests/datasets_v3/roco/test_directory_dataset_zoo.py
similarity index 100%
rename from common/tests/common/unittests/datasets_v3/roco/test_directory_dataset_zoo.py
rename to polystar_cv/tests/common/unittests/datasets_v3/roco/test_directory_dataset_zoo.py
diff --git a/common/tests/common/unittests/datasets_v3/test_dataset.py b/polystar_cv/tests/common/unittests/datasets_v3/test_dataset.py
similarity index 100%
rename from common/tests/common/unittests/datasets_v3/test_dataset.py
rename to polystar_cv/tests/common/unittests/datasets_v3/test_dataset.py
diff --git a/common/tests/common/unittests/filters/test_filters_abc.py b/polystar_cv/tests/common/unittests/filters/test_filters_abc.py
similarity index 100%
rename from common/tests/common/unittests/filters/test_filters_abc.py
rename to polystar_cv/tests/common/unittests/filters/test_filters_abc.py
diff --git a/common/tests/common/unittests/image_pipeline/test_image_classifier_pipeline.py b/polystar_cv/tests/common/unittests/image_pipeline/test_image_classifier_pipeline.py
similarity index 100%
rename from common/tests/common/unittests/image_pipeline/test_image_classifier_pipeline.py
rename to polystar_cv/tests/common/unittests/image_pipeline/test_image_classifier_pipeline.py
diff --git a/polystar_cv/tests/common/unittests/object_validators/__init__.py b/polystar_cv/tests/common/unittests/object_validators/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/common/tests/common/unittests/object_validators/test_in_box_validator.py b/polystar_cv/tests/common/unittests/object_validators/test_in_box_validator.py
similarity index 100%
rename from common/tests/common/unittests/object_validators/test_in_box_validator.py
rename to polystar_cv/tests/common/unittests/object_validators/test_in_box_validator.py
diff --git a/pyproject.toml b/pyproject.toml
index 5e88305e45e1871c093f63d74830ea829f47cfc4..82b631031aeb07f1dfbac085901ba6fa997fef3e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,8 +1,10 @@
 [tool.poetry]
-name = "polystar.cv"
+name = "polystar_cv"
 version = "0.2.0"
 description = "CV code for Polystar's RoboMaster team"
 authors = ["Polystar"]
+packages = [{ include = "polystar", from = "polystar_cv" }, { include = "research", from = "polystar_cv" }]
+include = ["**/.changes"]
 
 [tool.poetry.dependencies]
 python = "^3.6"
@@ -24,15 +26,25 @@ dataclasses = "^0.6.0"
 imutils = "^0.5.3"
 more-itertools = "^8.4.0"
 
-[tool.poetry.dev-dependencies]
-tensorflow = "2.1.x"
-tensorflow-estimator = "2.1.x"
 opencv-python = "4.1.x"
 matplotlib = "^3.1.3"
-kivy = "^1.11.1"
 markdown = "^3.3.3"
 xhtml2pdf = "^0.2.5"
 google-cloud-storage = "^1.35.0"
+pyyaml = "^5.3.1"
+six = "1.15.0"  # https://github.com/googleapis/python-bigquery/issues/70
+
+[tool.poetry.dev-dependencies]
+tensorflow = "2.3.x"
+tensorflow-estimator = "2.3.x"
+kivy = "^1.11.1"
+cloudml-hypertune = "^0.1.0-alpha.6"
+google-api-python-client = "^1.12.8"
+wheel = "^0.36.2"
+optuna = "^2.3.0"
+hyperopt = "^0.2.5"
+plotly = "^4.14.1"
+pydot = "^1.4.1"
 
 [tool.black]
 line-length = 120
@@ -43,3 +55,7 @@ profile='black'
 line_length = 120
 known_first_party = ['polystar','tests','research','tools','scripts']
 skip = ['.eggs','.git','.hg','.mypy_cache','.nox','.pants.d','.tox','.venv','_build','buck-out','build','dist','node_modules','venv','__init__.py']
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
diff --git a/robots-at-robots/Readme.md b/robots-at-robots/Readme.md
deleted file mode 100644
index fcca02bd47c0e10bff009e4ffbfd982dd9d17973..0000000000000000000000000000000000000000
--- a/robots-at-robots/Readme.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-# Robots@Robots
-
-## Goal
-
-The goal of this project is to detect the other robots, from our robots, to be able to assist the pilot into shooting them.
diff --git a/robots-at-robots/config/settings.toml b/robots-at-robots/config/settings.toml
deleted file mode 100644
index fe532b09ae715c1a30b4413411f26bbc6bf32145..0000000000000000000000000000000000000000
--- a/robots-at-robots/config/settings.toml
+++ /dev/null
@@ -1,6 +0,0 @@
-[default]
-MODEL_NAME = 'robots/TRT_ssd_mobilenet_v2_roco.bin'
-
-[development]
-
-[production]
diff --git a/robots-at-robots/polystar/robots_at_robots/dependency_injection.py b/robots-at-robots/polystar/robots_at_robots/dependency_injection.py
deleted file mode 100644
index cc76fa14d067838ac6db6d0c29e23a922105ab58..0000000000000000000000000000000000000000
--- a/robots-at-robots/polystar/robots_at_robots/dependency_injection.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from dataclasses import dataclass
-
-from dynaconf import LazySettings
-from injector import Injector, Module
-
-from polystar.common.dependency_injection import CommonModule
-from polystar.robots_at_robots.globals import settings
-
-
-def make_injector() -> Injector:
-    return Injector(modules=[CommonModule(settings), RobotsAtRobotsModule(settings)])
-
-
-@dataclass
-class RobotsAtRobotsModule(Module):
-    settings: LazySettings
diff --git a/robots-at-robots/polystar/robots_at_robots/globals.py b/robots-at-robots/polystar/robots_at_robots/globals.py
deleted file mode 100644
index bf1c91a7aa75768c581889ad23a8a2143332a344..0000000000000000000000000000000000000000
--- a/robots-at-robots/polystar/robots_at_robots/globals.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from polystar.common.settings import make_settings
-
-PROJECT_NAME = "robots-at-robots"
-
-settings = make_settings(PROJECT_NAME)
diff --git a/robots-at-robots/research/robots_at_robots/armor_color/armor_color_benchmarker.py b/robots-at-robots/research/robots_at_robots/armor_color/armor_color_benchmarker.py
deleted file mode 100644
index 37a9e35e4a2601bc651b4cb7665f0aea43be73f8..0000000000000000000000000000000000000000
--- a/robots-at-robots/research/robots_at_robots/armor_color/armor_color_benchmarker.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from typing import List
-
-from polystar.common.models.object import ArmorColor
-from research.common.datasets.roco.roco_dataset_builder import ROCODatasetBuilder
-from research.robots_at_robots.armor_color.armor_color_dataset import make_armor_color_dataset_generator
-from research.robots_at_robots.evaluation.benchmark import make_armor_value_benchmarker
-
-
-def make_armor_color_benchmarker(
-    train_roco_datasets: List[ROCODatasetBuilder],
-    validation_roco_datasets: List[ROCODatasetBuilder],
-    test_roco_datasets: List[ROCODatasetBuilder],
-    experiment_name: str,
-):
-    dataset_generator = make_armor_color_dataset_generator()
-    return make_armor_value_benchmarker(
-        train_roco_datasets=train_roco_datasets,
-        validation_roco_datasets=validation_roco_datasets,
-        test_roco_datasets=test_roco_datasets,
-        evaluation_project="armor-color",
-        experiment_name=experiment_name,
-        classes=list(ArmorColor),
-        dataset_generator=dataset_generator,
-    )
diff --git a/robots-at-robots/research/robots_at_robots/armor_color/armor_color_dataset.py b/robots-at-robots/research/robots_at_robots/armor_color/armor_color_dataset.py
deleted file mode 100644
index 02985e4ac7b635db4f0e477d5a6d543cb34ecba3..0000000000000000000000000000000000000000
--- a/robots-at-robots/research/robots_at_robots/armor_color/armor_color_dataset.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from itertools import islice
-
-from polystar.common.models.object import Armor, ArmorColor
-from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
-from research.robots_at_robots.dataset.armor_value_dataset_generator import ArmorValueDatasetGenerator
-from research.robots_at_robots.dataset.armor_value_target_factory import ArmorValueTargetFactory
-
-
-class ArmorColorTargetFactory(ArmorValueTargetFactory[ArmorColor]):
-    def from_str(self, label: str) -> ArmorColor:
-        return ArmorColor(label)
-
-    def from_armor(self, armor: Armor) -> ArmorColor:
-        return armor.color
-
-
-def make_armor_color_dataset_generator() -> ArmorValueDatasetGenerator[ArmorColor]:
-    return ArmorValueDatasetGenerator("colors", ArmorColorTargetFactory())
-
-
-if __name__ == "__main__":
-    _roco_dataset_builder = ROCODatasetsZoo.DJI.CENTRAL_CHINA
-    _armor_color_dataset = make_armor_color_dataset_generator().from_roco_dataset(_roco_dataset_builder)
-
-    for p, c, _name in islice(_armor_color_dataset, 20, 25):
-        print(p, c, _name)
diff --git a/robots-at-robots/research/robots_at_robots/armor_color/benchmark.py b/robots-at-robots/research/robots_at_robots/armor_color/benchmark.py
deleted file mode 100644
index 441fb0d709354c2190877c8b3d5a293071b98aad..0000000000000000000000000000000000000000
--- a/robots-at-robots/research/robots_at_robots/armor_color/benchmark.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import logging
-from dataclasses import dataclass
-
-from nptyping import Array
-from sklearn.linear_model import LogisticRegression
-
-from polystar.common.image_pipeline.featurizers.histogram_2d import Histogram2D
-from polystar.common.image_pipeline.preprocessors.rgb_to_hsv import RGB2HSV
-from polystar.common.models.image import Image
-from polystar.common.models.object import ArmorColor
-from polystar.common.pipeline.classification.classification_pipeline import ClassificationPipeline
-from polystar.common.pipeline.classification.random_model import RandomClassifier
-from polystar.common.pipeline.classification.rule_based_classifier import RuleBasedClassifierABC
-from polystar.common.pipeline.pipe_abc import PipeABC
-from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
-from research.robots_at_robots.armor_color.armor_color_benchmarker import make_armor_color_benchmarker
-
-
-class ArmorColorPipeline(ClassificationPipeline):
-    enum = ArmorColor
-
-
-@dataclass
-class MeanChannels(PipeABC):
-    def transform_single(self, image: Image) -> Array[float, float, float]:
-        return image.mean(axis=(0, 1))
-
-
-class RedBlueComparisonClassifier(RuleBasedClassifierABC):
-    """A very simple model that compares the blue and red values obtained by the MeanChannels"""
-
-    def predict_single(self, features: Array[float, float, float]) -> ArmorColor:
-        return ArmorColor.Red if features[0] >= features[2] else ArmorColor.Blue
-
-
-if __name__ == "__main__":
-    logging.getLogger().setLevel("INFO")
-
-    _benchmarker = make_armor_color_benchmarker(
-        train_roco_datasets=[
-            ROCODatasetsZoo.TWITCH.T470150052,
-            ROCODatasetsZoo.TWITCH.T470152289,
-            ROCODatasetsZoo.TWITCH.T470149568,
-            ROCODatasetsZoo.TWITCH.T470151286,
-        ],
-        validation_roco_datasets=[],
-        test_roco_datasets=[
-            ROCODatasetsZoo.TWITCH.T470152838,
-            ROCODatasetsZoo.TWITCH.T470153081,
-            ROCODatasetsZoo.TWITCH.T470158483,
-            ROCODatasetsZoo.TWITCH.T470152730,
-        ],
-        experiment_name="test",
-    )
-
-    red_blue_comparison_pipeline = ArmorColorPipeline.from_pipes(
-        [MeanChannels(), RedBlueComparisonClassifier()], name="rb-comparison",
-    )
-    random_pipeline = ArmorColorPipeline.from_pipes([RandomClassifier()], name="random")
-    hsv_hist_lr_pipeline = ArmorColorPipeline.from_pipes(
-        [RGB2HSV(), Histogram2D(), LogisticRegression()], name="hsv-hist-lr",
-    )
-
-    _benchmarker.benchmark([random_pipeline, red_blue_comparison_pipeline, hsv_hist_lr_pipeline])
diff --git a/robots-at-robots/research/robots_at_robots/armor_digit/armor_digit_benchmarker.py b/robots-at-robots/research/robots_at_robots/armor_digit/armor_digit_benchmarker.py
deleted file mode 100644
index 8e77cab413cd3fa9acc32a6bbd3004e2a567d441..0000000000000000000000000000000000000000
--- a/robots-at-robots/research/robots_at_robots/armor_digit/armor_digit_benchmarker.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from typing import List
-
-from polystar.common.models.object import ArmorDigit
-from research.common.datasets.image_dataset import FileImageDataset
-from research.common.datasets.roco.roco_dataset_builder import ROCODatasetBuilder
-from research.robots_at_robots.armor_digit.armor_digit_dataset import make_armor_digit_dataset_generator
-from research.robots_at_robots.evaluation.benchmark import make_armor_value_benchmarker
-
-
-def make_armor_digit_benchmarker(
-    train_roco_datasets: List[ROCODatasetBuilder],
-    validation_roco_datasets: List[ROCODatasetBuilder],
-    test_roco_datasets: List[ROCODatasetBuilder],
-    experiment_name: str,
-    train_digit_datasets: List[FileImageDataset[ArmorDigit]] = None,
-    validation_digit_datasets: List[FileImageDataset[ArmorDigit]] = None,
-    test_digit_datasets: List[FileImageDataset[ArmorDigit]] = None,
-):
-    dataset_generator = make_armor_digit_dataset_generator()
-    return make_armor_value_benchmarker(
-        train_roco_datasets=train_roco_datasets,
-        validation_roco_datasets=validation_roco_datasets,
-        test_roco_datasets=test_roco_datasets,
-        evaluation_project="armor-digit",
-        experiment_name=experiment_name,
-        classes=list(ArmorDigit),
-        dataset_generator=dataset_generator,
-        train_datasets=train_digit_datasets,
-        validation_datasets=validation_digit_datasets,
-        test_datasets=test_digit_datasets,
-    )
diff --git a/robots-at-robots/research/robots_at_robots/armor_digit/armor_digit_dataset.py b/robots-at-robots/research/robots_at_robots/armor_digit/armor_digit_dataset.py
deleted file mode 100644
index bd3564546c0b0ffd8e0a7dd5d685fafd1fb72208..0000000000000000000000000000000000000000
--- a/robots-at-robots/research/robots_at_robots/armor_digit/armor_digit_dataset.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from itertools import islice
-
-from polystar.common.filters.exclude_filter import ExcludeFilter
-from polystar.common.models.object import Armor, ArmorDigit
-from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
-from research.robots_at_robots.dataset.armor_value_dataset_generator import ArmorValueDatasetGenerator
-from research.robots_at_robots.dataset.armor_value_target_factory import ArmorValueTargetFactory
-
-
-class ArmorDigitTargetFactory(ArmorValueTargetFactory[ArmorDigit]):
-    def from_str(self, label: str) -> ArmorDigit:
-        n = int(label)
-
-        if 1 <= n <= 5:  # CHANGING
-            return ArmorDigit(n)
-
-        return ArmorDigit.OUTDATED
-
-    def from_armor(self, armor: Armor) -> ArmorDigit:
-        return ArmorDigit(armor.number) if armor.number else ArmorDigit.UNKNOWN
-
-
-def make_armor_digit_dataset_generator() -> ArmorValueDatasetGenerator[ArmorDigit]:
-    return ArmorValueDatasetGenerator("digits", ArmorDigitTargetFactory(), ExcludeFilter({ArmorDigit.OUTDATED}))
-
-
-if __name__ == "__main__":
-    _roco_dataset_builder = ROCODatasetsZoo.DJI.CENTRAL_CHINA
-    _armor_digit_dataset = make_armor_digit_dataset_generator().from_roco_dataset(_roco_dataset_builder)
-
-    for p, c, _name in islice(_armor_digit_dataset, 20, 30):
-        print(p, c, _name)
diff --git a/robots-at-robots/research/robots_at_robots/armor_digit/benchmark.py b/robots-at-robots/research/robots_at_robots/armor_digit/benchmark.py
deleted file mode 100644
index 81e750a3b6ecafa877899c92977b624f9d7e57ed..0000000000000000000000000000000000000000
--- a/robots-at-robots/research/robots_at_robots/armor_digit/benchmark.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import logging
-import warnings
-from pathlib import Path
-from typing import Callable, List, Sequence, Tuple
-
-from keras_preprocessing.image import ImageDataGenerator
-from numpy import asarray
-from tensorflow_core.python.keras import Input, Model, Sequential
-from tensorflow_core.python.keras.applications.vgg16 import VGG16
-from tensorflow_core.python.keras.applications.xception import Xception
-from tensorflow_core.python.keras.callbacks import EarlyStopping, TensorBoard
-from tensorflow_core.python.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
-from tensorflow_core.python.keras.optimizer_v2.adam import Adam
-from tensorflow_core.python.keras.optimizer_v2.gradient_descent import SGD
-from tensorflow_core.python.keras.utils.np_utils import to_categorical
-
-from polystar.common.image_pipeline.preprocessors.normalise import Normalise
-from polystar.common.image_pipeline.preprocessors.resize import Resize
-from polystar.common.models.image import Image
-from polystar.common.models.object import ArmorDigit
-from polystar.common.pipeline.classification.classification_pipeline import ClassificationPipeline
-from polystar.common.pipeline.classification.classifier_abc import ClassifierABC
-from polystar.common.pipeline.classification.random_model import RandomClassifier
-from research.common.datasets.roco.zoo.roco_dataset_zoo import ROCODatasetsZoo
-from research.robots_at_robots.armor_digit.armor_digit_benchmarker import make_armor_digit_benchmarker
-from research.robots_at_robots.armor_digit.armor_digit_dataset import make_armor_digit_dataset_generator
-from research.robots_at_robots.evaluation.benchmark import Benchmarker
-
-
-class ArmorDigitPipeline(ClassificationPipeline):
-    enum = ArmorDigit
-
-
-class KerasClassifier(ClassifierABC):
-    def __init__(self, model: Model, optimizer, logs_dir: Path, with_data_augmentation: bool, batch_size: int = 32):
-        self.batch_size = batch_size
-        self.logs_dir = logs_dir
-        self.with_data_augmentation = with_data_augmentation
-        self.model = model
-        self.model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
-
-    @property
-    def train_data_gen(self) -> ImageDataGenerator:
-        if not self.with_data_augmentation:
-            return ImageDataGenerator()
-        return ImageDataGenerator(rotation_range=45, zoom_range=[0.8, 1])  # brightness_range=[0.7, 1.4]
-
-    def fit(self, images: List[Image], labels: List[int], validation_size: int) -> "KerasClassifier":
-        images = asarray(images)
-        labels = to_categorical(asarray(labels), 5)  # FIXME
-        train_images, train_labels = images[:-validation_size], labels[:-validation_size]
-        val_images, val_labels = images[-validation_size:], labels[-validation_size:]
-
-        train_generator = self.train_data_gen.flow(train_images, train_labels, batch_size=self.batch_size, shuffle=True)
-
-        self.model.fit(
-            x=train_generator,
-            steps_per_epoch=len(train_images) / self.batch_size,
-            validation_data=(val_images, val_labels),
-            epochs=300,
-            callbacks=[
-                EarlyStopping(verbose=0, patience=15, restore_best_weights=True),
-                TensorBoard(log_dir=self.logs_dir, histogram_freq=4, write_graph=True, write_images=False),
-            ],
-            verbose=0,
-        )
-        return self
-
-    def predict_proba(self, examples: List[Image]) -> Sequence[float]:
-        return self.model.predict_proba(asarray(examples))
-
-
-class CNN(Sequential):
-    def __init__(
-        self, input_size: Tuple[int, int], conv_blocks: Sequence[Sequence[int]], dense_size: int, output_size: int,
-    ):
-        super().__init__()
-        self.add(Input((*input_size, 3)))
-
-        for conv_sizes in conv_blocks:
-            for size in conv_sizes:
-                self.add(Conv2D(size, (3, 3), activation="relu"))
-            self.add(MaxPooling2D())
-
-        self.add(Flatten())
-        self.add(Dense(dense_size))
-        self.add(Dropout(0.5))
-        self.add(Dense(output_size, activation="softmax"))
-
-
-def make_digits_cnn_pipeline(
-    input_size: int, conv_blocks: Sequence[Sequence[int]], report_dir: Path, with_data_augmentation: bool, lr: float
-) -> ArmorDigitPipeline:
-    name = (
-        f"cnn - ({input_size}) - lr {lr} - "
-        + " ".join("_".join(map(str, sizes)) for sizes in conv_blocks)
-        + (" - with_data_augm" * with_data_augmentation)
-    )
-    input_size = (input_size, input_size)
-    return ArmorDigitPipeline.from_pipes(
-        [
-            Resize(input_size),
-            Normalise(),
-            KerasClassifier(
-                CNN(input_size=input_size, conv_blocks=conv_blocks, dense_size=128, output_size=5),
-                logs_dir=report_dir / name,
-                with_data_augmentation=with_data_augmentation,
-                optimizer=SGD(lr, momentum=0.9),
-            ),
-        ],
-        name=name,
-    )
-
-
-class TransferLearning(Sequential):
-    def __init__(self, input_size: Tuple[int, int], n_classes: int, model_factory: Callable[..., Model]):
-        input_shape = (*input_size, 3)
-        base_model: Model = model_factory(weights="imagenet", input_shape=input_shape, include_top=False)
-
-        super().__init__(
-            [
-                Input(input_shape),
-                base_model,
-                Flatten(),
-                Dense(128, activation="relu"),
-                Dropout(0.5),
-                Dense(n_classes, activation="softmax"),
-            ]
-        )
-
-
-def make_tl_pipeline(
-    report_dir: Path, input_size: int, with_data_augmentation: bool, lr: float, model_factory: Callable[..., Model]
-):
-    name = f"{model_factory.__name__} ({input_size}) - lr {lr}" + (" - with_data_augm" * with_data_augmentation)
-    input_size = (input_size, input_size)
-    return ArmorDigitPipeline.from_pipes(
-        [
-            Resize(input_size),
-            Normalise(),
-            KerasClassifier(
-                model=TransferLearning(input_size=input_size, n_classes=5, model_factory=model_factory),  # FIXME
-                optimizer=Adam(lr),  # FIXME
-                logs_dir=report_dir,
-                with_data_augmentation=with_data_augmentation,
-            ),
-        ],
-        name=name,
-    )
-
-
-def make_vgg16_pipeline(
-    report_dir: Path, input_size: int, with_data_augmentation: bool, lr: float
-) -> ArmorDigitPipeline:
-    return make_tl_pipeline(
-        model_factory=VGG16,
-        input_size=input_size,
-        with_data_augmentation=with_data_augmentation,
-        lr=lr,
-        report_dir=report_dir,
-    )
-
-
-def make_xception_pipeline(
-    report_dir: Path, input_size: int, with_data_augmentation: bool, lr: float
-) -> ArmorDigitPipeline:
-    return make_tl_pipeline(
-        model_factory=Xception,
-        input_size=input_size,
-        with_data_augmentation=with_data_augmentation,
-        lr=lr,
-        report_dir=report_dir,
-    )
-
-
-def make_default_digit_benchmarker(exp_name: str) -> Benchmarker:
-    return make_armor_digit_benchmarker(
-        train_digit_datasets=[
-            # Only the start of the dataset is cleaned as of now
-            make_armor_digit_dataset_generator()
-            .from_roco_dataset(ROCODatasetsZoo.DJI.FINAL)
-            .to_file_images()
-            .cap(
-                (1009 - 117)
-                + (1000 - 86)
-                + (1000 - 121)
-                + (1000 - 138)
-                + (1000 - 137)
-                + (1000 - 154)
-                + (1000 - 180)
-                + (1000 - 160)
-                + (1000 - 193)
-                + (1000 - 80)
-            )
-            .build()
-        ],
-        train_roco_datasets=[
-            # ROCODatasetsZoo.DJI.CENTRAL_CHINA,
-            # ROCODatasetsZoo.DJI.FINAL,
-            # ROCODatasetsZoo.DJI.NORTH_CHINA,
-            # ROCODatasetsZoo.DJI.SOUTH_CHINA,
-            ROCODatasetsZoo.TWITCH.T470150052,
-            ROCODatasetsZoo.TWITCH.T470149568,
-            ROCODatasetsZoo.TWITCH.T470151286,
-        ],
-        validation_roco_datasets=[ROCODatasetsZoo.TWITCH.T470152289],
-        test_roco_datasets=[
-            ROCODatasetsZoo.TWITCH.T470152838,
-            ROCODatasetsZoo.TWITCH.T470153081,
-            ROCODatasetsZoo.TWITCH.T470158483,
-            ROCODatasetsZoo.TWITCH.T470152730,
-        ],
-        experiment_name=exp_name,
-    )
-
-
-if __name__ == "__main__":
-    logging.getLogger().setLevel("INFO")
-    logging.getLogger("tensorflow").setLevel("ERROR")
-    warnings.filterwarnings("ignore")
-
-    _benchmarker = make_default_digit_benchmarker("xception")
-
-    _report_dir = _benchmarker.reporter.report_dir
-
-    _random_pipeline = ArmorDigitPipeline.from_pipes([RandomClassifier()], name="random")
-    _cnn_pipelines = [
-        make_digits_cnn_pipeline(
-            32, ((32, 32), (64, 64)), _report_dir, with_data_augmentation=with_data_augmentation, lr=lr,
-        )
-        for with_data_augmentation in [False]
-        for lr in [2.5e-2, 1.6e-2, 1e-2, 6.3e-3, 4e-4]
-    ]
-    # cnn_pipelines = [
-    #     make_digits_cnn_pipeline(
-    #         64, ((32,), (64, 64), (64, 64)), reporter.report_dir, with_data_augmentation=True, lr=lr
-    #     )
-    #     for with_data_augmentation in [True, False]
-    #     for lr in (5.6e-2, 3.1e-2, 1.8e-2, 1e-2, 5.6e-3, 3.1e-3, 1.8e-3, 1e-3)
-    # ]
-
-    vgg16_pipelines = [
-        make_vgg16_pipeline(_report_dir, input_size=32, with_data_augmentation=False, lr=lr)
-        for lr in (1e-5, 5e-4, 2e-4, 1e-4, 5e-3)
-    ]
-
-    xception_pipelines = [
-        make_xception_pipeline(_report_dir, input_size=71, with_data_augmentation=False, lr=lr) for lr in (2e-4, 5e-5)
-    ]
-
-    logging.info(f"Run `tensorboard --logdir={_report_dir}` for realtime logs")
-
-    _benchmarker.benchmark([_random_pipeline] + xception_pipelines)
diff --git a/robots-at-robots/research/robots_at_robots/constants.py b/robots-at-robots/research/robots_at_robots/constants.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/robots-at-robots/research/robots_at_robots/constants.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/robots-at-robots/research/robots_at_robots/evaluation/benchmark.py b/robots-at-robots/research/robots_at_robots/evaluation/benchmark.py
deleted file mode 100644
index 958a2bf236fd4f1256bac99daa2b7d74aae39283..0000000000000000000000000000000000000000
--- a/robots-at-robots/research/robots_at_robots/evaluation/benchmark.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from dataclasses import dataclass
-from typing import List
-
-from polystar.common.pipeline.classification.classification_pipeline import ClassificationPipeline
-from research.common.datasets.image_dataset import FileImageDataset
-from research.common.datasets.roco.roco_dataset_builder import ROCODatasetBuilder
-from research.robots_at_robots.dataset.armor_value_dataset_generator import ArmorValueDatasetGenerator
-from research.robots_at_robots.evaluation.image_pipeline_evaluation_reporter import ImagePipelineEvaluationReporter
-from research.robots_at_robots.evaluation.image_pipeline_evaluator import ImageClassificationPipelineEvaluator
-from research.robots_at_robots.evaluation.metrics.f1 import F1Metric
-from research.robots_at_robots.evaluation.trainer import ImageClassificationPipelineTrainer
-
-
-@dataclass
-class Benchmarker:
-    def __init__(
-        self,
-        train_datasets: List[FileImageDataset],
-        validation_datasets: List[FileImageDataset],
-        test_datasets: List[FileImageDataset],
-        evaluation_project: str,
-        experiment_name: str,
-        classes: List,
-    ):
-        self.trainer = ImageClassificationPipelineTrainer(train_datasets, validation_datasets)
-        self.evaluator = ImageClassificationPipelineEvaluator(train_datasets, validation_datasets, test_datasets)
-        self.reporter = ImagePipelineEvaluationReporter(
-            evaluation_project, experiment_name, classes, other_metrics=[F1Metric()]
-        )
-
-    def benchmark(self, pipelines: List[ClassificationPipeline]):
-        self.trainer.train_pipelines(pipelines)
-        self.reporter.report(self.evaluator.evaluate_pipelines(pipelines))
-
-
-def make_armor_value_benchmarker(
-    train_roco_datasets: List[ROCODatasetBuilder],
-    validation_roco_datasets: List[ROCODatasetBuilder],
-    test_roco_datasets: List[ROCODatasetBuilder],
-    evaluation_project: str,
-    experiment_name: str,
-    dataset_generator: ArmorValueDatasetGenerator,
-    classes: List,
-    train_datasets: List[FileImageDataset] = None,
-    validation_datasets: List[FileImageDataset] = None,
-    test_datasets: List[FileImageDataset] = None,
-):
-    return Benchmarker(
-        train_datasets=dataset_generator.from_roco_datasets(train_roco_datasets) + (train_datasets or []),
-        validation_datasets=dataset_generator.from_roco_datasets(validation_roco_datasets)
-        + (validation_datasets or []),
-        test_datasets=dataset_generator.from_roco_datasets(test_roco_datasets) + (test_datasets or []),
-        evaluation_project=evaluation_project,
-        experiment_name=experiment_name,
-        classes=classes,
-    )
diff --git a/robots-at-runes/Readme.md b/robots-at-runes/Readme.md
deleted file mode 100644
index f2b9dade41c33af3f9f482da1785a1d45096ba3c..0000000000000000000000000000000000000000
--- a/robots-at-runes/Readme.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-# Robots@Runes
-
-## Goal
-
-The goal of this project is to detect the runes' rotation, from our robots, to be able to assist the pilot into shooting them.
diff --git a/robots-at-runes/config/settings.toml b/robots-at-runes/config/settings.toml
deleted file mode 100644
index bbefeb544733c90046f74ea7b2382c7c0d7bc7cc..0000000000000000000000000000000000000000
--- a/robots-at-runes/config/settings.toml
+++ /dev/null
@@ -1,5 +0,0 @@
-[default]
-
-[development]
-
-[production]
diff --git a/robots-at-runes/polystar/robots_at_runes/globals.py b/robots-at-runes/polystar/robots_at_runes/globals.py
deleted file mode 100644
index 30aaa4d8ca5aeb13247fcda7f93b2ddac2b4bc3a..0000000000000000000000000000000000000000
--- a/robots-at-runes/polystar/robots_at_runes/globals.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from polystar.common.settings import make_settings
-
-PROJECT_NAME = "robots-at-runes"
-
-settings = make_settings(PROJECT_NAME)