diff --git a/common/polystar/common/communication/usb_target_sender.py b/common/polystar/common/communication/usb_target_sender.py
new file mode 100644
index 0000000000000000000000000000000000000000..b295f9b30bacb3cfd2c4482d5f89dcd419596fe4
--- /dev/null
+++ b/common/polystar/common/communication/usb_target_sender.py
@@ -0,0 +1,59 @@
+import logging
+
+import usb.core
+import usb.util
+
+from polystar.common.communication.target_sender_abc import TargetSenderABC
+from polystar.common.target_pipeline.target_abc import TargetABC, SimpleTarget
+
+logger = logging.getLogger(__name__)
+
+
+class USBConnectionFailed(Exception):
+    pass
+
+
+class USBTargetSender(TargetSenderABC):
+    def __init__(self):
+        self._endpoint = self._get_usb_endpoint()
+
+    def send(self, target: TargetABC):
+        data = target.to_json()
+        if self._endpoint is not None:
+            self._endpoint.write(data)
+        else:
+            logger.warning(f"{data} not sent: no USB endpoint available")
+
+    def _get_usb_endpoint(self) -> usb.core.Endpoint:
+        try:
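+            # NOTE: the vendor/product IDs below look like generic example values;
+            # assumed placeholders to be replaced with the IDs of the actual USB device.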
+            dev = usb.core.find(idVendor=0xFFFE, idProduct=0x0001)
+
+            if dev is None:
+                raise USBConnectionFailed
+
+            # With no arguments, the first configuration will be the active one
+            dev.set_configuration()
+
+            intf = dev.get_active_configuration()[(0, 0)]
+            ep = usb.util.find_descriptor(
+                intf,
+                # match the first OUT endpoint
+                custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT,
+            )
+
+            if ep is None:
+                raise USBConnectionFailed()
+
+            return ep
+        except USBConnectionFailed:
+            return self._handle_no_connection()
+
+    @staticmethod
+    def _handle_no_connection():
+        logger.warning("failed to setup usb connection")
+        # TODO: what should we do in production ?
+        return None
+
+
+if __name__ == "__main__":
+    USBTargetSender().send(SimpleTarget(10, 20, 30))
diff --git a/common/polystar/common/constants.py b/common/polystar/common/constants.py
index a40490bf0b319eb3bff9918ca217c92cb509fdfe..03bfb7e46ee3412e390b4383a85c01f76c78d712 100644
--- a/common/polystar/common/constants.py
+++ b/common/polystar/common/constants.py
@@ -4,4 +4,4 @@ PROJECT_DIR: Path = Path(__file__).parent.parent.parent.parent
 RESOURCES_DIR: Path = PROJECT_DIR / "resources"
 MODELS_DIR: Path = RESOURCES_DIR / "models"
 
-LABEL_MAP_PATH: Path = PROJECT_DIR / "dataset" / "tf_records" / "label_map.pbtxt"
+LABEL_MAP_PATH: Path = PROJECT_DIR / "dataset" / "label_map.pbtxt"
diff --git a/common/polystar/common/models/image_annotation.py b/common/polystar/common/models/image_annotation.py
index de2f4aab273c10ab6ff5699bd8aae7b552905a86..b34e4116626ba1be75bb26c7e3ac62045b50d087 100644
--- a/common/polystar/common/models/image_annotation.py
+++ b/common/polystar/common/models/image_annotation.py
@@ -1,12 +1,11 @@
 import logging
+from dataclasses import dataclass, field
 from pathlib import Path
 from typing import List
+from xml.dom.minidom import parseString
 
 import xmltodict
-from dataclasses import dataclass, field
 from dicttoxml import dicttoxml
-from xml.dom.minidom import parseString
-
 from polystar.common.models.image import Image
 from polystar.common.models.object import Object, ObjectFactory
 
@@ -37,7 +36,7 @@ class ImageAnnotation:
         try:
             annotation = xmltodict.parse(xml_file.read_text())["annotation"]
 
-            json_objects = annotation.get("object", [])
+            json_objects = annotation.get("object", []) or []
             json_objects = json_objects if isinstance(json_objects, list) else [json_objects]
             roco_json_objects = [obj_json for obj_json in json_objects if not obj_json["name"].startswith("rune")]
             objects = [ObjectFactory.from_json(obj_json) for obj_json in roco_json_objects]
@@ -71,3 +70,13 @@ class ImageAnnotation:
             .replace(b"<object><object>", b"<object>")
             .replace(b"</object></object>", b"</object>")
         ).toprettyxml()
+
+    def save_to_dir(self, directory: Path, image_name: str):
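+        # write the image under <directory>/image/ and the annotation under <directory>/image_annotation/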
+        self.image_path = (directory / "image" / image_name).with_suffix(".jpg")
+        self.xml_path = (directory / "image_annotation" / image_name).with_suffix(".xml")
+
+        self.image_path.parent.mkdir(exist_ok=True, parents=True)
+        self.xml_path.parent.mkdir(exist_ok=True, parents=True)
+
+        Image.save(self.image, self.image_path)
+        self.xml_path.write_text(self.to_xml())
diff --git a/common/polystar/common/models/label_map.py b/common/polystar/common/models/label_map.py
index 7957bd0bcdff95b857780d045e49a05a66ef7e98..3574349f823aa38e046df429e0dde390893765af 100644
--- a/common/polystar/common/models/label_map.py
+++ b/common/polystar/common/models/label_map.py
@@ -1,8 +1,9 @@
 import re
+from dataclasses import dataclass
 from pathlib import Path
-from typing import Dict, Any, List
+from typing import Any, Dict, List
 
-from dataclasses import dataclass
+from polystar.common.constants import LABEL_MAP_PATH
 
 
 @dataclass
@@ -30,3 +31,6 @@ class LabelMap:
         name2id = {n: int(i) for i, n in d.items()}
         id2name = d
         return LabelMap(id2name=id2name, name2id=name2id)
+
+
+label_map = LabelMap.from_file(LABEL_MAP_PATH)
diff --git a/common/polystar/common/models/object.py b/common/polystar/common/models/object.py
index 63f3d5639409e5ce028cca104daebfeeb9b2b7ab..e14c05569c89a5d422598ba19f492fbc52b72157 100644
--- a/common/polystar/common/models/object.py
+++ b/common/polystar/common/models/object.py
@@ -60,7 +60,7 @@ class ObjectFactory:
         if t is not ObjectType.Armor:
             return Object(type=t, box=Box.from_size(x, y, w, h=h))
 
-        armor_number = ArmorNumber(json["armor_class"]) if json["armor_class"] != "none" else 0
+        armor_number = ArmorNumber(int(json["armor_class"])) if json["armor_class"] != "none" else 0
 
         return Armor(
             type=t, box=Box.from_size(x, y, w, h=h), number=armor_number, color=ArmorColor(json["armor_color"])
@@ -70,10 +70,10 @@ class ObjectFactory:
     def to_json(obj: Object) -> Json:
         rv = Json(
             {
-                "name": obj.type.value.lower(),
+                "name": obj.type.name.lower(),
                 "bndbox": {"xmin": obj.box.x1, "xmax": obj.box.x2, "ymin": obj.box.y1, "ymax": obj.box.y2},
             }
         )
         if isinstance(obj, Armor):
-            rv.update({"armor_class": obj.number, "armor_color": obj.color.value.lower()})
+            rv.update({"armor_class": obj.number, "armor_color": obj.color.name.lower()})
         return rv
diff --git a/common/polystar/common/target_pipeline/debug_pipeline.py b/common/polystar/common/target_pipeline/debug_pipeline.py
index 55e667a992bcfe18cac9baa2cec7769d5ada4372..e0e9ec8c0c3c90ddf9b637df96ddc58e9a6328a4 100644
--- a/common/polystar/common/target_pipeline/debug_pipeline.py
+++ b/common/polystar/common/target_pipeline/debug_pipeline.py
@@ -11,10 +11,11 @@ from polystar.common.target_pipeline.target_pipeline import TargetPipeline
 
 @dataclass
 class DebugInfo:
-    detected_robots: List[DetectedRobot] = field(init=False)
-    validated_robots: List[DetectedRobot] = field(init=False)
-    selected_armor: DetectedArmor = field(init=False)
-    target: TargetABC = field(init=False)
+    image: Image = None
+    detected_robots: List[DetectedRobot] = field(init=False, default_factory=list)
+    validated_robots: List[DetectedRobot] = field(init=False, default_factory=list)
+    selected_armor: DetectedArmor = field(init=False, default=None)
+    target: TargetABC = field(init=False, default=None)
 
 
 @dataclass
@@ -24,7 +25,7 @@ class DebugTargetPipeline(TargetPipeline):
     debug_info_: DebugInfo = field(init=False, default_factory=DebugInfo)
 
     def predict_target(self, image: Image) -> TargetABC:
-        self.debug_info_ = DebugInfo()
+        self.debug_info_ = DebugInfo(image)
         target = super().predict_target(image)
         self.debug_info_.target = target
         return target
diff --git a/common/polystar/common/target_pipeline/objects_validators/robot_color_validator.py b/common/polystar/common/target_pipeline/objects_validators/robot_color_validator.py
new file mode 100644
index 0000000000000000000000000000000000000000..b50c2ed387caf931f963d691bf6fc3b0bb6c7aaa
--- /dev/null
+++ b/common/polystar/common/target_pipeline/objects_validators/robot_color_validator.py
@@ -0,0 +1,17 @@
+from dataclasses import dataclass
+
+import numpy as np
+
+from polystar.common.models.object import ArmorColor
+from polystar.common.target_pipeline.detected_objects.detected_robot import DetectedRobot
+from polystar.common.target_pipeline.objects_validators.objects_validator_abc import ObjectsValidatorABC
+
+
+@dataclass
+class RobotPercentageColorValidator(ObjectsValidatorABC[DetectedRobot]):
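+    """Validate a robot when at least `min_percentage` of its detected armors match the expected `color`."""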
+    color: ArmorColor
+    min_percentage: float = 0.5
+
+    def validate_single(self, robot: DetectedRobot, image: np.ndarray) -> bool:
+        good_colors = [armor.color is self.color for armor in robot.armors]
+        return sum(good_colors) >= len(good_colors) * self.min_percentage
diff --git a/common/polystar/common/utils/str_utils.py b/common/polystar/common/utils/str_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dc38aa385425c3a65971dd6357d460414abe8f5
--- /dev/null
+++ b/common/polystar/common/utils/str_utils.py
@@ -0,0 +1,24 @@
+import re
+
+
+def snake2camel(snake_text: str):
+    """
+    >>> snake2camel("simple_test")
+    'SimpleTest'
+    """
+    return "".join(word.title() for word in snake_text.split("_"))
+
+
+CAP_LETTER_PATTERN = re.compile(r"(?<!^)(?=[A-Z])")
+
+
+def camel2snake(camel_text: str):
+    """
+    >>> camel2snake("SimpleCase")
+    'simple_case'
+
+    >>> camel2snake("simpleCase")
+    'simple_case'
+    """
+
+    return CAP_LETTER_PATTERN.sub("_", camel_text).lower()
diff --git a/common/polystar/common/view/results_viewer_abc.py b/common/polystar/common/view/results_viewer_abc.py
index 01b68c90934db1b65dcecb1b7bf06742e7b4a392..619b47e492abd518f1b3e23eb4bda82058d6e776 100644
--- a/common/polystar/common/view/results_viewer_abc.py
+++ b/common/polystar/common/view/results_viewer_abc.py
@@ -5,6 +5,7 @@ from typing import Iterable, NewType, Sequence, Tuple
 from polystar.common.models.image import Image
 from polystar.common.models.image_annotation import ImageAnnotation
 from polystar.common.models.object import Object
+from polystar.common.target_pipeline.debug_pipeline import DebugInfo
 from polystar.common.target_pipeline.detected_objects.detected_robot import DetectedRobot, FakeDetectedRobot
 
 ColorView = NewType("ColorView", Tuple[float, float, float])
@@ -53,6 +54,14 @@ class ResultViewerABC(ABC):
     def display_image_annotation(self, annotation: ImageAnnotation):
         self.display_image_with_objects(annotation.image, annotation.objects)
 
+    def display_debug_info(self, debug_info: DebugInfo):
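+        # draw every detection in gray, then overlay the validated robots and the selected armor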
+        self.new(debug_info.image)
+        self.add_robots(debug_info.detected_robots, forced_color=(0.3, 0.3, 0.3))
+        self.add_robots(debug_info.validated_robots)
+        if debug_info.selected_armor is not None:
+            self.add_object(debug_info.selected_armor)
+        self.display()
+
     def add_robot(self, robot: DetectedRobot, forced_color: ColorView = None):
         objects = robot.armors
         if not isinstance(robot, FakeDetectedRobot):
diff --git a/common/research_common/constants.py b/common/research_common/constants.py
index 784b4c6ca9708194646ad73a659d189950bce5d3..969bbb4490d9729c8c7cad53f3361654f64b5ab2 100644
--- a/common/research_common/constants.py
+++ b/common/research_common/constants.py
@@ -5,13 +5,15 @@ from polystar.common.constants import PROJECT_DIR
 DSET_DIR: Path = PROJECT_DIR / "dataset"
 
 TWITCH_DSET_DIR: Path = DSET_DIR / "twitch"
-ROCO_DSET_DIR: Path = DSET_DIR / "dji_roco"
+DJI_ROCO_DSET_DIR: Path = DSET_DIR / "dji_roco"
+DJI_ROCO_ZOOMED_DSET_DIR: Path = DSET_DIR / "dji_roco_zoomed_v1"
 TENSORFLOW_RECORDS_DIR: Path = DSET_DIR / "tf_records"
 TWITCH_ROBOTS_VIEWS_DIR: Path = TWITCH_DSET_DIR / "robots-views"
 TWITCH_DSET_ROBOTS_VIEWS_DIR: Path = TWITCH_DSET_DIR / "final-robots-views"
 
 TWITCH_DSET_DIR.mkdir(parents=True, exist_ok=True)
-ROCO_DSET_DIR.mkdir(parents=True, exist_ok=True)
+DJI_ROCO_DSET_DIR.mkdir(parents=True, exist_ok=True)
+DJI_ROCO_ZOOMED_DSET_DIR.mkdir(parents=True, exist_ok=True)
 TENSORFLOW_RECORDS_DIR.mkdir(parents=True, exist_ok=True)
 TWITCH_ROBOTS_VIEWS_DIR.mkdir(parents=True, exist_ok=True)
 TWITCH_DSET_ROBOTS_VIEWS_DIR.mkdir(parents=True, exist_ok=True)
diff --git a/common/research_common/dataset/dji/dji_roco_datasets.py b/common/research_common/dataset/dji/dji_roco_datasets.py
index b524a6b2c93d9e2477e8f87a6964c8b06ea93eeb..43ade1a796cc97978615dc25c24c093d777ef990 100644
--- a/common/research_common/dataset/dji/dji_roco_datasets.py
+++ b/common/research_common/dataset/dji/dji_roco_datasets.py
@@ -1,12 +1,12 @@
 from enum import Enum
 
-from research_common.constants import ROCO_DSET_DIR
+from research_common.constants import DJI_ROCO_DSET_DIR
 from research_common.dataset.directory_roco_dataset import DirectoryROCODataset
 
 
 class DJIROCODataset(DirectoryROCODataset, Enum):
     def __init__(self, competition_name: str):
-        super().__init__(ROCO_DSET_DIR / competition_name, self.name)
+        super().__init__(DJI_ROCO_DSET_DIR / competition_name, self.name)
 
     CentralChina = "robomaster_Central China Regional Competition"
     NorthChina = "robomaster_North China Regional Competition"
diff --git a/common/research_common/dataset/dji/dji_roco_zoomed_datasets.py b/common/research_common/dataset/dji/dji_roco_zoomed_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab2e2c6d5abc48674569d159cf6d7c8aa80dbef1
--- /dev/null
+++ b/common/research_common/dataset/dji/dji_roco_zoomed_datasets.py
@@ -0,0 +1,15 @@
+from enum import Enum, auto
+
+from polystar.common.utils.str_utils import camel2snake
+from research_common.constants import DJI_ROCO_ZOOMED_DSET_DIR
+from research_common.dataset.directory_roco_dataset import DirectoryROCODataset
+
+
+class DJIROCOZoomedDataset(DirectoryROCODataset, Enum):
+    def __init__(self, _):
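+        # e.g. CentralChina -> <dataset dir>/dji_roco_zoomed_v1/central_china, dataset name "CentralChinaZoomedV1"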
+        super().__init__(DJI_ROCO_ZOOMED_DSET_DIR / camel2snake(self.name), f"{self.name}ZoomedV1")
+
+    CentralChina = auto()
+    NorthChina = auto()
+    SouthChina = auto()
+    Final = auto()
diff --git a/common/research_common/dataset/improvement/zoom.py b/common/research_common/dataset/improvement/zoom.py
index f26a3f5b2b87306a112c481be626ea16245bc9b0..c975a5896b59475b0abe69eb2a20067956414bf4 100644
--- a/common/research_common/dataset/improvement/zoom.py
+++ b/common/research_common/dataset/improvement/zoom.py
@@ -10,7 +10,7 @@ from polystar.common.view.plt_results_viewer import PltResultViewer
 from research_common.dataset.dji.dji_roco_datasets import DJIROCODataset
 
 
-def crop_image_annotation(image_annotation: ImageAnnotation, box: Box, min_coverage: float):
+def crop_image_annotation(image_annotation: ImageAnnotation, box: Box, min_coverage: float) -> ImageAnnotation:
     objects = InBoxValidator(box, min_coverage).filter(image_annotation.objects, image_annotation.image)
     objects = [copy(o) for o in objects]
     for obj in objects:
diff --git a/common/research_common/dataset/roco_dataset.py b/common/research_common/dataset/roco_dataset.py
index 866f9a184dd7eb0d8eaf1c49ba0b049e2eb024f9..b90eefc0d2e772937dd576381a828725d22500d6 100644
--- a/common/research_common/dataset/roco_dataset.py
+++ b/common/research_common/dataset/roco_dataset.py
@@ -2,6 +2,8 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Iterable
 
+from more_itertools import ilen
+
 from polystar.common.models.image import Image
 from polystar.common.models.image_annotation import ImageAnnotation
 
@@ -21,3 +23,6 @@ class ROCODataset:
     def image_annotations(self) -> Iterable[ImageAnnotation]:
         for annotation_path in self.annotation_paths:
             yield ImageAnnotation.from_xml_file(annotation_path)
+
+    def __len__(self) -> int:
+        return ilen(self.image_annotations)
diff --git a/common/research_common/dataset/roco_dataset_descriptor.py b/common/research_common/dataset/roco_dataset_descriptor.py
index bbe63ffe3b18d0f0d28985d3f4968f59a11b4969..7f95b783d3f4542efe6493ecd45d7b56377c12ee 100644
--- a/common/research_common/dataset/roco_dataset_descriptor.py
+++ b/common/research_common/dataset/roco_dataset_descriptor.py
@@ -1,15 +1,15 @@
 from dataclasses import dataclass, field
+from itertools import chain
 from pathlib import Path
 from typing import Dict
 
 from pandas import DataFrame
 
-from polystar.common.models.object import ObjectType, Armor
+from polystar.common.models.object import Armor, ObjectType
 from polystar.common.utils.markdown import MarkdownFile
 from research_common.dataset.dji.dji_roco_datasets import DJIROCODataset
+from research_common.dataset.dji.dji_roco_zoomed_datasets import DJIROCOZoomedDataset
 from research_common.dataset.roco_dataset import ROCODataset
-from research_common.dataset.split import Split
-from research_common.dataset.split_dataset import SplitDataset
 from research_common.dataset.twitch.twitch_roco_datasets import TwitchROCODataset
 
 
@@ -69,14 +69,5 @@ def make_markdown_dataset_report(dataset: ROCODataset, report_dir: Path):
 
 
 if __name__ == "__main__":
-    for dset in TwitchROCODataset:
+    for dset in chain(TwitchROCODataset, DJIROCOZoomedDataset, DJIROCODataset):
         make_markdown_dataset_report(dset, dset.dataset_path)
-        # for split in Split:
-        #     split_dset = SplitDataset(dset, split)
-        #     make_markdown_dataset_report(split_dset, split_dset.dataset_path)
-
-    for dset in DJIROCODataset:
-        make_markdown_dataset_report(dset, dset.dataset_path)
-        for split in Split:
-            split_dset = SplitDataset(dset, split)
-            make_markdown_dataset_report(split_dset, split_dset.dataset_path)
diff --git a/common/research_common/dataset/tensorflow_record.py b/common/research_common/dataset/tensorflow_record.py
index a4ac852969e47e524cc1c1d8b7de86d4672912b5..93a0f5140f085f7b3a893d436e3de12f82abe966 100644
--- a/common/research_common/dataset/tensorflow_record.py
+++ b/common/research_common/dataset/tensorflow_record.py
@@ -1,5 +1,4 @@
 import hashlib
-from dataclasses import dataclass
 from shutil import move
 from typing import Iterable
 
@@ -8,65 +7,64 @@ from tensorflow_core.python.lib.io import python_io
 from tqdm import tqdm
 
 from polystar.common.models.image_annotation import ImageAnnotation
-from polystar.common.models.label_map import LabelMap
+from polystar.common.models.label_map import label_map
 from research_common.constants import TENSORFLOW_RECORDS_DIR
-from research_common.dataset.directory_roco_dataset import DirectoryROCODataset
 from research_common.dataset.roco_dataset import ROCODataset
 
 
-@dataclass
 class TensorflowRecordFactory:
-    label_map: LabelMap
-
-    def from_datasets(self, datasets: Iterable[DirectoryROCODataset], name: str):
+    @staticmethod
+    def from_datasets(datasets: Iterable[ROCODataset], name: str):
         writer = python_io.TFRecordWriter(str(TENSORFLOW_RECORDS_DIR / f"{name}.record"))
         c = 0
         for dataset in datasets:
-            for image_annotation in tqdm(dataset.image_annotations, desc=dataset.dataset_name):
-                writer.write(self.example_from_image_annotation(image_annotation).SerializeToString())
+            for image_annotation in tqdm(dataset.image_annotations, desc=dataset.dataset_name, total=len(dataset)):
+                writer.write(_example_from_image_annotation(image_annotation).SerializeToString())
                 c += 1
         writer.close()
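+        # rename the record so the file name carries the number of images it contains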
         move(str(TENSORFLOW_RECORDS_DIR / f"{name}.record"), str(TENSORFLOW_RECORDS_DIR / f"{name}_{c}_imgs.record"))
 
-    def from_dataset(self, dataset: ROCODataset):
-        self.from_datasets([dataset], name=dataset.dataset_name)
-
-    def example_from_image_annotation(self, image_annotation: ImageAnnotation) -> tf.train.Example:
-        image_name = image_annotation.image_path.name
-        encoded_jpg = image_annotation.image_path.read_bytes()
-        key = hashlib.sha256(encoded_jpg).hexdigest()
-
-        width, height = image_annotation.width, image_annotation.height
-
-        xmin, ymin, xmax, ymax, classes, classes_text = [], [], [], [], [], []
-
-        for obj in image_annotation.objects:
-            xmin.append(float(obj.box.x1) / width)
-            ymin.append(float(obj.box.y1) / height)
-            xmax.append(float(obj.box.x2) / width)
-            ymax.append(float(obj.box.y2) / height)
-            classes_text.append(obj.type.name.lower().encode("utf8"))
-            classes.append(self.label_map.id_of(obj.type.name.lower()))
-
-        return tf.train.Example(
-            features=tf.train.Features(
-                feature={
-                    "image/filename": bytes_feature(image_name.encode("utf8")),
-                    "image/source_id": bytes_feature(image_name.encode("utf8")),
-                    "image/height": int64_feature(height),
-                    "image/width": int64_feature(width),
-                    "image/key/sha256": bytes_feature(key.encode("utf8")),
-                    "image/encoded": bytes_feature(encoded_jpg),
-                    "image/format": bytes_feature("jpeg".encode("utf8")),
-                    "image/object/bbox/xmin": float_list_feature(xmin),
-                    "image/object/bbox/xmax": float_list_feature(xmax),
-                    "image/object/bbox/ymin": float_list_feature(ymin),
-                    "image/object/bbox/ymax": float_list_feature(ymax),
-                    "image/object/class/text": bytes_list_feature(classes_text),
-                    "image/object/class/label": int64_list_feature(classes),
-                }
-            )
+    @staticmethod
+    def from_dataset(dataset: ROCODataset):
+        TensorflowRecordFactory.from_datasets([dataset], name=dataset.dataset_name)
+
+
+def _example_from_image_annotation(image_annotation: ImageAnnotation) -> tf.train.Example:
+    image_name = image_annotation.image_path.name
+    encoded_jpg = image_annotation.image_path.read_bytes()
+    key = hashlib.sha256(encoded_jpg).hexdigest()
+
+    width, height = image_annotation.width, image_annotation.height
+
+    xmin, ymin, xmax, ymax, classes, classes_text = [], [], [], [], [], []
+
+    for obj in image_annotation.objects:
+        xmin.append(float(obj.box.x1) / width)
+        ymin.append(float(obj.box.y1) / height)
+        xmax.append(float(obj.box.x2) / width)
+        ymax.append(float(obj.box.y2) / height)
+        classes_text.append(obj.type.name.lower().encode("utf8"))
+        classes.append(label_map.id_of(obj.type.name.lower()))
+
+    return tf.train.Example(
+        features=tf.train.Features(
+            feature={
+                "image/filename": bytes_feature(image_name.encode("utf8")),
+                "image/source_id": bytes_feature(image_name.encode("utf8")),
+                "image/height": int64_feature(height),
+                "image/width": int64_feature(width),
+                "image/key/sha256": bytes_feature(key.encode("utf8")),
+                "image/encoded": bytes_feature(encoded_jpg),
+                "image/format": bytes_feature("jpeg".encode("utf8")),
+                "image/object/bbox/xmin": float_list_feature(xmin),
+                "image/object/bbox/xmax": float_list_feature(xmax),
+                "image/object/bbox/ymin": float_list_feature(ymin),
+                "image/object/bbox/ymax": float_list_feature(ymax),
+                "image/object/class/text": bytes_list_feature(classes_text),
+                "image/object/class/label": int64_list_feature(classes),
+            }
         )
+    )
 
 
 # Functions copied from https://github.com/tensorflow/models/blob/master/research/object_detection/utils/dataset_util.py
diff --git a/common/research_common/dataset/twitch/twitch_roco_datasets.py b/common/research_common/dataset/twitch/twitch_roco_datasets.py
index 368a7c48ee12d59cb1f91df26554f85e1f936a7d..ff4ffa26332a5b5af91aa05eb8f51ff14096b6cc 100644
--- a/common/research_common/dataset/twitch/twitch_roco_datasets.py
+++ b/common/research_common/dataset/twitch/twitch_roco_datasets.py
@@ -1,18 +1,35 @@
-from enum import Enum
+"""
+>>> TwitchROCODataset.TWITCH_470149568.dataset_name
+'T470149568'
+
+>>> from research_common.constants import DSET_DIR
+>>> TwitchROCODataset.TWITCH_470149568.dataset_path.relative_to(DSET_DIR)
+PosixPath('twitch/v1/470149568')
+
+>>> TwitchROCODataset.TWITCH_470149568.video_url
+'https://www.twitch.tv/videos/470149568'
+"""
+
+from enum import Enum, auto
 
 from research_common.constants import TWITCH_DSET_DIR
 from research_common.dataset.directory_roco_dataset import DirectoryROCODataset
 
 
 class TwitchROCODataset(DirectoryROCODataset, Enum):
-    def __init__(self, competition_name: str):
-        super().__init__(TWITCH_DSET_DIR / "v1" / competition_name, self.name)
-
-    TWITCH_470149568 = "470149568"
-    TWITCH_470150052 = "470150052"
-    TWITCH_470151286 = "470151286"
-    TWITCH_470152289 = "470152289"
-    TWITCH_470152730 = "470152730"
-    TWITCH_470152838 = "470152838"
-    TWITCH_470153081 = "470153081"
-    TWITCH_470158483 = "470158483"
+    def __init__(self, _):
+        self.twitch_id = self.name[len("TWITCH_") :]
+        super().__init__(TWITCH_DSET_DIR / "v1" / self.twitch_id, f"T{self.twitch_id}")
+
+    @property
+    def video_url(self) -> str:
+        return f"https://www.twitch.tv/videos/{self.twitch_id}"
+
+    TWITCH_470149568 = auto()
+    TWITCH_470150052 = auto()
+    TWITCH_470151286 = auto()
+    TWITCH_470152289 = auto()
+    TWITCH_470152730 = auto()
+    TWITCH_470152838 = auto()
+    TWITCH_470153081 = auto()
+    TWITCH_470158483 = auto()
diff --git a/common/research_common/scripts/create_tensorflow_records.py b/common/research_common/scripts/create_tensorflow_records.py
index b5528ee032e46a99a0838520b1e74a5749ee621e..f7ea87b6ca5295b55a337aa819c22aecb124087a 100644
--- a/common/research_common/scripts/create_tensorflow_records.py
+++ b/common/research_common/scripts/create_tensorflow_records.py
@@ -1,29 +1,44 @@
-from injector import inject
+from itertools import chain
 
-from polystar.common.dependency_injection import make_common_injector
-from polystar.common.models.label_map import LabelMap
 from research_common.dataset.dji.dji_roco_datasets import DJIROCODataset
-from research_common.dataset.split import Split
-from research_common.dataset.split_dataset import SplitDataset
+from research_common.dataset.dji.dji_roco_zoomed_datasets import DJIROCOZoomedDataset
 from research_common.dataset.tensorflow_record import TensorflowRecordFactory
+from research_common.dataset.twitch.twitch_roco_datasets import TwitchROCODataset
+from research_common.dataset.union_dataset import UnionDataset
 
 
-@inject
-def create_one_record_per_roco_dset(label_map: LabelMap):
-    for roco_set in DJIROCODataset:
-        for split in Split:
-            TensorflowRecordFactory(label_map).from_dataset(SplitDataset(roco_set, split))
+def create_one_record_per_roco_dset():
+    for roco_set in chain(DJIROCODataset, DJIROCOZoomedDataset, TwitchROCODataset):
+        TensorflowRecordFactory.from_dataset(roco_set)
 
 
-@inject
-def create_one_roco_record(label_map: LabelMap):
-    for split in Split:
-        TensorflowRecordFactory(label_map).from_datasets(
-            [SplitDataset(roco_dset, split) for roco_dset in DJIROCODataset], f"DJI_ROCO_{split.name}"
+if __name__ == "__main__":
+    # create_one_record_per_roco_dset()
+
+    TensorflowRecordFactory.from_dataset(
+        UnionDataset(
+            TwitchROCODataset.TWITCH_470149568,
+            TwitchROCODataset.TWITCH_470150052,
+            TwitchROCODataset.TWITCH_470151286,
+            TwitchROCODataset.TWITCH_470152289,
+            TwitchROCODataset.TWITCH_470152730,
+        )
+    )
+
+    TensorflowRecordFactory.from_dataset(
+        UnionDataset(
+            TwitchROCODataset.TWITCH_470152838, TwitchROCODataset.TWITCH_470153081, TwitchROCODataset.TWITCH_470158483,
         )
+    )
 
+    TensorflowRecordFactory.from_dataset(
+        UnionDataset(DJIROCODataset.CentralChina, DJIROCODataset.NorthChina, DJIROCODataset.SouthChina)
+    )
+    TensorflowRecordFactory.from_dataset(DJIROCODataset.Final)
 
-if __name__ == "__main__":
-    injector = make_common_injector()
-    injector.call_with_injection(create_one_record_per_roco_dset)
-    injector.call_with_injection(create_one_roco_record)
+    TensorflowRecordFactory.from_dataset(
+        UnionDataset(
+            DJIROCOZoomedDataset.CentralChina, DJIROCOZoomedDataset.NorthChina, DJIROCOZoomedDataset.SouthChina
+        )
+    )
+    TensorflowRecordFactory.from_dataset(DJIROCOZoomedDataset.Final)
diff --git a/common/research_common/scripts/improve_roco_by_zooming.py b/common/research_common/scripts/improve_roco_by_zooming.py
new file mode 100644
index 0000000000000000000000000000000000000000..182697bd7f5d324932676ae4bbe91500615689ec
--- /dev/null
+++ b/common/research_common/scripts/improve_roco_by_zooming.py
@@ -0,0 +1,23 @@
+from tqdm import tqdm
+
+from research_common.dataset.dji.dji_roco_datasets import DJIROCODataset
+from research_common.dataset.dji.dji_roco_zoomed_datasets import DJIROCOZoomedDataset
+from research_common.dataset.improvement.zoom import Zoomer
+
+
+def improve_dji_roco_dataset_by_zooming(dset: DJIROCODataset, zoomer: Zoomer):
+    zoomed_dset: DJIROCOZoomedDataset = DJIROCOZoomedDataset[dset.name]
+    zoomed_dset.dataset_path.mkdir(parents=True, exist_ok=True)
+
+    for img in tqdm(dset.image_annotations, desc=f"Processing {dset}", unit="image", total=len(dset)):
+        for i, zoomed_image in enumerate(zoomer.zoom(img), 1):
+            zoomed_image.save_to_dir(zoomed_dset.dataset_path, f"{img.image_path.stem}_zoom_{i}")
+
+
+def improve_all_dji_datasets_by_zooming(zoomer: Zoomer):
+    for _dset in DJIROCODataset:
+        improve_dji_roco_dataset_by_zooming(zoomer=zoomer, dset=_dset)
+
+
+if __name__ == "__main__":
+    improve_all_dji_datasets_by_zooming(Zoomer(854, 480, 0.15, 0.5))
diff --git a/common/research_common/scripts/visualize_dataset.py b/common/research_common/scripts/visualize_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..f74696ed6e122331e571626490c320deecc74be0
--- /dev/null
+++ b/common/research_common/scripts/visualize_dataset.py
@@ -0,0 +1,17 @@
+from polystar.common.view.plt_results_viewer import PltResultViewer
+from research_common.dataset.dji.dji_roco_zoomed_datasets import DJIROCOZoomedDataset
+from research_common.dataset.roco_dataset import ROCODataset
+
+
+def visualize_dataset(dataset: ROCODataset, n_images: int):
+    viewer = PltResultViewer(dataset.dataset_name)
+
+    for i, annotation in enumerate(dataset.image_annotations, 1):
+        viewer.display_image_annotation(annotation)
+
+        if i == n_images:
+            return
+
+
+if __name__ == "__main__":
+    visualize_dataset(DJIROCOZoomedDataset.CentralChina, 10)
diff --git a/dataset/dji_roco_zoomed_v1/.gitignore b/dataset/dji_roco_zoomed_v1/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..94f2503bef834dfcefc90506ca5b9982c8c037a7
--- /dev/null
+++ b/dataset/dji_roco_zoomed_v1/.gitignore
@@ -0,0 +1,4 @@
+**/*.xml
+**/*.jpg
+**/colors
+**/digits
\ No newline at end of file
diff --git a/dataset/tf_records/label_map.pbtxt b/dataset/label_map.pbtxt
similarity index 100%
rename from dataset/tf_records/label_map.pbtxt
rename to dataset/label_map.pbtxt
diff --git a/poetry.lock b/poetry.lock
index cf38baba7fb9ed143921038d0933a1bdf66d022e..afa065702ca21c1c065efc5abf4cd2598fa4aa10 100644
Binary files a/poetry.lock and b/poetry.lock differ
diff --git a/pyproject.toml b/pyproject.toml
index 800ce1864fe9e6f7f28083780643831d91996d0e..4eefa8dd0759eefa660a8d77b0ee76e68ba496b7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -22,6 +22,7 @@ scikit-learn = "^0.22.2"
 memoized-property = "^1.0.3"
 dataclasses = "^0.6.0"
 imutils = "^0.5.3"
+more-itertools = "^8.4.0"
 
 [tool.poetry.dev-dependencies]
 tensorflow = "2.1.x"
diff --git a/resources/scripts/install-pypoetry.sh b/resources/scripts/install-pypoetry.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e83109b9f03b02f7ba058e660e3f4a9cf781464d
--- /dev/null
+++ b/resources/scripts/install-pypoetry.sh
@@ -0,0 +1,2 @@
+curl -sSL https://raw.githubusercontent.com/sdispater/poetry/master/get-poetry.py | python
+
diff --git a/robots-at-robots/research/demos/demo_pipeline.py b/robots-at-robots/research/demos/demo_pipeline.py
index 18ad42c3be844c2fba471f8ba78fc39c4678635d..ae48d4cddc7ae958e5f363cf402a574636aac2d1 100644
--- a/robots-at-robots/research/demos/demo_pipeline.py
+++ b/robots-at-robots/research/demos/demo_pipeline.py
@@ -55,14 +55,10 @@ if __name__ == "__main__":
                 try:
                     image = cv2.cvtColor(cv2.imread(str(image_path)), cv2.COLOR_BGR2RGB)
                     target = pipeline.predict_target(image)
-
-                    viewer.new(image)
-                    viewer.add_robots(pipeline.debug_info_.detected_robots, forced_color=(0.3, 0.3, 0.3))
-                    viewer.add_robots(pipeline.debug_info_.validated_robots)
-                    viewer.add_object(pipeline.debug_info_.selected_armor)
-                    viewer.display()
                 except NoTargetFoundException:
                     pass
+                finally:
+                    viewer.display_debug_info(pipeline.debug_info_)
 
                 if i == 5:
                     break