diff --git a/.gitignore b/.gitignore
index 436330e149a06dc985deda5a362185c2f2db9758..c4a537b5524ec804c80173efafcb0a4ec49ff5e9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,7 +5,6 @@
 /pipelines
 
 # TF models
-/models
 do_not_commit.py
 
 # Byte-compiled / optimized / DLL files
diff --git a/src/polystar/utils/path.py b/src/polystar/utils/path.py
index 72e8a997d5d2a10d07dd94ddefa7949be1d8b1c5..bdae2964a3366486511885820839d009b0251deb 100644
--- a/src/polystar/utils/path.py
+++ b/src/polystar/utils/path.py
@@ -1,4 +1,4 @@
-from os import remove
+from os import PathLike, remove
 from pathlib import Path
 from shutil import copy, make_archive, move
 from typing import Iterable
@@ -24,3 +24,10 @@ def copy_file(source: Path, destination_directory: Path) -> Path:
 
 def archive_directory(directory:Path):
     make_archive(str(directory), "zip", str(directory))
+
+
+def make_path(p: PathLike) -> Path:
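+    """Create the directory (and parents) if it does not exist, then return it as a Path."""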
+    path = Path(p)
+    path.mkdir(exist_ok=True, parents=True)
+    return path
diff --git a/src/research/roco_detection/__init__.py b/src/research/roco_detection/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/research/roco_detection/training/__init__.py b/src/research/roco_detection/training/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/research/roco_detection/training/tf1/__init__.py b/src/research/roco_detection/training/tf1/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/research/roco_detection/training/tf1/model_config.py b/src/research/roco_detection/training/tf1/model_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb4c7566d74b9a9d643181f60f41fdf3a8022cc1
--- /dev/null
+++ b/src/research/roco_detection/training/tf1/model_config.py
@@ -0,0 +1,86 @@
+from datetime import datetime
+from pathlib import Path
+from typing import List
+
+from google.protobuf.text_format import Merge, MessageToString
+from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig
+from object_detection.protos.preprocessor_pb2 import PreprocessingStep
+
+from polystar.constants import LABEL_MAP_PATH, PROJECT_DIR
+from research.roco_detection.training.tf1.records import Records
+from research.roco_detection.training.tf1.trainable_model import TrainableModel
+
+CONFIGS_DIR = PROJECT_DIR / "models/research/object_detection/samples/configs"
+
+
+class ModelConfig:
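+    """Adapts a sample pipeline config from the object detection API to our records, classes and input size."""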
+    def __init__(self, pretrained_dir: Path, config_name: str):
+        self.pretrained_dir = pretrained_dir
+        self.config_name = config_name
+
+    def configure(self, record: Records, data_augm: bool, height: int, width: int, n_classes: int) -> TrainableModel:
+        config = self.read_config()
+
+        self._update_config(config, record, data_augm, height, width, n_classes)
+
+        config_path = self.pretrained_dir / "pipeline.config"
+
+        config_path.write_text(MessageToString(config))
+
+        size = f"{width}x{height}__" if width is not None else ""
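+        # experiment name: timestamp, backbone ([:-16] strips "_coco_YYYY_MM_DD"), size/augm tags, train record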
+        full_name = (
+            f'{datetime.now().strftime("%y%m%d_%H%M%S")}__'
+            f"{self.pretrained_dir.stem[:-16]}__"
+            f"{size}"
+            f'{"AUGM__" * data_augm}'
+            f"{record.train.stem}"
+        )
+
+        return TrainableModel(config_path, full_name)
+
+    def _update_config(self, config, record: Records, data_augm: bool, height: int, width: int, n_classes: int):
+        model_config = getattr(config.model, config.model.WhichOneof("model"))
+        model_config.num_classes = n_classes
+        _configure_input_shape(model_config, width, height)
+        config.train_config.fine_tune_checkpoint = str(self.pretrained_dir / "model.ckpt")
+        config.eval_config.max_evals = 0
+        _configure_reader(config.train_input_reader, record.train)
+        _configure_reader(config.eval_input_reader[0], record.val)
+        if data_augm:
+            _add_augmentations(
+                config,
+                [
+                    # b"random_pixel_value_scale {}",
+                    b"random_adjust_brightness {}",
+                    b"random_adjust_contrast {}",
+                    b"random_adjust_hue {}",
+                    b"random_adjust_saturation {}",
+                    b"random_jitter_boxes {}",
+                ],
+            )
+
+    def read_config(self):
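+        # parse the sample .config (protobuf text format) shipped with the object detection API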
+        config = TrainEvalPipelineConfig()
+        Merge((CONFIGS_DIR / self.config_name).read_text(), config)
+        return config
+
+
+def _configure_reader(reader, record_path: Path):
+    reader.label_map_path = str(LABEL_MAP_PATH)
+    reader.tf_record_input_reader.input_path[0] = str(record_path)
+
+
+def _configure_input_shape(model_config, width: int, height: int):
+    if width is None:
+        return
+    shape_resizer = model_config.image_resizer.fixed_shape_resizer
+    shape_resizer.width = width
+    shape_resizer.height = height
+
+
+def _add_augmentations(config, augmentations: List[bytes]):
+    for augmentation in augmentations:
+        config.train_config.data_augmentation_options.append(Merge(augmentation, PreprocessingStep()))
diff --git a/src/research/roco_detection/training/tf1/pretrained_models.py b/src/research/roco_detection/training/tf1/pretrained_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e21b33b73db1c621a1eec05eefca15a66daa123
--- /dev/null
+++ b/src/research/roco_detection/training/tf1/pretrained_models.py
@@ -0,0 +1,50 @@
+import logging
+import tarfile
+from contextlib import closing
+from enum import Enum
+from urllib.request import urlretrieve
+
+from polystar.utils.path import make_path
+from research.roco_detection.training.tf1.model_config import ModelConfig
+from research.roco_detection.training.tf1.records import Records
+from research.roco_detection.training.tf1.trainable_model import EXPERIMENTS_DIR, TrainableModel
+
+logger = logging.getLogger(__name__)
+
+PRETRAINED_MODELS_DIR = make_path(EXPERIMENTS_DIR / "pretrained")
+
+
+class PretrainedModels(Enum):
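+    """Checkpoints from the TF1 detection model zoo, paired with their sample pipeline configs."""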
+    SSD_MOBILENET_V2 = ("ssd_mobilenet_v2_coco_2018_03_29", "ssd_mobilenet_v2_coco.config")
+    FASTER_RCNN_INCEPTION_V2 = ("faster_rcnn_inception_v2_coco_2018_01_28", "faster_rcnn_inception_v2_pets.config")
+    RFCN_RESENET101 = ("rfcn_resnet101_coco_2018_01_28", "rfcn_resnet101_pets.config")
+
+    def __init__(self, model_name: str, config_name: str):
+        self.config_name = config_name
+        self.model_name = model_name
+        self.pretrained_dir = PRETRAINED_MODELS_DIR / model_name
+        self.config = ModelConfig(self.pretrained_dir, self.config_name)
+
+    def setup(
+        self, record: Records, data_augm: bool = False, height: int = None, width: int = None, n_classes: int = 5
+    ) -> TrainableModel:
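+        """Download the checkpoint if needed, then write a pipeline.config tuned for the given record."""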
+        self._download()
+        return self.config.configure(record, data_augm=data_augm, height=height, width=width, n_classes=n_classes)
+
+    def _download(self):
+        if self.pretrained_dir.exists():
+            logger.info(f"model {self.model_name} already downloaded")
+            return
+
+        archive_path = f"{self.pretrained_dir}.tar.gz"
+
+        # download the pretrained archive from the TensorFlow detection model zoo
+        urlretrieve(
+            f"http://download.tensorflow.org/models/object_detection/{self.model_name}.tar.gz", archive_path,
+        )
+
+        # extract the archive next to the other pretrained models
+        with closing(tarfile.open(archive_path)) as tar:
+            tar.extractall(PRETRAINED_MODELS_DIR)
diff --git a/src/research/roco_detection/training/tf1/records.py b/src/research/roco_detection/training/tf1/records.py
new file mode 100644
index 0000000000000000000000000000000000000000..31028779ad10c485ef826d8e9268367c5ac3e55d
--- /dev/null
+++ b/src/research/roco_detection/training/tf1/records.py
@@ -0,0 +1,13 @@
+from enum import Enum
+
+from research.constants import TENSORFLOW_RECORDS_DIR
+
+
+class Records(Enum):
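+    """Training TFRecords; every variant is validated on the same Twitch record."""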
+    TWITCH = "Twitch2_Train_T470149066_T470150052_T470152289_T470153081_T470158483_1775_imgs"
+    DJI_TWITCH = "Twitch2_Dji_Train_T470149066_T470150052_T470152289_T470153081_T470158483_FINAL_CENTRAL_CHINA_NORTH_CHINA_SOUTH_CHINA_12143_imgs"
+
+    def __init__(self, train_file: str):
+        self.train = (TENSORFLOW_RECORDS_DIR / train_file).with_suffix(".record")
+        self.val = TENSORFLOW_RECORDS_DIR / "Twitch2_Val_T470152932_T470149568_477_imgs.record"
diff --git a/src/research/roco_detection/training/tf1/train.py b/src/research/roco_detection/training/tf1/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..71ee2e3b3772f3704ffbd1998fff0a8febee6630
--- /dev/null
+++ b/src/research/roco_detection/training/tf1/train.py
@@ -0,0 +1,8 @@
+import logging
+
+from research.roco_detection.training.tf1.pretrained_models import PretrainedModels
+from research.roco_detection.training.tf1.records import Records
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    PretrainedModels.SSD_MOBILENET_V2.setup(Records.TWITCH).train_and_export(nb_steps=6)
diff --git a/src/research/roco_detection/training/tf1/trainable_model.py b/src/research/roco_detection/training/tf1/trainable_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d3a32aeca2a5b9ac161e4524b2a8a6156a6c513
--- /dev/null
+++ b/src/research/roco_detection/training/tf1/trainable_model.py
@@ -0,0 +1,73 @@
+from pathlib import Path
+from typing import ClassVar
+
+from google.protobuf.text_format import Merge
+from object_detection.exporter import export_inference_graph
+from object_detection.model_lib import create_estimator_and_inputs
+from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig
+from tensorflow_estimator.python.estimator.exporter import FinalExporter
+from tensorflow_estimator.python.estimator.run_config import RunConfig
+from tensorflow_estimator.python.estimator.training import EvalSpec, TrainSpec, train_and_evaluate
+
+from polystar.utils.path import make_path
+from research.constants import EVALUATION_DIR, PIPELINES_DIR
+
+EXPERIMENTS_DIR = make_path(EVALUATION_DIR / "roco-detection" / "tf1")
+
+
+class TrainableModel:
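+    """Runs TF1 estimator training/evaluation from a pipeline.config, then exports a frozen inference graph."""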
+    SAVE_CHECKPOINTS_STEPS: ClassVar[int] = 5  # smoke-test value; use 1_000 for full trainings
+    EVAL_EVERY_SECS: ClassVar[int] = 30 * 60
+
+    def __init__(self, config_path: Path, name: str):
+        self.config_path = config_path
+        self.name = name
+        self.training_path = EXPERIMENTS_DIR / self.name
+
+    def train_and_export(self, nb_steps: int):
+        self.launch_training(nb_steps=nb_steps)
+        self.export()
+
+    def launch_training(self, nb_steps: int):
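+        # build the estimator and input functions from the pipeline config, then run train/eval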
+        run_config = RunConfig(
+            model_dir=str(self.training_path), save_checkpoints_steps=self.SAVE_CHECKPOINTS_STEPS, keep_checkpoint_max=2
+        )
+        train_and_eval_dict = create_estimator_and_inputs(
+            run_config=run_config, pipeline_config_path=str(self.config_path)
+        )
+        estimator = train_and_eval_dict["estimator"]
+        train_input_fn = train_and_eval_dict["train_input_fn"]
+        eval_input_fns = train_and_eval_dict["eval_input_fns"]
+        predict_input_fn = train_and_eval_dict["predict_input_fn"]
+
+        train_spec = TrainSpec(train_input_fn, nb_steps)
+        eval_spec = EvalSpec(
+            name="0",
+            input_fn=eval_input_fns[0],
+            steps=None,
+            exporters=FinalExporter(name="Servo", serving_input_receiver_fn=predict_input_fn),
+            throttle_secs=self.EVAL_EVERY_SECS,
+        )
+
+        train_and_evaluate(estimator, train_spec, eval_spec)
+
+        return self
+
+    def export(self):
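+        # freeze the latest checkpoint into an inference graph under the pipelines directory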
+        pipeline_config = TrainEvalPipelineConfig()
+        Merge(self.config_path.read_text(), pipeline_config)
+        last_ckpt = max(self.training_path.glob("model.ckpt-*.meta"), key=_get_ckpt_number_from_file).with_suffix("")
+        n_steps = last_ckpt.suffix.split("-")[-1]
+        export_inference_graph(
+            input_type="image_tensor",
+            pipeline_config=pipeline_config,
+            trained_checkpoint_prefix=str(last_ckpt),
+            output_directory=str(PIPELINES_DIR / "roco-detection" / f"{self.name}__{n_steps}_steps"),
+        )
+
+
+def _get_ckpt_number_from_file(ckpt_file: Path) -> int:
+    return int(ckpt_file.stem[len("model.ckpt-") :])