def do_imports(self,
                   task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for resolving file system paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """
        # --------- EuRoC DATASETS -----------
        # import specific EuRoC datasets that we have reference results for
        for name, path in [
            ('EuRoC MH_01_easy', os.path.join('datasets', 'EuRoC',
                                              'MH_01_easy')),
            ('EuRoC MH_04_difficult',
             os.path.join('datasets', 'EuRoC', 'MH_04_difficult')),
        ]:
            self.import_dataset(
                name=name,
                module_name='arvet_slam.dataset.euroc.euroc_loader',
                path=path,
                task_manager=task_manager,
                path_manager=path_manager)

        # --------- SYSTEMS -----------
        # ORBSLAM2 - Create 2 variants, with different processing modes
        vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
        self.import_system(name='ORBSLAM2 monocular',
                           db_client=db_client,
                           system=orbslam2.ORBSLAM2(
                               vocabulary_file=vocab_path,
                               mode=orbslam2.SensorMode.MONOCULAR,
                               settings={
                                   'ORBextractor': {
                                       'nFeatures': 1000,
                                       'scaleFactor': 1.2,
                                       'nLevels': 8,
                                       'iniThFAST': 20,
                                       'minThFAST': 7
                                   }
                               }))
        self.import_system(name='ORBSLAM2 stereo',
                           db_client=db_client,
                           system=orbslam2.ORBSLAM2(
                               vocabulary_file=vocab_path,
                               mode=orbslam2.SensorMode.STEREO,
                               settings={
                                   'ThDepth': 35,
                                   'ORBextractor': {
                                       'nFeatures': 1200,
                                       'scaleFactor': 1.2,
                                       'nLevels': 8,
                                       'iniThFAST': 20,
                                       'minThFAST': 7
                                   }
                               }))
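
# The nested `settings` dict above mirrors the dotted keys in an ORB-SLAM2 YAML settings
# file (e.g. "ORBextractor.nFeatures"). A minimal, self-contained sketch of how such a
# nested dict could be flattened into those dotted keys; `flatten_settings` is a
# hypothetical helper for illustration only, not part of arvet or the ORB-SLAM2 bindings.
def flatten_settings(settings: dict, prefix: str = '') -> dict:
    """Flatten nested settings into dotted keys, e.g. {'ORBextractor': {'nFeatures': 1000}}
    becomes {'ORBextractor.nFeatures': 1000}."""
    flat = {}
    for key, value in settings.items():
        full_key = '{0}.{1}'.format(prefix, key) if prefix else key
        if isinstance(value, dict):
            flat.update(flatten_settings(value, full_key))
        else:
            flat[full_key] = value
    return flat


if __name__ == '__main__':
    mono_settings = {
        'ORBextractor': {
            'nFeatures': 1000,
            'scaleFactor': 1.2,
            'nLevels': 8,
            'iniThFAST': 20,
            'minThFAST': 7
        }
    }
    print(flatten_settings(mono_settings))
    # {'ORBextractor.nFeatures': 1000, 'ORBextractor.scaleFactor': 1.2, ...}
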
    def do_imports(self, task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for resolving file system paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """
        # --------- KITTI DATASETS -----------
        # Import specific KITTI datasets that we have reference results for
        for sequence_num in {0, 3}:
            self.import_dataset(
                name='KITTI {0:02}'.format(sequence_num),
                module_name='arvet_slam.dataset.kitti.kitti_loader',
                path=os.path.join('datasets', 'KITTI', 'dataset'),
                additional_args={'sequence_number': sequence_num},
                task_manager=task_manager,
                path_manager=path_manager
            )

        # --------- SYSTEMS -----------
        # ORBSLAM2 - Create 2 variants, with different processing modes
        vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
        for sensor_mode in {orbslam2.SensorMode.STEREO, orbslam2.SensorMode.MONOCULAR}:
            self.import_system(
                name='ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower()),
                db_client=db_client,
                system=orbslam2.ORBSLAM2(
                    vocabulary_file=vocab_path,
                    mode=sensor_mode,
                    settings={
                        'ThDepth': 35,
                        'ORBextractor': {
                            'nFeatures': 2000,
                            'scaleFactor': 1.2,
                            'nLevels': 8,
                            'iniThFAST': 20,
                            'minThFAST': 7
                        }
                    }
                )
            )
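
# A standalone sketch of the enum-driven naming used above: one system variant per sensor
# mode, with the display name derived from the enum member. The SensorMode enum below is a
# stand-in declared for illustration; the real one comes from the orbslam2 bindings.
import enum


class SensorMode(enum.Enum):
    MONOCULAR = 0
    STEREO = 1
    RGBD = 2


def variant_name(sensor_mode: SensorMode) -> str:
    # Same pattern as 'ORBSLAM2 {mode}'.format(...) in the example above
    return 'ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower())


if __name__ == '__main__':
    for mode in (SensorMode.STEREO, SensorMode.MONOCULAR):
        print(variant_name(mode))   # ORBSLAM2 stereo, ORBSLAM2 monocular
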
    def do_imports(self,
                   task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for resolving file system paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """
        # --------- TUM DATASETS -----------
        # Import TUM datasets without using the manager; it is unnecessary here
        for folder in [
                'rgbd_dataset_freiburg1_xyz', 'rgbd_dataset_freiburg1_desk'
        ]:
            self.import_dataset(
                name="TUM {0}".format(folder),
                module_name='arvet_slam.dataset.tum.tum_loader',
                path=os.path.join('datasets', 'TUM', folder),
                task_manager=task_manager,
                path_manager=path_manager)

        # --------- SYSTEMS -----------
        # ORBSLAM2 - Create 2 variants, with different processing modes
        vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
        for sensor_mode in {
                orbslam2.SensorMode.RGBD, orbslam2.SensorMode.MONOCULAR
        }:
            self.import_system(
                name='ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower()),
                db_client=db_client,
                system=orbslam2.ORBSLAM2(vocabulary_file=vocab_path,
                                         mode=sensor_mode,
                                         settings={
                                             'ThDepth': 35,
                                             'ORBextractor': {
                                                 'nFeatures': 1000,
                                                 'scaleFactor': 1.2,
                                                 'nLevels': 8,
                                                 'iniThFAST': 20,
                                                 'minThFAST': 7
                                             }
                                         }))
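
# Before queuing import tasks for specific TUM sequences, it can help to verify that the
# expected folders actually exist under the dataset root. A minimal sketch using only the
# standard library; `find_missing_sequences` is a hypothetical helper, not part of arvet.
import os


def find_missing_sequences(dataset_root: str, folders: list) -> list:
    """Return the subset of `folders` that is not present under `dataset_root`."""
    return [
        folder for folder in folders
        if not os.path.isdir(os.path.join(dataset_root, folder))
    ]


if __name__ == '__main__':
    missing = find_missing_sequences(
        os.path.join('datasets', 'TUM'),
        ['rgbd_dataset_freiburg1_xyz', 'rgbd_dataset_freiburg1_desk'])
    if missing:
        print('Missing TUM sequences: {0}'.format(', '.join(missing)))
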
    def do_imports(self,
                   task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for resolving file system paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """
        # --------- REAL WORLD DATASETS -----------
        # Import KITTI datasets
        for sequence_num in range(11):
            self.import_dataset(
                name='KITTI {0:02}'.format(sequence_num),
                module_name='arvet_slam.dataset.kitti.kitti_loader',
                path=os.path.join('datasets', 'KITTI', 'dataset'),
                additional_args={'sequence_number': sequence_num},
                task_manager=task_manager,
                path_manager=path_manager)

        # Import EuRoC datasets
        for name, path in [
            ('EuRoC MH_01_easy', os.path.join('datasets', 'EuRoC',
                                              'MH_01_easy')),
            ('EuRoC MH_02_easy', os.path.join('datasets', 'EuRoC',
                                              'MH_02_easy')),
            ('EuRoC MH_03_medium',
             os.path.join('datasets', 'EuRoC', 'MH_03_medium')),
            ('EuRoC MH_04_difficult',
             os.path.join('datasets', 'EuRoC', 'MH_04_difficult')),
            ('EuRoC MH_05_difficult',
             os.path.join('datasets', 'EuRoC', 'MH_05_difficult')),
            ('EuRoC V1_01_easy', os.path.join('datasets', 'EuRoC',
                                              'V1_01_easy')),
            ('EuRoC V1_02_medium',
             os.path.join('datasets', 'EuRoC', 'V1_02_medium')),
            ('EuRoC V1_03_difficult',
             os.path.join('datasets', 'EuRoC', 'V1_03_difficult')),
            ('EuRoC V2_01_easy', os.path.join('datasets', 'EuRoC',
                                              'V2_01_easy')),
            ('EuRoC V2_02_medium',
             os.path.join('datasets', 'EuRoC', 'V2_02_medium')),
            ('EuRoC V2_03_difficult',
             os.path.join('datasets', 'EuRoC', 'V2_03_difficult'))
        ]:
            self.import_dataset(
                name=name,
                module_name='arvet_slam.dataset.euroc.euroc_loader',
                path=path,
                task_manager=task_manager,
                path_manager=path_manager)

        # Import TUM datasets without using the manager; it is unnecessary here
        for folder in arvet_slam.dataset.tum.tum_manager.dataset_names:
            self.import_dataset(
                name="TUM {0}".format(folder),
                module_name='arvet_slam.dataset.tum.tum_loader',
                path=os.path.join('datasets', 'TUM', folder),
                task_manager=task_manager,
                path_manager=path_manager)

        # --------- SYSTEMS -----------

        self.import_system(name='LibVisO',
                           system=libviso2.LibVisOSystem(),
                           db_client=db_client)

        # ORBSLAM2 - Create 3 variants, with different processing modes
        vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
        for sensor_mode in {
                orbslam2.SensorMode.STEREO, orbslam2.SensorMode.RGBD,
                orbslam2.SensorMode.MONOCULAR
        }:
            self.import_system(
                name='ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower()),
                db_client=db_client,
                system=orbslam2.ORBSLAM2(
                    vocabulary_file=vocab_path,
                    mode=sensor_mode,
                    settings={'ORBextractor': {'nFeatures': 1500}}))
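
# Helpers such as import_dataset / import_system only need to do work the first time a
# given name is seen. A standalone sketch of that import-once bookkeeping, keyed by name.
# `ImportRegistry` is hypothetical and deliberately simplified; the real experiment
# classes persist their state through the database rather than an in-memory dict.
class ImportRegistry:
    def __init__(self):
        self._imported = {}   # name -> identifier of the imported object

    def __len__(self):
        return len(self._imported)

    def import_once(self, name, create):
        """Call `create()` and remember the result the first time `name` is seen."""
        if name not in self._imported:
            self._imported[name] = create()
        return self._imported[name]


if __name__ == '__main__':
    registry = ImportRegistry()
    for sequence_num in range(11):
        name = 'KITTI {0:02}'.format(sequence_num)
        registry.import_once(name, lambda n=name: 'id-for-{0}'.format(n))
    print(len(registry))   # 11 -- one entry per KITTI sequence
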
    def do_imports(self, task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for finding dataset and simulator paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """
        # --------- SIMULATORS -----------
        # Add simulators explicitly; they have different metadata, so we can't just search for them
        for exe, world_name, environment_type, light_level, time_of_day in [
            (
                    'simulators/CorridorWorld/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                    'CorridorWorld', imeta.EnvironmentType.OUTDOOR_LANDSCAPE, imeta.LightingLevel.WELL_LIT,
                    imeta.TimeOfDay.DAY
            )
        ]:
            if world_name not in self._simulators:
                simulator_id = dh.add_unique(db_client.image_source_collection, uecv_sim.UnrealCVSimulator(
                    executable_path=exe,
                    world_name=world_name,
                    environment_type=environment_type,
                    light_level=light_level,
                    time_of_day=time_of_day
                ))
                self._simulators[world_name] = simulator_id
                self._set_property('simulators.{0}'.format(world_name), simulator_id)

        # --------- TRAJECTORY GROUPS -----------

        for name, path in [
            ('forwards', get_forwards_trajectory()),
            ('upwards', get_upward_trajectory()),
            ('left', get_left_trajectory()),
            ('line roll', get_line_roll_trajectory()),
            ('line pitch', get_line_pitch_trajectory()),
            ('line yaw', get_line_yaw_trajectory()),
        ]:
            if name not in self._trajectory_groups:
                # First, create the trajectory follow controller with the desired trajectory
                controller = follow_cont.TrajectoryFollowController(
                    trajectory=path,
                    trajectory_source='custom {0}'.format(name),
                    sequence_type=sequence_type.ImageSequenceType.SEQUENTIAL)
                controller_id = dh.add_unique(db_client.image_source_collection, controller)

                # Then create a trajectory group for it
                self._trajectory_groups[name] = TrajectoryGroup(
                    name=name, controller_id=controller_id,
                    simulators={'CorridorWorld': self._simulators['CorridorWorld']})
                self._set_property('trajectory_groups.{0}'.format(name), self._trajectory_groups[name].serialize())
        for group in self._trajectory_groups.values():
            if group.do_imports(task_manager):
                self._set_property('trajectory_groups.{0}'.format(group.name), group.serialize())

        # --------- SYSTEMS -----------
        if self._libviso_system is None:
            self._libviso_system = dh.add_unique(db_client.system_collection, libviso2.LibVisOSystem())
            self._set_property('libviso', self._libviso_system)

        # ORBSLAM2 - Create orbslam systems in each sensor mode
        for sensor_mode in {orbslam2.SensorMode.STEREO, orbslam2.SensorMode.RGBD, orbslam2.SensorMode.MONOCULAR}:
            name = 'ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower()).replace('.', '-')
            vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
            is_valid_path = True
            try:
                path_manager.find_file(vocab_path)
            except FileNotFoundError:
                is_valid_path = False

            if name not in self._orbslam_systems and is_valid_path:
                orbslam_id = dh.add_unique(db_client.system_collection, orbslam2.ORBSLAM2(
                    vocabulary_file=vocab_path,
                    mode=sensor_mode,
                    settings={
                        'ORBextractor': {'nFeatures': 1500}
                    }
                ))
                self._orbslam_systems[name] = orbslam_id
                self._set_property('orbslam_systems.{}'.format(name), orbslam_id)
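
# The vocabulary-file check above wraps path_manager.find_file(...) in a try/except
# FileNotFoundError and only imports the system when the file resolves. A standalone
# sketch of the same "resolve or skip" pattern; `resolve_or_none` is a hypothetical
# helper and not arvet's PathManager.
import pathlib


def resolve_or_none(relative_path: str, search_roots: list):
    """Return the first existing absolute path for `relative_path`, or None."""
    for root in search_roots:
        candidate = pathlib.Path(root) / relative_path
        if candidate.exists():
            return candidate
    return None


if __name__ == '__main__':
    vocab = resolve_or_none('systems/ORBSLAM2/ORBvoc.txt', ['.', '/data'])
    if vocab is None:
        print('ORB vocabulary not found, skipping ORBSLAM2 import')
    else:
        print('Using vocabulary at {0}'.format(vocab))
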
    def do_imports(self,
                   task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for resolving file system paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """

        # --------- SIMULATORS -----------
        # Add simulators explicitly; they have different metadata, so we can't just search for them
        for exe, world_name, environment_type, light_level, time_of_day in [
            ('simulators/AIUE_V01_001/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_001', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            ('simulators/AIUE_V01_002/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_002', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            ('simulators/AIUE_V01_003/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_003', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            ('simulators/AIUE_V01_004/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_004', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            (
                'simulators/AIUE_V01_005/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                'AIUE_V01_005', imeta.EnvironmentType.INDOOR,
                imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY
                # ), (
                #         'simulators/AIUE_V02_001/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                #         'AIUE_V02_001', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
                #         imeta.TimeOfDay.DAY
            )
        ]:
            self.import_simulator(executable_path=exe,
                                  world_name=world_name,
                                  environment_type=environment_type,
                                  light_level=light_level,
                                  time_of_day=time_of_day,
                                  db_client=db_client)

        # --------- REAL WORLD DATASETS -----------

        # Import EuRoC datasets with lists of trajectory start points for each simulator
        for name, path, mappings in [
            ('EuRoC MH_01_easy', os.path.join('datasets', 'EuRoC',
                                              'MH_01_easy'),
             euroc_origins.get_MH_01_easy()),
            ('EuRoC MH_02_easy', os.path.join('datasets', 'EuRoC',
                                              'MH_02_easy'),
             euroc_origins.get_MH_02_easy()),
            ('EuRoC MH_03_medium',
             os.path.join('datasets', 'EuRoC',
                          'MH_03_medium'), euroc_origins.get_MH_03_medium()),
            ('EuRoC MH_04_difficult',
             os.path.join('datasets', 'EuRoC', 'MH_04_difficult'),
             euroc_origins.get_MH_04_difficult()),
            ('EuRoC MH_05_difficult',
             os.path.join('datasets', 'EuRoC', 'MH_05_difficult'),
             euroc_origins.get_MH_05_difficult()),
            ('EuRoC V1_01_easy', os.path.join('datasets', 'EuRoC',
                                              'V1_01_easy'),
             euroc_origins.get_V1_01_easy()),
            ('EuRoC V1_02_medium',
             os.path.join('datasets', 'EuRoC',
                          'V1_02_medium'), euroc_origins.get_V1_02_medium()),
            ('EuRoC V1_03_difficult',
             os.path.join('datasets', 'EuRoC', 'V1_03_difficult'),
             euroc_origins.get_V1_03_difficult()),
            ('EuRoC V2_01_easy', os.path.join('datasets', 'EuRoC',
                                              'V2_01_easy'),
             euroc_origins.get_V2_01_easy()),
            ('EuRoC V2_02_medium',
             os.path.join('datasets', 'EuRoC',
                          'V2_02_medium'), euroc_origins.get_V2_02_medium()),
            ('EuRoC V2_03_difficult',
             os.path.join('datasets', 'EuRoC', 'V2_03_difficult'),
             euroc_origins.get_V2_03_difficult())
        ]:
            self.import_dataset(
                module_name='arvet_slam.dataset.euroc.euroc_loader',
                path=path,
                name=name,
                mappings=mappings,
                task_manager=task_manager,
                path_manager=path_manager)

        # Import TUM datasets with lists of trajectory start points for each simulator
        for folder, mappings in [
            ('rgbd_dataset_freiburg1_360', tum_origins.get_frieburg1_360()),
            ('rgbd_dataset_freiburg1_rpy', tum_origins.get_frieburg1_rpy()),
            ('rgbd_dataset_freiburg1_xyz', tum_origins.get_frieburg1_xyz()),
            ('rgbd_dataset_freiburg2_desk', tum_origins.get_frieburg2_desk()),
            ('rgbd_dataset_freiburg2_rpy', tum_origins.get_frieburg2_rpy()),
            ('rgbd_dataset_freiburg2_xyz', tum_origins.get_frieburg2_xyz()),
            ('rgbd_dataset_freiburg3_structure_texture_far',
             tum_origins.get_frieburg3_structure_texture_far()),
            ('rgbd_dataset_freiburg3_walking_xyz',
             tum_origins.get_frieburg3_walking_xyz())
        ]:
            self.import_dataset(
                module_name='arvet_slam.dataset.tum.tum_loader',
                path=os.path.join('datasets', 'TUM', folder),
                name="TUM {0}".format(folder),
                mappings=mappings,
                task_manager=task_manager,
                path_manager=path_manager)

        # Import KITTI datasets
        for sequence_num in range(11):
            self.import_dataset(
                module_name='arvet_slam.dataset.kitti.kitti_loader',
                name='KITTI trajectory {}'.format(sequence_num),
                path=os.path.join('datasets', 'KITTI', 'dataset'),
                additional_args={'sequence_number': sequence_num},
                mappings=kitti_origins.get_mapping(sequence_num),
                task_manager=task_manager,
                path_manager=path_manager)

        # --------- SYSTEMS -----------
        # LibVisO2
        self.import_system(name='LibVisO',
                           system=libviso2.LibVisOSystem(),
                           db_client=db_client)

        # ORBSLAM2 - Create 3 variants, one for each sensor mode
        vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
        for sensor_mode in {
                orbslam2.SensorMode.STEREO, orbslam2.SensorMode.MONOCULAR,
                orbslam2.SensorMode.RGBD
        }:
            self.import_system(
                name='ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower()),
                system=orbslam2.ORBSLAM2(
                    vocabulary_file=vocab_path,
                    mode=sensor_mode,
                    settings={'ORBextractor': {'nFeatures': 1500}}),
                db_client=db_client)

        # --------- BENCHMARKS -----------
        # Add benchmarks to calculate the errors on a per-estimate and per-frame basis
        self.import_benchmark(
            name='Estimate Errors',
            benchmark=estimate_errors_benchmark.EstimateErrorsBenchmark(),
            db_client=db_client)
        self.import_benchmark(
            name='Frame Errors',
            benchmark=frame_errors_benchmark.FrameErrorsBenchmark(),
            db_client=db_client)

        # --------- TRAJECTORY GROUPS -----------
        # Update the trajectory groups
        # We call this at the end so that any new ones created by import datasets will be updated and saved.
        for trajectory_group in self.trajectory_groups.values():
            self.update_trajectory_group(trajectory_group, task_manager,
                                         db_client)
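
# The `mappings` passed to import_dataset pair each simulator world with a starting pose
# for the recorded trajectory, so the same motion can be replayed inside a simulation.
# A standalone sketch of one way to represent such a mapping; the structure and field
# names here are illustrative and do not necessarily mirror what euroc_origins /
# tum_origins actually return.
import dataclasses
import typing


@dataclasses.dataclass
class TrajectoryOrigin:
    world_name: str                                       # e.g. 'AIUE_V01_001'
    location: typing.Tuple[float, float, float]           # x, y, z in the simulator world
    rotation: typing.Tuple[float, float, float, float]    # orientation quaternion w, x, y, z


def get_example_mappings() -> typing.List[TrajectoryOrigin]:
    # Hypothetical values, for illustration only
    return [
        TrajectoryOrigin('AIUE_V01_001', (1.0, 2.0, 0.0), (1.0, 0.0, 0.0, 0.0)),
        TrajectoryOrigin('AIUE_V01_002', (-0.5, 3.2, 0.0), (1.0, 0.0, 0.0, 0.0)),
    ]


if __name__ == '__main__':
    for origin in get_example_mappings():
        print('{0}: start at {1}'.format(origin.world_name, origin.location))
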
    def do_imports(self, task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for resolving file system paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """

        # --------- SIMULATORS -----------
        # Add simulators explicitly; they have different metadata, so we can't just search for them
        for exe, world_name, environment_type, light_level, time_of_day in [
            (
                    'simulators/AIUE_V01_001/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                    'AIUE_V01_001', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
                    imeta.TimeOfDay.DAY
            ), (
                    'simulators/AIUE_V01_002/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                    'AIUE_V01_002', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
                    imeta.TimeOfDay.DAY
            ), (
                    'simulators/AIUE_V01_003/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                    'AIUE_V01_003', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
                    imeta.TimeOfDay.DAY
            ), (
                    'simulators/AIUE_V01_004/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                    'AIUE_V01_004', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
                    imeta.TimeOfDay.DAY
            ), (
                    'simulators/AIUE_V01_005/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                    'AIUE_V01_005', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
                    imeta.TimeOfDay.DAY
                    # ), (
                    #         'simulators/AIUE_V02_001/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                    #         'AIUE_V02_001', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
                    #         imeta.TimeOfDay.DAY
            )
        ]:
            self.import_simulator(
                executable_path=exe,
                world_name=world_name,
                environment_type=environment_type,
                light_level=light_level,
                time_of_day=time_of_day,
                db_client=db_client
            )

        # --------- REAL WORLD DATASETS -----------
        # Import EuRoC datasets with lists of trajectory start points for each simulator
        for name, path, mappings in [
            ('EuRoC MH_01_easy', os.path.join('datasets', 'EuRoC', 'MH_01_easy'), euroc_origins.get_MH_01_easy()),
            ('EuRoC MH_02_easy', os.path.join('datasets', 'EuRoC', 'MH_02_easy'), euroc_origins.get_MH_02_easy()),
            ('EuRoC MH_03_medium', os.path.join('datasets', 'EuRoC', 'MH_03_medium'),
             euroc_origins.get_MH_03_medium()),
            ('EuRoC MH_04_difficult', os.path.join('datasets', 'EuRoC', 'MH_04_difficult'),
             euroc_origins.get_MH_04_difficult()),
            ('EuRoC MH_05_difficult', os.path.join('datasets', 'EuRoC', 'MH_05_difficult'),
             euroc_origins.get_MH_05_difficult()),
            ('EuRoC V1_01_easy', os.path.join('datasets', 'EuRoC', 'V1_01_easy'), euroc_origins.get_V1_01_easy()),
            ('EuRoC V1_02_medium', os.path.join('datasets', 'EuRoC', 'V1_02_medium'),
             euroc_origins.get_V1_02_medium()),
            ('EuRoC V1_03_difficult', os.path.join('datasets', 'EuRoC', 'V1_03_difficult'),
             euroc_origins.get_V1_03_difficult()),
            ('EuRoC V2_01_easy', os.path.join('datasets', 'EuRoC', 'V2_01_easy'), euroc_origins.get_V2_01_easy()),
            ('EuRoC V2_02_medium', os.path.join('datasets', 'EuRoC', 'V2_02_medium'),
             euroc_origins.get_V2_02_medium()),
            ('EuRoC V2_03_difficult', os.path.join('datasets', 'EuRoC', 'V2_03_difficult'),
             euroc_origins.get_V2_03_difficult())
        ]:
            self.import_dataset(
                module_name='arvet_slam.dataset.euroc.euroc_loader',
                path=path,
                name=name,
                mappings=mappings,
                task_manager=task_manager,
                path_manager=path_manager
            )

        # Import TUM datasets with lists of trajectory start points for each simulator
        for folder, mappings in [
            ('rgbd_dataset_freiburg1_360', tum_origins.get_frieburg1_360()),
            ('rgbd_dataset_freiburg1_rpy', tum_origins.get_frieburg1_rpy()),
            ('rgbd_dataset_freiburg1_xyz', tum_origins.get_frieburg1_xyz()),
            ('rgbd_dataset_freiburg2_desk', tum_origins.get_frieburg2_desk()),
            ('rgbd_dataset_freiburg2_rpy', tum_origins.get_frieburg2_rpy()),
            ('rgbd_dataset_freiburg2_xyz', tum_origins.get_frieburg2_xyz()),
            ('rgbd_dataset_freiburg3_structure_texture_far', tum_origins.get_frieburg3_structure_texture_far()),
            ('rgbd_dataset_freiburg3_walking_xyz', tum_origins.get_frieburg3_walking_xyz())
        ]:
            self.import_dataset(
                module_name='arvet_slam.dataset.tum.tum_loader',
                path=os.path.join('datasets', 'TUM', folder),
                name="TUM {0}".format(folder),
                mappings=mappings,
                task_manager=task_manager,
                path_manager=path_manager
            )

        # --------- SYSTEMS -----------
        # LibVisO2
        self.import_system(
            name='LibVisO',
            system=libviso2.LibVisOSystem(),
            db_client=db_client
        )

        # ORBSLAM2 - Create 3 variants; stereo, mono, and rgbd
        vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
        for sensor_mode in {orbslam2.SensorMode.STEREO, orbslam2.SensorMode.MONOCULAR, orbslam2.SensorMode.RGBD}:
            self.import_system(
                name='ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower()),
                system=orbslam2.ORBSLAM2(
                    vocabulary_file=vocab_path,
                    mode=sensor_mode,
                    settings={'ORBextractor': {'nFeatures': 1500}}
                ),
                db_client=db_client
            )

        # Do the superclass imports
        super().do_imports(task_manager, path_manager, db_client)
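
# The example above ends by calling super().do_imports(...), so the subclass layers its
# own simulators and datasets on top of whatever the base experiment imports. A minimal
# standalone sketch of that layering; BaseExperiment and IndoorExperiment are hypothetical
# names used for illustration only.
class BaseExperiment:
    def do_imports(self, task_manager, path_manager, db_client):
        print('base: import shared systems and benchmarks')


class IndoorExperiment(BaseExperiment):
    def do_imports(self, task_manager, path_manager, db_client):
        print('subclass: import indoor simulators and datasets')
        # Call the base class last so its imports see any state set up above
        super().do_imports(task_manager, path_manager, db_client)


if __name__ == '__main__':
    IndoorExperiment().do_imports(task_manager=None, path_manager=None, db_client=None)
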
    def do_imports(self,
                   task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for resolving file system paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """
        # --------- SIMULATORS -----------
        # Add simulators explicitly; they have different metadata, so we can't just search for them
        for exe, world_name, environment_type, light_level, time_of_day in [
                # (
                #         'simulators/AIUE_V01_001/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
                #         'AIUE_V01_001', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
                #         imeta.TimeOfDay.DAY
                # )
        ]:
            self.import_simulator(executable_path=exe,
                                  world_name=world_name,
                                  environment_type=environment_type,
                                  light_level=light_level,
                                  time_of_day=time_of_day,
                                  db_client=db_client)

        # --------- REAL WORLD DATASETS -----------

        # Import KITTI datasets
        for sequence_num in range(11):
            self.import_dataset(
                module_name='arvet_slam.dataset.kitti.kitti_loader',
                name='KITTI trajectory {}'.format(sequence_num),
                path=os.path.join('datasets', 'KITTI', 'dataset'),
                additional_args={'sequence_number': sequence_num},
                mappings=kitti_origins.get_mapping(sequence_num),
                task_manager=task_manager,
                path_manager=path_manager,
                db_client=db_client,
            )

        # --------- SYSTEMS -----------
        # LibVisO2
        self.import_system(name='LibVisO',
                           system=libviso2.LibVisOSystem(),
                           db_client=db_client)

        # ORBSLAM2 - Create 3 variants, with different processing modes
        vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
        for sensor_mode in {
                orbslam2.SensorMode.STEREO, orbslam2.SensorMode.RGBD,
                orbslam2.SensorMode.MONOCULAR
        }:
            self.import_system(
                name='ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower()),
                system=orbslam2.ORBSLAM2(
                    vocabulary_file=vocab_path,
                    mode=sensor_mode,
                    settings={'ORBextractor': {'nFeatures': 1500}}),
                db_client=db_client)

        # --------- BENCHMARKS -----------
        # Create and store the benchmarks for camera trajectories
        # Just using the default settings for now

        self.import_benchmark(name='Relative Pose Error',
                              db_client=db_client,
                              benchmark=rpe.BenchmarkRPE(max_pairs=10000,
                                                         fixed_delta=False,
                                                         delta=1.0,
                                                         delta_unit='s',
                                                         offset=0,
                                                         scale_=1))
        self.import_benchmark(name='Absolute Trajectory Error',
                              db_client=db_client,
                              benchmark=ate.BenchmarkATE(offset=0,
                                                         max_difference=0.2,
                                                         scale=1))
        self.import_benchmark(
            name='Trajectory Drift',
            db_client=db_client,
            benchmark=traj_drift.BenchmarkTrajectoryDrift(
                segment_lengths=[100, 200, 300, 400, 500, 600, 700, 800],
                step_size=10))
        self.import_benchmark(name='Tracking Statistics',
                              db_client=db_client,
                              benchmark=tracking_benchmark.TrackingBenchmark(
                                  initializing_is_lost=True))
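
# The offset / max_difference / scale parameters on BenchmarkATE above follow the TUM
# RGB-D evaluation convention: estimated and ground-truth poses are associated by
# timestamp (within max_difference seconds, after shifting by offset and scaling), and
# the error is the RMSE of the translational differences. A standalone sketch of that
# association-plus-RMSE step, omitting the rigid-body alignment a full ATE computation
# performs; every function name here is illustrative only.
import math


def associate(ground_truth, estimated, offset=0.0, max_difference=0.2):
    """Pair ground-truth and estimated timestamps that are within max_difference seconds."""
    pairs = []
    used = set()
    for gt_time in sorted(ground_truth):
        best = min(
            (est_time for est_time in estimated if est_time not in used),
            key=lambda est_time: abs(gt_time - (est_time + offset)),
            default=None)
        if best is not None and abs(gt_time - (best + offset)) <= max_difference:
            used.add(best)
            pairs.append((gt_time, best))
    return pairs


def ate_rmse(ground_truth, estimated, offset=0.0, max_difference=0.2, scale=1.0):
    """Root mean squared translational error over associated pose pairs."""
    pairs = associate(ground_truth, estimated, offset, max_difference)
    if not pairs:
        return float('nan')
    squared = []
    for gt_time, est_time in pairs:
        gx, gy, gz = ground_truth[gt_time]
        ex, ey, ez = (scale * value for value in estimated[est_time])
        squared.append((gx - ex) ** 2 + (gy - ey) ** 2 + (gz - ez) ** 2)
    return math.sqrt(sum(squared) / len(squared))


if __name__ == '__main__':
    gt = {0.0: (0.0, 0.0, 0.0), 1.0: (1.0, 0.0, 0.0), 2.0: (2.0, 0.0, 0.0)}
    est = {0.05: (0.0, 0.1, 0.0), 1.02: (0.9, 0.0, 0.0), 2.01: (2.1, 0.1, 0.0)}
    print('ATE RMSE: {0:.3f} m'.format(ate_rmse(gt, est)))
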