Code example #1
    def test_benchmark_results_estimates_no_error_for_noiseless_trajectory(
            self):
        # Create a new computed trajectory with no noise, but a fixed offset from the real trajectory
        # That is, the relative motions are the same, but the start point is different
        for trial_result in self.trial_results:
            comp_traj, _ = create_noise(
                trajectory=trial_result.ground_truth_trajectory,
                random_state=self.random,
                time_offset=0,
                time_noise=0,
                loc_noise=0,
                rot_noise=0)
            trial_result.computed_trajectory = comp_traj

        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)

        # Check all the errors are zero
        values = collect_values(result, 0)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 1)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 2)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
        values = collect_values(result, 3)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
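
These tests rely on a collect_values helper that is not shown in the excerpts. A minimal sketch, inferred from its usage here and from the length-18 per-frame error tuples checked in code example #4 (not the test module's actual implementation):

import numpy as np

def collect_values(result, index):
    # Hypothetical helper: pull the error component at `index` out of
    # every per-frame error tuple in the benchmark result
    return np.array([errors[index]
                     for errors in result.frame_errors.values()])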
Code example #2
    def test_benchmark_results_estimates_reasonable_trajectory_error_per_frame(
            self):
        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        # The noise added to each location is < 10 per axis; taking differences
        # to get motions doubles that bound to < 20 per axis.
        # Changes to the orientation also perturb the relative location, and
        # hence the motion, so the thresholds below are deliberately loose.
        self.assertLessEqual(np.max(collect_values(result, 0)), 100)
        self.assertLessEqual(np.max(collect_values(result, 2)), np.pi / 32)
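
A rough sanity check of the 100 threshold above, assuming the setup adds location noise below 10 per axis as the comment states:

import numpy as np

# A motion is the difference of two noisy locations, so the per-axis error
# stays below 2 * 10 = 20; the worst-case Euclidean norm over three axes:
worst_case = 20 * np.sqrt(3)  # ~34.6, comfortably below the 100 threshold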
Code example #3
    def test_benchmark_results_returns_a_benchmark_result(self):
        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        self.assertIsInstance(result, arvet.core.benchmark.BenchmarkResult)
        self.assertNotIsInstance(result, arvet.core.benchmark.FailedBenchmark)
        self.assertEqual(benchmark.identifier, result.benchmark)
        self.assertEqual(
            set(trial_result.identifier
                for trial_result in self.trial_results),
            set(result.trial_results))
Code example #4
    def test_benchmark_results_one_observation_per_frame(self):
        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)

        if isinstance(result, arvet.core.benchmark.FailedBenchmark):
            print(result.reason)

        self.assertEqual(
            len(self.trial_results[0].ground_truth_trajectory) - 1,
            len(result.frame_errors))
        for error_measurement in result.frame_errors.values():
            self.assertEqual(18, len(error_measurement))
Code example #5
    def test_benchmark_results_fails_for_trials_from_different_systems(self):
        trajectory = create_random_trajectory(self.random)
        mixed_trial_results = self.trial_results + [
            MockTrialResult(gt_trajectory=trajectory,
                            comp_trajectory=trajectory,
                            system_id=bson.ObjectId())
        ]

        # Perform the benchmark
        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(mixed_trial_results)
        self.assertIsInstance(result, arvet.core.benchmark.FailedBenchmark)
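
MockTrialResult and create_random_trajectory come from the test module's setup code, which is not included in these excerpts. A minimal sketch of the mock, with attribute names inferred from how these tests use it:

import bson

class MockTrialResult:
    # Hypothetical stand-in: the real test fixture may carry more state
    def __init__(self, gt_trajectory, comp_trajectory, system_id):
        self.identifier = bson.ObjectId()
        self.system_id = system_id
        self.ground_truth_trajectory = gt_trajectory
        self.computed_trajectory = comp_trajectory
        # Per-timestamp data, rekeyed by the no-observations test below
        self.tracking_states = {}
        self.num_features = {}
        self.num_matches = {}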
Code example #6
    def test_benchmark_results_estimates_no_noise_for_identical_trajectory(
            self):
        # Make all the trial results have exactly the same computed trajectories
        for trial_result in self.trial_results[1:]:
            trial_result.computed_trajectory = copy.deepcopy(
                self.trial_results[0].computed_trajectory)

        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)

        # Check all the noise estimates are zero
        values = collect_values(result, 4)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 5)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 6)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
        values = collect_values(result, 7)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
Code example #7
    def test_benchmark_results_estimates_no_error_for_identical_trajectory(
            self):
        # Copy the ground truth exactly
        for trial_result in self.trial_results:
            trial_result.computed_trajectory = copy.deepcopy(
                trial_result.ground_truth_trajectory)

        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)

        if isinstance(result, arvet.core.benchmark.FailedBenchmark):
            print(result.reason)

        # Check all the errors are zero
        values = collect_values(result, 0)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 1)
        self.assertNPClose(np.zeros(values.shape), values)
        # We need more tolerance for the rotational error, because arccos
        # turns the smallest possible floating-point change into a value
        # around 2e-8
        values = collect_values(result, 2)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
        values = collect_values(result, 3)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
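
To see why the rotational checks need the looser atol=1e-7, consider how arccos amplifies floating-point rounding near 1.0:

import numpy as np

# Comparing two nominally identical rotations, the dot product can land
# one machine epsilon below 1.0, and arccos blows that up to ~2e-8 radians
eps = np.finfo(float).eps     # ~2.22e-16
angle = np.arccos(1.0 - eps)  # ~2.1e-8, hence atol=1e-7 in the asserts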
Code example #8
    def test_benchmark_results_fails_for_no_observations(self):
        # Adjust the computed timestamps so none of them match
        for trial_result in self.trial_results:
            trial_result.computed_trajectory = {
                time + 10000: pose
                for time, pose in trial_result.computed_trajectory.items()
            }
            trial_result.tracking_states = {
                time + 10000: state
                for time, state in trial_result.tracking_states.items()
            }
            trial_result.num_features = {
                time + 10000: features
                for time, features in trial_result.num_features.items()
            }
            trial_result.num_matches = {
                time + 10000: matches
                for time, matches in trial_result.num_matches.items()
            }

        # Perform the benchmark
        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        self.assertIsInstance(result, arvet.core.benchmark.FailedBenchmark)
Code example #9
    def do_imports(self,
                   task_manager: arvet.batch_analysis.task_manager.TaskManager,
                   path_manager: arvet.config.path_manager.PathManager,
                   db_client: arvet.database.client.DatabaseClient):
        """
        Import image sources for evaluation in this experiment
        :param task_manager: The task manager, for creating import tasks
        :param path_manager: The path manager, for resolving file system paths
        :param db_client: The database client, for saving declared objects too small to need a task
        :return:
        """

        # --------- SIMULATORS -----------
        # Add simulators explicitly; they each have different metadata, so we can't just search
        for exe, world_name, environment_type, light_level, time_of_day in [
            ('simulators/AIUE_V01_001/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_001', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            ('simulators/AIUE_V01_002/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_002', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            ('simulators/AIUE_V01_003/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_003', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            ('simulators/AIUE_V01_004/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_004', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            ('simulators/AIUE_V01_005/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
             'AIUE_V01_005', imeta.EnvironmentType.INDOOR,
             imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
            # ('simulators/AIUE_V02_001/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
            #  'AIUE_V02_001', imeta.EnvironmentType.INDOOR,
            #  imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY),
        ]:
            self.import_simulator(executable_path=exe,
                                  world_name=world_name,
                                  environment_type=environment_type,
                                  light_level=light_level,
                                  time_of_day=time_of_day,
                                  db_client=db_client)

        # --------- REAL WORLD DATASETS -----------

        # Import EuRoC datasets with lists of trajectory start points for each simulator
        for name, path, mappings in [
            ('EuRoC MH_01_easy',
             os.path.join('datasets', 'EuRoC', 'MH_01_easy'),
             euroc_origins.get_MH_01_easy()),
            ('EuRoC MH_02_easy',
             os.path.join('datasets', 'EuRoC', 'MH_02_easy'),
             euroc_origins.get_MH_02_easy()),
            ('EuRoC MH_03_medium',
             os.path.join('datasets', 'EuRoC', 'MH_03_medium'),
             euroc_origins.get_MH_03_medium()),
            ('EuRoC MH_04_difficult',
             os.path.join('datasets', 'EuRoC', 'MH_04_difficult'),
             euroc_origins.get_MH_04_difficult()),
            ('EuRoC MH_05_difficult',
             os.path.join('datasets', 'EuRoC', 'MH_05_difficult'),
             euroc_origins.get_MH_05_difficult()),
            ('EuRoC V1_01_easy',
             os.path.join('datasets', 'EuRoC', 'V1_01_easy'),
             euroc_origins.get_V1_01_easy()),
            ('EuRoC V1_02_medium',
             os.path.join('datasets', 'EuRoC', 'V1_02_medium'),
             euroc_origins.get_V1_02_medium()),
            ('EuRoC V1_03_difficult',
             os.path.join('datasets', 'EuRoC', 'V1_03_difficult'),
             euroc_origins.get_V1_03_difficult()),
            ('EuRoC V2_01_easy',
             os.path.join('datasets', 'EuRoC', 'V2_01_easy'),
             euroc_origins.get_V2_01_easy()),
            ('EuRoC V2_02_medium',
             os.path.join('datasets', 'EuRoC', 'V2_02_medium'),
             euroc_origins.get_V2_02_medium()),
            ('EuRoC V2_03_difficult',
             os.path.join('datasets', 'EuRoC', 'V2_03_difficult'),
             euroc_origins.get_V2_03_difficult())
        ]:
            self.import_dataset(
                module_name='arvet_slam.dataset.euroc.euroc_loader',
                path=path,
                name=name,
                mappings=mappings,
                task_manager=task_manager,
                path_manager=path_manager)

        # Import TUM datasets with lists of trajectory start points for each simulator
        for folder, mappings in [
            ('rgbd_dataset_freiburg1_360', tum_origins.get_frieburg1_360()),
            ('rgbd_dataset_freiburg1_rpy', tum_origins.get_frieburg1_rpy()),
            ('rgbd_dataset_freiburg1_xyz', tum_origins.get_frieburg1_xyz()),
            ('rgbd_dataset_freiburg2_desk', tum_origins.get_frieburg2_desk()),
            ('rgbd_dataset_freiburg2_rpy', tum_origins.get_frieburg2_rpy()),
            ('rgbd_dataset_freiburg2_xyz', tum_origins.get_frieburg2_xyz()),
            ('rgbd_dataset_freiburg3_structure_texture_far',
             tum_origins.get_frieburg3_structure_texture_far()),
            ('rgbd_dataset_freiburg3_walking_xyz',
             tum_origins.get_frieburg3_walking_xyz())
        ]:
            self.import_dataset(
                module_name='arvet_slam.dataset.tum.tum_loader',
                path=os.path.join('datasets', 'TUM', folder),
                name="TUM {0}".format(folder),
                mappings=mappings,
                task_manager=task_manager,
                path_manager=path_manager)

        # Import KITTI datasets
        for sequence_num in range(11):
            self.import_dataset(
                module_name='arvet_slam.dataset.kitti.kitti_loader',
                name='KITTI trajectory {}'.format(sequence_num),
                path=os.path.join('datasets', 'KITTI', 'dataset'),
                additional_args={'sequence_number': sequence_num},
                mappings=kitti_origins.get_mapping(sequence_num),
                task_manager=task_manager,
                path_manager=path_manager)

        # --------- SYSTEMS -----------
        # LibVisO2
        self.import_system(name='LibVisO',
                           system=libviso2.LibVisOSystem(),
                           db_client=db_client)

        # ORBSLAM2 - create three variants: stereo, monocular, and RGB-D
        vocab_path = os.path.join('systems', 'ORBSLAM2', 'ORBvoc.txt')
        for sensor_mode in {
                orbslam2.SensorMode.STEREO, orbslam2.SensorMode.MONOCULAR,
                orbslam2.SensorMode.RGBD
        }:
            self.import_system(
                name='ORBSLAM2 {mode}'.format(mode=sensor_mode.name.lower()),
                system=orbslam2.ORBSLAM2(
                    vocabulary_file=vocab_path,
                    mode=sensor_mode,
                    settings={'ORBextractor': {
                        'nFeatures': 1500
                    }}),
                db_client=db_client)

        # --------- BENCHMARKS -----------
        # Add benchmarks to calculate the errors on a per-estimate and per-frame basis
        self.import_benchmark(
            name='Estimate Errors',
            benchmark=estimate_errors_benchmark.EstimateErrorsBenchmark(),
            db_client=db_client)
        self.import_benchmark(
            name='Frame Errors',
            benchmark=frame_errors_benchmark.FrameErrorsBenchmark(),
            db_client=db_client)

        # --------- TRAJECTORY GROUPS -----------
        # Update the trajectory groups
        # We call this at the end so that any new groups created by the dataset imports are updated and saved.
        for trajectory_group in self.trajectory_groups.values():
            self.update_trajectory_group(trajectory_group, task_manager,
                                         db_client)
Code example #10
    def make_instance(self, *args, **kwargs):
        return feb.FrameErrorsBenchmark(*args, **kwargs)