def test_benchmark_results_returns_a_benchmark_result(self):
    """A successful run yields a BenchmarkResult tied to both the benchmark and the trial."""
    benchmark = rpe.BenchmarkRPE()
    benchmark_result = benchmark.benchmark_results(self.trial_result)

    # Must be a real result, and specifically not the failure subtype
    self.assertIsInstance(benchmark_result, core.benchmark.BenchmarkResult)
    self.assertNotIsInstance(benchmark_result, core.benchmark.FailedBenchmark)

    # The result must record which benchmark produced it and which trial it measured
    self.assertEqual(benchmark.identifier, benchmark_result.benchmark)
    self.assertEqual(self.trial_result.identifier, benchmark_result.trial_result)
def test_benchmark_results_fails_for_no_matching_timestaps(self):
    """If no computed timestamp can be associated with ground truth, the benchmark must fail."""
    # Shift every timestamp far enough that no pair can be matched
    shifted_trajectory = {}
    for stamp, pose in self.trial_result.ground_truth_trajectory.items():
        shifted_trajectory[stamp + 10000] = pose
    self.trial_result.computed_trajectory = shifted_trajectory

    # Perform the benchmark; it should produce the failure result type
    benchmark = rpe.BenchmarkRPE()
    benchmark_result = benchmark.benchmark_results(self.trial_result)
    self.assertIsInstance(benchmark_result, core.benchmark.FailedBenchmark)
def main():
    """Profile BenchmarkRPE.benchmark_results over a randomly generated noisy trajectory."""
    # Fixed seed so successive profiling runs measure the same workload
    rng = np.random.RandomState(1311)
    trajectory = create_random_trajectory(rng)
    noisy_trajectory, noise = create_noise(trajectory, rng)
    trial_result = MockTrialResult(gt_trajectory=trajectory,
                                   comp_trajectory=noisy_trajectory)

    benchmark = rpe.BenchmarkRPE()
    # Run under the profiler, reporting hot spots ordered by call count
    profile.runctx('benchmark.benchmark_results(trial_result)',
                   globals=globals(),
                   locals=locals(),
                   sort='ncalls')
def test_benchmark_results_estimates_no_error_for_identical_trajectory(self):
    """Benchmarking the ground truth against an exact copy of itself reports zero error."""
    # Deep-copy so the comparison cannot be short-circuited by shared pose objects
    self.trial_result.computed_trajectory = copy.deepcopy(
        self.trial_result.ground_truth_trajectory)

    benchmark = rpe.BenchmarkRPE()
    result = benchmark.benchmark_results(self.trial_result)
    if isinstance(result, core.benchmark.FailedBenchmark):
        # Surface the failure reason to aid debugging before the asserts below fail
        print(result.reason)

    # Every per-timestamp error, translational and rotational, should be numerically zero
    for trans_error in result.translational_error.values():
        self.assertAlmostEqual(0, trans_error)
    for rot_error in result.rotational_error.values():
        self.assertAlmostEqual(0, rot_error)
def test_offset_shifts_query_trajectory_time(self):
    """The benchmark's offset parameter should compensate for a constant time shift."""
    # Build a computed trajectory whose timestamps lead ground truth by 1000 units
    comp_traj, _ = create_noise(self.trial_result.ground_truth_trajectory,
                                self.random,
                                time_offset=1000)
    self.trial_result.computed_trajectory = comp_traj

    # Without correction the timestamps cannot be associated, so the benchmark fails
    benchmark = rpe.BenchmarkRPE()
    result = benchmark.benchmark_results(self.trial_result)
    self.assertIsInstance(result, core.benchmark.FailedBenchmark)

    # Applying the opposite offset realigns the trajectories, so it should succeed
    benchmark.offset = -1000
    result = benchmark.benchmark_results(self.trial_result)
    self.assertNotIsInstance(result, core.benchmark.FailedBenchmark)
def test_scale_affects_trajectory_position(self):
    """Setting the benchmark's scale should correct a uniformly mis-scaled trajectory."""
    scale = 4243
    # Shrink every ground-truth location by the scale factor, keeping rotations intact
    scaled_trajectory = {}
    for stamp, pose in self.trial_result.ground_truth_trajectory.items():
        scaled_trajectory[stamp] = tf.Transform(location=pose.location / scale,
                                                rotation=pose.rotation_quat(True),
                                                w_first=True)
    self.trial_result.computed_trajectory = scaled_trajectory

    # Without scale correction, positions are wildly wrong -> large error
    benchmark = rpe.BenchmarkRPE()
    unscaled_result = benchmark.benchmark_results(self.trial_result)

    # With the matching scale applied, the maximum translation error must shrink
    benchmark.scale = scale
    result = benchmark.benchmark_results(self.trial_result)
    self.assertLess(result.trans_max, unscaled_result.trans_max)
def test_benchmark_results_estimates_no_error_for_noiseless_trajectory(self):
    """With every noise source disabled, the measured error must be zero throughout."""
    # Generate a "noisy" trajectory with all noise magnitudes forced to zero
    comp_traj, _ = create_noise(self.trial_result.ground_truth_trajectory,
                                self.random,
                                time_offset=0,
                                time_noise=0,
                                loc_noise=0,
                                rot_noise=0)
    self.trial_result.computed_trajectory = comp_traj

    benchmark = rpe.BenchmarkRPE()
    result = benchmark.benchmark_results(self.trial_result)
    for trans_error in result.translational_error.values():
        self.assertAlmostEqual(0, trans_error)
    for rot_error in result.rotational_error.values():
        self.assertAlmostEqual(0, rot_error)
def do_imports(self, task_manager: batch_analysis.task_manager.TaskManager,
               db_client: database.client.DatabaseClient):
    """
    Import image sources for evaluation in this experiment.

    Registers the UnrealCV simulators, imports the real-world datasets
    (KITTI sequences 00-10, the eleven EuRoC MAV sequences, and the TUM
    RGB-D sequences via the TUM manager), then registers the systems
    (LibViso2, ORBSLAM2 variants) and the benchmarks used for evaluation.

    :param task_manager: The task manager, for creating import tasks
    :param db_client: The database client, for saving declared objects too small to need a task
    :return:
    """
    # --------- SIMULATORS -----------
    # Add simulators explicitly, they have different metadata, so we can't just search
    for exe, world_name, environment_type, light_level, time_of_day in [
        ('/media/john/Storage/simulators/AIUE_V01_001/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
         'AIUE_V01_001', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
         imeta.TimeOfDay.DAY),
        # AIUE_V01_002 deliberately disabled for now:
        # (
        #     '/media/john/Storage/simulators/AIUE_V01_002/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
        #     'AIUE_V01_002', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
        #     imeta.TimeOfDay.DAY
        # ),
        ('/media/john/Storage/simulators/AIUE_V01_005/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
         'AIUE_V01_005', imeta.EnvironmentType.INDOOR, imeta.LightingLevel.WELL_LIT,
         imeta.TimeOfDay.DAY)
    ]:
        if world_name not in self._simulators:
            # add_unique de-duplicates, so re-running the experiment is safe
            simulator_id = dh.add_unique(
                db_client.image_source_collection,
                uecv_sim.UnrealCVSimulator(executable_path=exe,
                                           world_name=world_name,
                                           environment_type=environment_type,
                                           light_level=light_level,
                                           time_of_day=time_of_day))
            self._simulators[world_name] = simulator_id
            self._set_property('simulators.{0}'.format(world_name), simulator_id)

    # --------- REAL WORLD DATASETS -----------
    # Import KITTI dataset, one sequence at a time.
    # The dataset root is the same for every sequence, so compute it once.
    kitti_root = os.path.expanduser(os.path.join('~', 'datasets', 'KITTI', 'dataset'))
    for sequence_num in range(11):
        if os.path.isdir(kitti_root) and os.path.isdir(
                os.path.join(kitti_root, 'sequences', "{0:02}".format(sequence_num))):
            task = task_manager.get_import_dataset_task(
                module_name='dataset.kitti.kitti_loader',
                path=kitti_root,
                additional_args={'sequence_number': sequence_num},
                num_cpus=1,
                num_gpus=0,
                memory_requirements='3GB',
                expected_duration='12:00:00')
            if task.is_finished:
                trajectory_group = self._add_trajectory_group(
                    'KITTI trajectory {}'.format(sequence_num), task.result)
                self._update_trajectory_group(trajectory_group, task_manager, db_client)
            else:
                task_manager.do_task(task)

    # Import EuRoC datasets.
    # Display names are derived from the folder names so they cannot drift apart.
    # (Previously the MH_03_medium sequence was mislabelled 'EuRoC MH_02_medium'.)
    for folder in ['MH_01_easy', 'MH_02_easy', 'MH_03_medium', 'MH_04_difficult',
                   'MH_05_difficult', 'V1_01_easy', 'V1_02_medium', 'V1_03_difficult',
                   'V2_01_easy', 'V2_02_medium', 'V2_03_difficult']:
        name = 'EuRoC {0}'.format(folder)
        path = os.path.expanduser(os.path.join('~', 'datasets', 'EuRoC', folder))
        if os.path.isdir(path):
            task = task_manager.get_import_dataset_task(
                module_name='dataset.euroc.euroc_loader',
                path=path,
                num_cpus=1,
                num_gpus=0,
                memory_requirements='3GB',
                expected_duration='4:00:00')
            if task.is_finished:
                trajectory_group = self._add_trajectory_group(name, task.result)
                self._update_trajectory_group(trajectory_group, task_manager, db_client)
            else:
                task_manager.do_task(task)

    # Import TUM datasets using the manager. Load all the TUM datasets we can
    tum_manager = dataset.tum.tum_manager.TUMManager(
        {name: True for name in dataset.tum.tum_manager.dataset_names})
    tum_manager.do_imports(os.path.expanduser(os.path.join('~', 'datasets', 'TUM')),
                           task_manager)
    # NOTE(review): assumes tum_manager.datasets yields (name, dataset_id) pairs — confirm
    for name, dataset_id in tum_manager.datasets:
        trajectory_group = self._add_trajectory_group("TUM {}".format(name), dataset_id)
        self._update_trajectory_group(trajectory_group, task_manager, db_client)

    # --------- SYSTEMS -----------
    if self._libviso_system is None:
        self._libviso_system = dh.add_unique(db_client.system_collection,
                                             libviso2.LibVisOSystem())
        self._set_property('libviso', self._libviso_system)

    # ORBSLAM2 - create variants over the cross product of sensor modes and feature counts
    settings_list = [(sensor_mode, n_features)
                     for sensor_mode in {orbslam2.SensorMode.STEREO,
                                         orbslam2.SensorMode.RGBD,
                                         orbslam2.SensorMode.MONOCULAR}
                     for n_features in {1500}]
    if len(self._orbslam_systems) < len(settings_list):
        for sensor_mode, n_features in settings_list:
            name = 'ORBSLAM2 {mode} ({features})'.format(
                mode=sensor_mode.name.lower(), features=n_features)
            vocab_path = os.path.join('systems', 'slam', 'ORBSLAM2', 'ORBvoc.txt')
            # Skip variants we already have, and skip entirely if the vocabulary is missing
            if name not in self._orbslam_systems and os.path.isfile(vocab_path):
                orbslam_id = dh.add_unique(
                    db_client.system_collection,
                    orbslam2.ORBSLAM2(
                        vocabulary_file=vocab_path,
                        mode=sensor_mode,
                        settings={'ORBextractor': {'nFeatures': n_features}}))
                self._orbslam_systems[name] = orbslam_id
                self._set_property('orbslam_systems.{}'.format(name), orbslam_id)

    # --------- BENCHMARKS -----------
    # Create and store the benchmarks for camera trajectories
    # Just using the default settings for now
    if self._benchmark_rpe is None:
        self._benchmark_rpe = dh.add_unique(
            db_client.benchmarks_collection,
            rpe.BenchmarkRPE(max_pairs=10000,
                             fixed_delta=False,
                             delta=1.0,
                             delta_unit='s',
                             offset=0,
                             scale_=1))
        self._set_property('benchmark_rpe', self._benchmark_rpe)
    if self._benchmark_ate is None:
        self._benchmark_ate = dh.add_unique(
            db_client.benchmarks_collection,
            ate.BenchmarkATE(offset=0, max_difference=0.2, scale=1))
        self._set_property('benchmark_ate', self._benchmark_ate)
    if self._benchmark_trajectory_drift is None:
        self._benchmark_trajectory_drift = dh.add_unique(
            db_client.benchmarks_collection,
            traj_drift.BenchmarkTrajectoryDrift(
                segment_lengths=[100, 200, 300, 400, 500, 600, 700, 800],
                step_size=10))
        self._set_property('benchmark_trajectory_drift',
                           self._benchmark_trajectory_drift)
    if self._benchmark_tracking is None:
        self._benchmark_tracking = dh.add_unique(
            db_client.benchmarks_collection,
            tracking_benchmark.TrackingBenchmark(initializing_is_lost=True))
        self._set_property('benchmark_tracking', self._benchmark_tracking)
def make_instance(self, *args, **kwargs):
    """Build the benchmark under test, forwarding all arguments unchanged."""
    instance = rpe.BenchmarkRPE(*args, **kwargs)
    return instance
def test_benchmark_results_estimates_reasonable_trajectory_noise(self):
    """Smoke-run the benchmark over the fixture's noisy trajectory."""
    # NOTE(review): no assertions follow the benchmark call here — verify whether
    # the original test body was truncated or this is intentionally a smoke test.
    benchmark = rpe.BenchmarkRPE()
    result = benchmark.benchmark_results(self.trial_result)
def do_imports(self, task_manager: batch_analysis.task_manager.TaskManager,
               db_client: database.client.DatabaseClient):
    """
    Import image sources for evaluation in this experiment

    Registers the CorridorWorld simulator, builds a trajectory-follow
    controller and TrajectoryGroup for each scripted camera motion, then
    registers the systems (LibViso2, ORBSLAM2 per sensor mode) and the
    benchmarks used for evaluation. All registrations go through
    dh.add_unique, so repeated runs do not create duplicates.

    :param task_manager: The task manager, for creating import tasks
    :param db_client: The database client, for saving declared objects too small to need a task
    :return:
    """
    # --------- SIMULATORS -----------
    # Add simulators explicitly, they have different metadata, so we can't just search
    for exe, world_name, environment_type, light_level, time_of_day in [
        ('/media/john/Storage/simulators/CorridorWorld/LinuxNoEditor/tempTest/Binaries/Linux/tempTest',
         'CorridorWorld', imeta.EnvironmentType.OUTDOOR_LANDSCAPE,
         imeta.LightingLevel.WELL_LIT, imeta.TimeOfDay.DAY)
    ]:
        if world_name not in self._simulators:
            # Store the simulator and remember its id locally and as an experiment property
            simulator_id = dh.add_unique(
                db_client.image_source_collection,
                uecv_sim.UnrealCVSimulator(executable_path=exe,
                                           world_name=world_name,
                                           environment_type=environment_type,
                                           light_level=light_level,
                                           time_of_day=time_of_day))
            self._simulators[world_name] = simulator_id
            self._set_property('simulators.{0}'.format(world_name), simulator_id)

    # --------- TRAJECTORY GROUPS -----------
    # One group per scripted camera motion; each pairs a follow controller
    # with the CorridorWorld simulator.
    for name, path in [
        ('forwards', get_forwards_trajectory()),
        ('upwards', get_upward_trajectory()),
        ('left', get_left_trajectory()),
        ('on the spot roll', get_on_the_spot_roll_trajectory()),
        ('on the spot pitch', get_on_the_spot_pitch_trajectory()),
        ('on the spot yaw', get_on_the_spot_yaw_trajectory()),
        ('circle roll', get_circle_roll_trajectory()),
        ('circle pitch', get_circle_pitch_trajectory()),
        ('circle yaw', get_circle_yaw_trajectory()),
    ]:
        if name not in self._trajectory_groups:
            # First, create the trajectory follow controller with the desired trajectory
            controller = follow_cont.TrajectoryFollowController(
                trajectory=path,
                trajectory_source='custom {0}'.format(name),
                sequence_type=core.sequence_type.ImageSequenceType.SEQUENTIAL)
            controller_id = dh.add_unique(db_client.image_source_collection,
                                          controller)

            # Then create a trajectory group for it
            self._trajectory_groups[name] = TrajectoryGroup(
                name=name,
                controller_id=controller_id,
                simulators={'CorridorWorld': self._simulators['CorridorWorld']})
            self._set_property('trajectory_groups.{0}'.format(name),
                               self._trajectory_groups[name].serialize())
    # Let each group schedule its own import tasks; persist any that changed
    for group in self._trajectory_groups.values():
        if group.do_imports(task_manager, db_client):
            self._set_property('trajectory_groups.{0}'.format(group.name),
                               group.serialize())

    # --------- SYSTEMS -----------
    if self._libviso_system is None:
        self._libviso_system = dh.add_unique(db_client.system_collection,
                                             libviso2.LibVisOSystem())
        self._set_property('libviso', self._libviso_system)
    # ORBSLAM2 - Create orbslam systems in each sensor mode
    for sensor_mode in {orbslam2.SensorMode.STEREO, orbslam2.SensorMode.RGBD,
                        orbslam2.SensorMode.MONOCULAR}:
        # '.' is stripped from the name because it is used as a property key
        name = 'ORBSLAM2 {mode}'.format(
            mode=sensor_mode.name.lower()).replace('.', '-')
        vocab_path = os.path.join('systems', 'slam', 'ORBSLAM2', 'ORBvoc.txt')
        # Skip modes we already have; skip entirely if the vocabulary file is missing
        if name not in self._orbslam_systems and os.path.isfile(vocab_path):
            orbslam_id = dh.add_unique(
                db_client.system_collection,
                orbslam2.ORBSLAM2(vocabulary_file=vocab_path,
                                  mode=sensor_mode,
                                  settings={'ORBextractor': {'nFeatures': 1500}}))
            self._orbslam_systems[name] = orbslam_id
            self._set_property('orbslam_systems.{}'.format(name), orbslam_id)

    # --------- BENCHMARKS -----------
    # Create and store the benchmarks for camera trajectories
    # Just using the default settings for now
    if self._benchmark_rpe is None:
        self._benchmark_rpe = dh.add_unique(
            db_client.benchmarks_collection,
            rpe.BenchmarkRPE(max_pairs=10000,
                             fixed_delta=False,
                             delta=1.0,
                             delta_unit='s',
                             offset=0,
                             scale_=1))
        self._set_property('benchmark_rpe', self._benchmark_rpe)
    if self._benchmark_ate is None:
        self._benchmark_ate = dh.add_unique(
            db_client.benchmarks_collection,
            ate.BenchmarkATE(offset=0, max_difference=0.2, scale=1))
        self._set_property('benchmark_ate', self._benchmark_ate)
    if self._benchmark_trajectory_drift is None:
        self._benchmark_trajectory_drift = dh.add_unique(
            db_client.benchmarks_collection,
            traj_drift.BenchmarkTrajectoryDrift(
                segment_lengths=[100, 200, 300, 400, 500, 600, 700, 800],
                step_size=10))
        self._set_property('benchmark_trajectory_drift',
                           self._benchmark_trajectory_drift)
    if self._benchmark_tracking is None:
        self._benchmark_tracking = dh.add_unique(
            db_client.benchmarks_collection,
            tracking_benchmark.TrackingBenchmark(initializing_is_lost=True))
        self._set_property('benchmark_tracking', self._benchmark_tracking)