    def test_benchmark_results_estimates_no_error_for_noiseless_trajectory(self):
        # Create a new computed trajectory with no noise, but a fixed offset from the real trajectory
        # That is, the relative motions are the same, but the start point is different
        for trial_result in self.trial_results:
            comp_traj, _ = create_noise(
                trajectory=trial_result.ground_truth_trajectory,
                random_state=self.random,
                time_offset=0,
                time_noise=0,
                loc_noise=0,
                rot_noise=0
            )
            trial_result.computed_trajectory = comp_traj

        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)

        # Check all the errors are zero
        values = collect_values(result, 0)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 1)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 2)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
        values = collect_values(result, 3)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
    def test_benchmark_results_returns_a_benchmark_result(self):
        benchmark = eeb.EstimateTrialErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        self.assertIsInstance(result, arvet.core.benchmark.BenchmarkResult)
        self.assertNotIsInstance(result, arvet.core.benchmark.FailedBenchmark)
        self.assertEqual(benchmark.identifier, result.benchmark)
        self.assertEqual(set(trial_result.identifier for trial_result in self.trial_results),
                         set(result.trial_results))
    def test_benchmark_results_estimates_reasonable_trajectory_error_per_frame(self):
        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)

        # The noise added to each location is <10, converting to motions makes that <20, in each axis
        # But then, changes to the orientation tweak the relative location, and hence the motion
        self.assertLessEqual(np.max(collect_values(result, 0)), 100)
        self.assertLessEqual(np.max(collect_values(result, 2)), np.pi / 32)
    def test_benchmark_results_estimates_reasonable_trajectory_error_per_frame(self):
        benchmark = eeb.EstimateTrialErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)

        # The noise added to each location is <10, converting to motions makes that <20, in each axis
        # But then, changes to the orientation tweak the relative location, and hence the motion
        for errors in result.errors_by_trial.values():
            errors = np.array(errors)
            self.assertLessEqual(np.max(errors[:, 3]), 150)
            self.assertLessEqual(np.max(errors[:, 5]), np.pi / 32)
    def test_benchmark_results_one_observation_per_motion_per_trial(self):
        benchmark = eeb.EstimateTrialErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        if isinstance(result, arvet.core.benchmark.FailedBenchmark):
            print(result.reason)

        self.assertEqual(len(self.trial_results), len(result.errors_by_trial))
        for trial_result in self.trial_results:
            self.assertIn(trial_result.identifier, result.errors_by_trial)
            errors = np.array(result.errors_by_trial[trial_result.identifier])
            self.assertEqual((len(trial_result.ground_truth_trajectory) - 1, 23), errors.shape)
    def test_benchmark_results_one_observation_per_frame(self):
        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        if isinstance(result, arvet.core.benchmark.FailedBenchmark):
            print(result.reason)

        self.assertEqual(len(self.trial_results[0].ground_truth_trajectory) - 1,
                         len(result.frame_errors))
        for error_measurement in result.frame_errors.values():
            self.assertEqual(18, len(error_measurement))
    def test_benchmark_results_fails_for_trials_from_different_systems(self):
        trajectory = create_random_trajectory(self.random)
        mixed_trial_results = self.trial_results + [MockTrialResult(
            gt_trajectory=trajectory,
            comp_trajectory=trajectory,
            system_id=bson.ObjectId()
        )]

        # Perform the benchmark
        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(mixed_trial_results)
        self.assertIsInstance(result, arvet.core.benchmark.FailedBenchmark)
    def test_benchmark_results_estimates_no_noise_for_identical_trajectory(self):
        # Make all the trial results have exactly the same computed trajectories
        for trial_result in self.trial_results[1:]:
            trial_result.computed_trajectory = copy.deepcopy(self.trial_results[0].computed_trajectory)

        benchmark = eeb.EstimateTrialErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        for errors in result.errors_by_trial.values():
            errors = np.array(errors)
            self.assertTrue(np.all(np.isclose(np.zeros(errors.shape[0]), errors[:, 9])))
            self.assertTrue(np.all(np.isclose(np.zeros(errors.shape[0]), errors[:, 11], atol=1e-7)))
    def test_benchmark_results_estimates_no_error_for_identical_trajectory(self):
        # Copy the ground truth exactly
        for trial_result in self.trial_results:
            trial_result.computed_trajectory = copy.deepcopy(trial_result.ground_truth_trajectory)

        benchmark = eeb.EstimateTrialErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        if isinstance(result, arvet.core.benchmark.FailedBenchmark):
            print(result.reason)

        # Check all the errors are zero
        for errors in result.errors_by_trial.values():
            errors = np.array(errors)
            self.assertTrue(np.all(np.isclose(np.zeros(errors.shape[0]), errors[:, 3])))
            # We need more tolerance for the rotational error, because the arccos involved
            # means even the smallest possible change produces a value around 2e-8
            self.assertTrue(np.all(np.isclose(np.zeros(errors.shape[0]), errors[:, 5], atol=1e-7)))
    def test_benchmark_results_estimates_no_noise_for_identical_trajectory(self):
        # Make all the trial results have exactly the same computed trajectories
        for trial_result in self.trial_results[1:]:
            trial_result.computed_trajectory = copy.deepcopy(self.trial_results[0].computed_trajectory)

        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)

        # Check all the errors are zero
        values = collect_values(result, 4)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 5)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 6)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
        values = collect_values(result, 7)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
    def test_benchmark_results_fails_for_no_observations(self):
        # Adjust the computed timestamps so none of them match
        for trial_result in self.trial_results:
            trial_result.computed_trajectory = {
                time + 10000: pose
                for time, pose in trial_result.computed_trajectory.items()
            }
            trial_result.tracking_states = {
                time + 10000: state
                for time, state in trial_result.tracking_states.items()
            }
            trial_result.num_features = {
                time + 10000: features
                for time, features in trial_result.num_features.items()
            }
            trial_result.num_matches = {
                time + 10000: matches
                for time, matches in trial_result.num_matches.items()
            }

        # Perform the benchmark
        benchmark = eeb.EstimateTrialErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        self.assertIsInstance(result, arvet.core.benchmark.FailedBenchmark)
    def test_benchmark_results_estimates_no_error_for_noiseless_trajectory(self):
        # Create a new computed trajectory with no noise, but a fixed offset from the real trajectory
        # That is, the relative motions are the same, but the start point is different
        for trial_result in self.trial_results:
            comp_traj, _ = create_noise(
                trajectory=trial_result.ground_truth_trajectory,
                random_state=self.random,
                time_offset=0,
                time_noise=0,
                loc_noise=0,
                rot_noise=0
            )
            trial_result.computed_trajectory = comp_traj

        benchmark = eeb.EstimateTrialErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        for errors in result.errors_by_trial.values():
            errors = np.array(errors)
            self.assertTrue(np.all(np.isclose(np.zeros(errors.shape[0]), errors[:, 3])))
            self.assertTrue(np.all(np.isclose(np.zeros(errors.shape[0]), errors[:, 5], atol=1e-7)))
    def test_benchmark_results_estimates_no_error_for_identical_trajectory(self):
        # Copy the ground truth exactly
        for trial_result in self.trial_results:
            trial_result.computed_trajectory = copy.deepcopy(trial_result.ground_truth_trajectory)

        benchmark = feb.FrameErrorsBenchmark()
        result = benchmark.benchmark_results(self.trial_results)
        if isinstance(result, arvet.core.benchmark.FailedBenchmark):
            print(result.reason)

        # Check all the errors are zero
        values = collect_values(result, 0)
        self.assertNPClose(np.zeros(values.shape), values)
        values = collect_values(result, 1)
        self.assertNPClose(np.zeros(values.shape), values)
        # We need more tolerance for the rotational error, because the arccos involved
        # means even the smallest possible change produces a value around 2e-8
        values = collect_values(result, 2)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
        values = collect_values(result, 3)
        self.assertNPClose(np.zeros(values.shape), values, atol=1e-7)
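
# The tests above rely on helpers defined elsewhere in this test module
# (create_noise, create_random_trajectory, MockTrialResult, collect_values,
# assertNPClose). The function below is only an inferred sketch of collect_values,
# reconstructed from how it is called here: it pulls one column out of every
# per-frame error tuple in a frame errors result. The real helper may differ.
def collect_values(result, index):
    """Collect the value at the given index from every frame error measurement."""
    return np.array([frame_error[index] for frame_error in result.frame_errors.values()])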