    def test_matches_too_close_are_trivial_matches(self):
        trajectory = {
            1.33333: tf.Transform(location=(100, 100, 0),
                                  rotation=(0, 0, 0, 1)),
            3.66667: tf.Transform(location=(100, 100, 0),
                                  rotation=(0, 0, 0, 1)),
            14.33333: tf.Transform(location=(-100, 100, 0),
                                   rotation=(0, 0, 0, 1)),
            15.66667: tf.Transform(location=(-100, 100, 0),
                                   rotation=(0, 0, 0, 1))
        }
        closures = {3.66667: 1.33333}
        trial_result = MockTrialResult(gt_trajectory=trajectory,
                                       loop_closures=closures)

        # Perform the benchmark with a large trivial closure index distance
        benchmark = lc.BenchmarkLoopClosure(distance_threshold=20,
                                            trivial_closure_index_distance=10)
        result = benchmark.benchmark_results(trial_result)
        self.assertEqual(match_res.MatchType.FALSE_POSITIVE,
                         result.matches[3.66667])
        self.assertEqual(match_res.MatchType.TRUE_NEGATIVE,
                         result.matches[15.66667])

        # Try again with a smaller trivial closure index distance; the closure
        # should become a true positive, since the indexes are further apart
        # than the threshold
        benchmark = lc.BenchmarkLoopClosure(distance_threshold=20,
                                            trivial_closure_index_distance=1)
        result = benchmark.benchmark_results(trial_result)
        self.assertEqual(match_res.MatchType.TRUE_POSITIVE,
                         result.matches[3.66667])
        self.assertEqual(match_res.MatchType.FALSE_NEGATIVE,
                         result.matches[15.66667])
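
# A minimal sketch of the classification logic these loop closure tests
# exercise, reconstructed from their expectations; classify_closure is a
# hypothetical stand-in, not the actual lc.BenchmarkLoopClosure implementation.
import numpy as np

def classify_closure(locations, closures, stamp, distance_threshold,
                     trivial_closure_index_distance=0):
    """Classify the closure decision at `stamp` as TP / FP / TN / FN.

    `locations` maps timestamp -> (x, y, z); `closures` maps the timestamp
    of each detected closure to the timestamp it was matched against.
    """
    def near(a, b):
        return np.linalg.norm(np.subtract(locations[a],
                                          locations[b])) < distance_threshold

    # A non-trivial closure is possible if some sufficiently-earlier pose is nearby
    possible = any(stamp - other > trivial_closure_index_distance
                   and near(stamp, other)
                   for other in locations if other < stamp)
    if stamp in closures:
        # A detected closure only counts if it is both near enough and non-trivial
        matched = closures[stamp]
        good = (stamp - matched > trivial_closure_index_distance
                and near(stamp, matched))
        return 'TRUE_POSITIVE' if good else 'FALSE_POSITIVE'
    return 'FALSE_NEGATIVE' if possible else 'TRUE_NEGATIVE'
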
    def test_distance_threshold_determines_acceptable_matches(self):
        trajectory = {
            1.33333: tf.Transform(location=(100, 100, 0),
                                  rotation=(0, 0, 0, 1)),
            10.66667: tf.Transform(location=(110, 100, 0),
                                   rotation=(0, 0, 0, 1)),
            15.33333: tf.Transform(location=(-100, 100, 0),
                                   rotation=(0, 0, 0, 1)),
            20.66667: tf.Transform(location=(-110, 100, 0),
                                   rotation=(0, 0, 0, 1))
        }
        closures = {10.66667: 1.33333}
        trial_result = MockTrialResult(gt_trajectory=trajectory,
                                       loop_closures=closures)

        # Perform the benchmark with a large distance threshold
        benchmark = lc.BenchmarkLoopClosure(distance_threshold=20)
        result = benchmark.benchmark_results(trial_result)
        self.assertEqual(match_res.MatchType.TRUE_POSITIVE,
                         result.matches[10.66667])
        self.assertEqual(match_res.MatchType.FALSE_NEGATIVE,
                         result.matches[20.66667])

        # Try again with a smaller threshold; the closure should become a false positive
        benchmark = lc.BenchmarkLoopClosure(distance_threshold=1)
        result = benchmark.benchmark_results(trial_result)
        self.assertEqual(match_res.MatchType.FALSE_POSITIVE,
                         result.matches[10.66667])
        self.assertEqual(match_res.MatchType.TRUE_NEGATIVE,
                         result.matches[20.66667])

    def test_benchmark_produces_expected_results(self):
        # Perform the benchmark
        benchmark = tracking.TrackingBenchmark()
        result = benchmark.benchmark_results(self.trial_result)
        self.assertNotIsInstance(result, core.benchmark.FailedBenchmark)
        # The expectations are based on the trajectory and tracking stats in setUp
        self.assertEqual(4, result.times_lost)
        self.assertEqual(1.3333, result.lost_intervals[0].start_time)
        self.assertEqual(1.6667, result.lost_intervals[0].end_time)
        self.assertEqual(1.6667 - 1.3333, result.lost_intervals[0].duration)
        self.assertEqual(10, result.lost_intervals[0].distance)
        self.assertEqual(1, result.lost_intervals[0].frames)

        self.assertEqual(2.3333, result.lost_intervals[1].start_time)
        self.assertEqual(3, result.lost_intervals[1].end_time)
        self.assertEqual(3 - 2.3333, result.lost_intervals[1].duration)
        self.assertEqual(20, result.lost_intervals[1].distance)
        self.assertEqual(2, result.lost_intervals[1].frames)

        self.assertEqual(3.3333, result.lost_intervals[2].start_time)
        self.assertEqual(4.3333, result.lost_intervals[2].end_time)
        self.assertAlmostEqual(1, result.lost_intervals[2].duration)
        self.assertEqual(30, result.lost_intervals[2].distance)
        self.assertEqual(3, result.lost_intervals[2].frames)

        self.assertEqual(4.6667, result.lost_intervals[3].start_time)
        self.assertEqual(4.6667, result.lost_intervals[3].end_time)
        self.assertEqual(0, result.lost_intervals[3].duration)
        self.assertEqual(0, result.lost_intervals[3].distance)
        self.assertEqual(1, result.lost_intervals[3].frames)
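
# A sketch of the interval grouping those assertions imply; find_lost_intervals
# is hypothetical, not the actual tracking.TrackingBenchmark code, and assumes
# per-frame inputs that the real benchmark derives from the trial result.
def find_lost_intervals(timestamps, lost_flags, distances):
    """Group runs of consecutive lost frames into intervals.

    `timestamps` is sorted, `lost_flags[i]` is True when tracking was lost
    at timestamps[i], and `distances[i]` is the distance travelled over frame i.
    """
    intervals, run = [], []
    for i, lost in enumerate(list(lost_flags) + [False]):  # sentinel flushes a trailing run
        if lost:
            run.append(i)
        elif run:
            # The interval ends at the next tracked frame, or at the final
            # lost frame if tracking was never recovered
            last = run[-1]
            end = timestamps[last + 1] if last + 1 < len(timestamps) else timestamps[last]
            intervals.append({
                'start_time': timestamps[run[0]],
                'end_time': end,
                'duration': end - timestamps[run[0]],
                'distance': sum(distances[j] for j in run),
                'frames': len(run),
            })
            run = []
    return intervals
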
    def test_benchmark_results_returns_a_benchmark_result(self):
        benchmark = tracking.TrackingBenchmark()
        result = benchmark.benchmark_results(self.trial_result)
        self.assertIsInstance(result, core.benchmark.BenchmarkResult)
        self.assertNotIsInstance(result, core.benchmark.FailedBenchmark)
        self.assertEqual(benchmark.identifier, result.benchmark)
        self.assertEqual(self.trial_result.identifier, result.trial_result)

    def test_benchmark_accepts_any_of_multiple_closures(self):
        # Create a trial result with multiple valid closures
        trajectory = {
            1.33333: tf.Transform(location=(100, 100, 0),
                                  rotation=(0, 0, 0, 1)),
            5.33333: tf.Transform(location=(-100, -100, 0),
                                  rotation=(0, 0, 0, 1)),
            10.66667: tf.Transform(location=(100, 100, 0),
                                   rotation=(0, 0, 0, 1)),
            12.33333: tf.Transform(location=(-100, -100, 0),
                                   rotation=(0, 0, 0, 1)),
            15: tf.Transform(location=(100, 100, 0), rotation=(0, 0, 0, 1)),
            17.66667: tf.Transform(location=(-100, -100, 0),
                                   rotation=(0, 0, 0, 1)),
            20.66667: tf.Transform(location=(100, 100, 0),
                                   rotation=(0, 0, 0, 1))
        }
        trial_result = MockTrialResult(gt_trajectory=trajectory,
                                       loop_closures={})
        benchmark = lc.BenchmarkLoopClosure(distance_threshold=20)

        # Build every ordered (detection, closure) pair of timestamps at this
        # location; the closure must come earlier than the detection
        group1 = [20.66667, 15, 10.66667, 1.33333]
        valid_pairs = [(idx, closure) for idx in group1 for closure in group1
                       if closure < idx]

        # Test different possible closures
        for idx, closure in valid_pairs:
            trial_result.loop_closures = {idx: closure}
            result = benchmark.benchmark_results(trial_result)
            self.assertEqual(match_res.MatchType.TRUE_POSITIVE,
                             result.matches[idx])
    def test_offset_shifts_query_trajectory_time(self):
        # Create a new noise trajectory with a large time offset
        comp_traj, noise = create_noise(
            self.trial_result.ground_truth_trajectory,
            self.random,
            time_offset=1000)
        self.trial_result.computed_trajectory = comp_traj

        # This should fail due to the offset
        benchmark = rpe.BenchmarkRPE()
        result = benchmark.benchmark_results(self.trial_result)
        self.assertIsInstance(result, core.benchmark.FailedBenchmark)

        # This one should work, since the offset brings things back close together
        benchmark.offset = -1000
        result = benchmark.benchmark_results(self.trial_result)
        self.assertNotIsInstance(result, core.benchmark.FailedBenchmark)
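
# For context, a sketch of the kind of greedy timestamp association that makes
# the offset matter, in the spirit of the TUM RGB-D benchmark scripts; the
# function and parameter names here are assumptions, not rpe.BenchmarkRPE's
# actual internals.
def associate(gt_stamps, comp_stamps, offset=0.0, max_difference=0.02):
    """Match each ground truth stamp to the closest shifted computed stamp."""
    candidates = sorted((abs(g - (c + offset)), g, c)
                        for g in gt_stamps for c in comp_stamps)
    matches, used_g, used_c = [], set(), set()
    for diff, g, c in candidates:
        if diff <= max_difference and g not in used_g and c not in used_c:
            matches.append((g, c))
            used_g.add(g)
            used_c.add(c)
    return matches
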
    def test_scale_affects_trajectory_position(self):
        # Manually scale the computed trajectory
        scale = 4243
        self.trial_result.computed_trajectory = {}
        for key, pose in self.trial_result.ground_truth_trajectory.items():
            self.trial_result.computed_trajectory[key] = tf.Transform(
                location=pose.location / scale,
                rotation=pose.rotation_quat(True),
                w_first=True)

        # This should have a large error due to the bad scale
        benchmark = rpe.BenchmarkRPE()
        unscaled_result = benchmark.benchmark_results(self.trial_result)

        # This one should have a more reasonable error
        benchmark.scale = scale
        result = benchmark.benchmark_results(self.trial_result)
        self.assertLess(result.trans_max, unscaled_result.trans_max)
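
# A sketch of how such a scale correction could be applied before comparison;
# the `scale` attribute matches the test above, but apply_scale itself is a
# hypothetical helper rather than rpe.BenchmarkRPE's actual internals.
import numpy as np

def apply_scale(trajectory, scale):
    """Multiply every location in a {time: Transform} trajectory by `scale`."""
    return {
        time: tf.Transform(location=scale * np.asarray(pose.location),
                           rotation=pose.rotation_quat(True),
                           w_first=True)
        for time, pose in trajectory.items()
    }
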
    def test_benchmark_results_returns_a_benchmark_result(self):
        trial_result = MockTrialResult(
            gt_bboxes={oid.ObjectId(): [bbox_trial.BoundingBox(('cup',), 0.8256, 15, 22, 100, 100)]},
            bboxes={oid.ObjectId(): [bbox_trial.BoundingBox(('cup',), 0.8256, 15, 22, 100, 100)]})

        benchmark = bbox_overlap.BoundingBoxOverlapBenchmark()
        result = benchmark.benchmark_results(trial_result)
        self.assertIsInstance(result, core.benchmark.BenchmarkResult)
        self.assertNotIsInstance(result, core.benchmark.FailedBenchmark)
        self.assertIsInstance(result, bbox_result.BoundingBoxOverlapBenchmarkResult)
        self.assertEqual(benchmark.identifier, result.benchmark)
        self.assertEqual(trial_result.identifier, result.trial_result)
    def test_benchmark_results_fails_for_no_matching_timestamps(self):
        # Create a new computed trajectory with no matching timestamps
        self.trial_result.computed_trajectory = {
            time + 10000: pose
            for time, pose in
            self.trial_result.ground_truth_trajectory.items()
        }

        # Perform the benchmark
        benchmark = rpe.BenchmarkRPE()
        result = benchmark.benchmark_results(self.trial_result)
        self.assertIsInstance(result, core.benchmark.FailedBenchmark)

    def test_benchmark_measures_match_for_all_stamps(self):
        random = np.random.RandomState(1563)
        trajectory = {
            random.uniform(0, 600): tf.Transform(location=(100, 100, 0),
                                                 rotation=(0, 0, 0, 1))
            for _ in range(100)
        }
        trial_result = MockTrialResult(gt_trajectory=trajectory,
                                       loop_closures={})
        benchmark = lc.BenchmarkLoopClosure(distance_threshold=20)
        result = benchmark.benchmark_results(trial_result)

        for stamp in trajectory.keys():
            self.assertIn(stamp, result.matches)
    def test_benchmark_results_estimates_no_error_for_identical_trajectory(
            self):
        # Copy the ground truth exactly
        self.trial_result.computed_trajectory = copy.deepcopy(
            self.trial_result.ground_truth_trajectory)

        benchmark = rpe.BenchmarkRPE()
        result = benchmark.benchmark_results(self.trial_result)

        if isinstance(result, core.benchmark.FailedBenchmark):
            print(result.reason)

        for time, error in result.translational_error.items():
            self.assertAlmostEqual(0, error)
        for time, error in result.rotational_error.items():
            self.assertAlmostEqual(0, error)

    def test_benchmark_results_returns_a_benchmark_result(self):
        trial_result = MockTrialResult(
            gt_trajectory={
                1.33333: tf.Transform(location=(100, 100, 0),
                                      rotation=(0, 0, 0, 1)),
                10.66667: tf.Transform(location=(100, 100, 0),
                                       rotation=(0, 0, 0, 1))
            },
            loop_closures={})

        benchmark = lc.BenchmarkLoopClosure(distance_threshold=20)
        result = benchmark.benchmark_results(trial_result)
        self.assertIsInstance(result, core.benchmark.BenchmarkResult)
        self.assertNotIsInstance(result, core.benchmark.FailedBenchmark)
        self.assertIsInstance(result, match_res.MatchBenchmarkResult)
        self.assertEqual(benchmark.identifier, result.benchmark)
        self.assertEqual(trial_result.identifier, result.trial_result)

    def test_benchmark_detects_false_negative(self):
        # Create a trial result with a correct loop closure
        trajectory = {
            1.33333: tf.Transform(location=(100, 100, 0),
                                  rotation=(0, 0, 0, 1)),
            10.66667: tf.Transform(location=(100, 100, 0),
                                   rotation=(0, 0, 0, 1))
        }
        closures = {}
        trial_result = MockTrialResult(gt_trajectory=trajectory,
                                       loop_closures=closures)

        # Perform the benchmark
        benchmark = lc.BenchmarkLoopClosure(distance_threshold=20)
        result = benchmark.benchmark_results(trial_result)
        self.assertEqual(match_res.MatchType.FALSE_NEGATIVE,
                         result.matches[10.66667])
    def test_benchmark_results_estimates_no_error_for_noiseless_trajectory(
            self):
        # Create a new computed trajectory with no noise
        comp_traj, noise = create_noise(
            self.trial_result.ground_truth_trajectory,
            self.random,
            time_offset=0,
            time_noise=0,
            loc_noise=0,
            rot_noise=0)
        self.trial_result.computed_trajectory = comp_traj

        benchmark = rpe.BenchmarkRPE()
        result = benchmark.benchmark_results(self.trial_result)

        for time, error in result.translational_error.items():
            self.assertAlmostEqual(0, error)
        for time, error in result.rotational_error.items():
            self.assertAlmostEqual(0, error)
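
# For reference, a sketch of what a fixture helper like create_noise presumably
# does; the parameter names come from the calls above, but this body is an
# assumption, not the actual fixture (rotation noise is omitted for brevity).
import numpy as np

def create_noise(trajectory, random_state, time_offset=0, time_noise=0.01,
                 loc_noise=10, rot_noise=0.1):
    """Return a (noisy trajectory, noise) pair with timing and location noise."""
    noisy, noise = {}, {}
    for time, pose in trajectory.items():
        loc_offset = random_state.uniform(-loc_noise, loc_noise, 3)
        new_time = time + time_offset + random_state.uniform(-time_noise, time_noise)
        noisy[new_time] = tf.Transform(
            location=np.asarray(pose.location) + loc_offset,
            rotation=pose.rotation_quat(True),
            w_first=True)
        noise[new_time] = loc_offset
    return noisy, noise
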
    def test_benchmark_matches_each_gt_box_only_once(self):
        id1 = oid.ObjectId()
        trial_result = MockTrialResult(
            gt_bboxes={
                id1: [bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)]
            },
            bboxes={
                id1: [bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 100, 100),
                      bbox_trial.BoundingBox({'cup'}, 1, 115, 122, 50, 50),
                      bbox_trial.BoundingBox({'cup'}, 1, 165, 172, 25, 25)],
            }
        )
        benchmark = bbox_overlap.BoundingBoxOverlapBenchmark()
        result = benchmark.benchmark_results(trial_result)

        self.assertIn(id1, result.overlaps)
        self.assertEqual(3, len(result.overlaps[id1]))
        self.assertEqual({
            'overlap': 10000,
            'bounding_box_area': 10000,
            'ground_truth_area': 30625,
            'confidence': 1.0,
            'bounding_box_classes': ('cup',),
            'ground_truth_classes': ('cup',)
        }, result.overlaps[id1][0])
        self.assertEqual({
            'overlap': 0,
            'bounding_box_area': 2500,
            'ground_truth_area': 0,
            'confidence': 1.0,
            'bounding_box_classes': ('cup',),
            'ground_truth_classes': tuple()
        }, result.overlaps[id1][1])
        self.assertEqual({
            'overlap': 0,
            'bounding_box_area': 625,
            'ground_truth_area': 0,
            'confidence': 1.0,
            'bounding_box_classes': ('cup',),
            'ground_truth_classes': tuple()
        }, result.overlaps[id1][2])
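
# A sketch of the overlap computation and one-shot matching these expectations
# imply; overlap_area and match_boxes are hypothetical reconstructions, not the
# actual bbox_overlap implementation. Boxes are (x, y, width, height) tuples,
# e.g. gt (15, 22, 100, 100) vs detection (25, 32, 95, 95) overlaps 90 * 90 = 8100,
# matching the 'cow' case in the next test.
def overlap_area(a, b):
    """Intersection area of two axis-aligned (x, y, width, height) boxes."""
    x_overlap = max(0, min(a[0] + a[2], b[0] + b[2]) - max(a[0], b[0]))
    y_overlap = max(0, min(a[1] + a[3], b[1] + b[3]) - max(a[1], b[1]))
    return x_overlap * y_overlap

def match_boxes(gt_boxes, detected_boxes):
    """Pair detections with gt boxes; each gt box is matched at most once."""
    pairs, unmatched = [], set(range(len(gt_boxes)))
    for det in detected_boxes:
        best = max(unmatched, key=lambda i: overlap_area(det, gt_boxes[i]),
                   default=None)
        if best is not None and overlap_area(det, gt_boxes[best]) > 0:
            unmatched.discard(best)
            pairs.append((det, gt_boxes[best]))
        else:
            # Leftover detections get no ground truth: zero overlap and gt area
            pairs.append((det, None))
    return pairs
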
def benchmark_results(benchmark,
                      database_client,
                      config=None,
                      trained_state_id=None):
    """Run the given benchmark over every trial result that lacks a result for it."""
    if (not isinstance(benchmark, core.benchmark.Benchmark) or
            not isinstance(database_client, database.client.DatabaseClient)):
        return

    # Normalize the configuration; note that it is not used further down yet
    if config is None:
        config = {}
    else:
        config = dict(config)
    config = du.defaults(config, {})

    # Collect the trial results that this benchmark has already been run against
    existing_results_query = {'benchmark': benchmark.identifier}
    if trained_state_id is not None:
        existing_results_query['trained_state'] = trained_state_id
    existing_results = database_client.results_collection.find(
        existing_results_query, {
            '_id': False,
            'trial_result': True
        })
    existing_results = [result['trial_result'] for result in existing_results]

    # Find the trial results that meet the benchmark's requirements
    # and do not already have a result
    trial_results = database_client.trials_collection.find(
        du.defaults(benchmark.get_benchmark_requirements(),
                    {'_id': {
                        '$nin': existing_results
                    }}))

    # Benchmark each remaining trial against its source dataset and store the outcome
    for s_trial_result in trial_results:
        trial_result = database_client.deserialize_entity(s_trial_result)
        s_dataset = database_client.dataset_collection.find_one(
            {'_id': trial_result.image_source_id})
        dataset = database_client.deserialize_entity(s_dataset)
        dataset_images = dataset.load_images(database_client)
        benchmark_result = benchmark.benchmark_results(dataset_images,
                                                       trial_result)
        database_client.results_collection.insert(benchmark_result.serialize())
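
# Hypothetical driver code for the function above; all_benchmarks and
# database_client are assumed to be constructed elsewhere (their setup is not
# shown in this file).
for benchmark in all_benchmarks:
    benchmark_results(benchmark, database_client)
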
    def test_benchmark_measures_score_per_gt_bounding_box(self):
        id1 = oid.ObjectId()
        id2 = oid.ObjectId()
        id3 = oid.ObjectId()
        id4 = oid.ObjectId()
        trial_result = MockTrialResult(
            gt_bboxes={
                id1: [bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 100, 100)],
                id2: [bbox_trial.BoundingBox({'car'}, 1, 15, 22, 100, 100)],
                id3: [bbox_trial.BoundingBox({'cow'}, 1, 15, 22, 100, 100)],
                id4: [bbox_trial.BoundingBox({'cat'}, 1, 15, 22, 100, 100)]
            },
            bboxes={
                id1: [bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 100, 100)],       # Matched exactly
                id2: [bbox_trial.BoundingBox({'car'}, 0.8256, 15, 22, 100, 100)],  # Only confidence reduced
                id3: [bbox_trial.BoundingBox({'cow'}, 1, 25, 32, 95, 95)],         # Slightly misplaced
                id4: [bbox_trial.BoundingBox({'cat'}, 0.75, 25, 32, 95, 95)]       # Reduced confidence and slightly misplaced
            }
        )
        benchmark = bbox_overlap.BoundingBoxOverlapBenchmark()
        result = benchmark.benchmark_results(trial_result)

        self.assertIn(id1, result.overlaps)
        self.assertIn(id2, result.overlaps)
        self.assertIn(id3, result.overlaps)
        self.assertIn(id4, result.overlaps)
        self.assertEqual(1, len(result.overlaps[id1]))
        self.assertEqual(1, len(result.overlaps[id2]))
        self.assertEqual(1, len(result.overlaps[id3]))
        self.assertEqual(1, len(result.overlaps[id4]))
        self.assertEqual({
            'overlap': 10000,
            'bounding_box_area': 10000,
            'ground_truth_area': 10000,
            'confidence': 1.0,
            'bounding_box_classes': ('cup',),
            'ground_truth_classes': ('cup',)
        }, result.overlaps[id1][0])
        self.assertEqual({
            'overlap': 10000,
            'bounding_box_area': 10000,
            'ground_truth_area': 10000,
            'confidence': 0.8256,
            'bounding_box_classes': ('car',),
            'ground_truth_classes': ('car',)
        }, result.overlaps[id2][0])
        self.assertEqual({
            'overlap': 8100,
            'bounding_box_area': 9025,
            'ground_truth_area': 10000,
            'confidence': 1.0,
            'bounding_box_classes': ('cow',),
            'ground_truth_classes': ('cow',)
        }, result.overlaps[id3][0])
        self.assertEqual({
            'overlap': 8100,
            'bounding_box_area': 9025,
            'ground_truth_area': 10000,
            'confidence': 0.75,
            'bounding_box_classes': ('cat',),
            'ground_truth_classes': ('cat',)
        }, result.overlaps[id4][0])
    def test_benchmark_results_estimates_reasonable_trajectory_noise(self):
        benchmark = rpe.BenchmarkRPE()
        result = benchmark.benchmark_results(self.trial_result)