Example #1
    def test_OffRoadRate(self):
        # Patch load_drivable_area_masks so the real drivable-area masks are
        # never loaded; this test only checks the serialized representation.
        with patch.object(metrics.OffRoadRate, 'load_drivable_area_masks'):
            helper = MagicMock(spec=PredictHelper)
            off_road_rate = metrics.OffRoadRate(helper, [metrics.RowMean()])
            self.assertDictEqual(off_road_rate.serialize(), {
                'name': 'OffRoadRate',
                'aggregators': [{
                    'name': 'RowMean'
                }]
            })
Example #2
    def test_MinFDEK(self):
        # serialize() should expose the metric name, the k values to report,
        # and the configured aggregators.
        min_fde = metrics.MinFDEK([1, 5, 10], [metrics.RowMean()])
        self.assertDictEqual(
            min_fde.serialize(), {
                'name': 'MinFDEK',
                'k_to_report': [1, 5, 10],
                'aggregators': [{
                    'name': 'RowMean'
                }]
            })
Example #3
    def test_MissRateTopK(self):
        # MissRateTopK additionally serializes its distance tolerance.
        miss_rate = metrics.MissRateTopK([1, 5, 10], [metrics.RowMean()], 2)
        self.assertDictEqual(
            miss_rate.serialize(), {
                'k_to_report': [1, 5, 10],
                'name': 'MissRateTopK',
                'aggregators': [{
                    'name': 'RowMean'
                }],
                'tolerance': 2
            })
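The serialized dictionaries asserted in Examples #2 and #3 contain everything needed to rebuild the metrics. The sketch below is a hypothetical helper, not a devkit API: it reconstructs a metric from a config shaped like the serialize() output above, using only the constructor calls already shown in these tests and assuming the metrics module is nuscenes.eval.prediction.metrics.

from nuscenes.eval.prediction import metrics  # assumed module path

def build_metric(config):
    # Hypothetical: only RowMean appears in these tests, so every aggregator
    # entry is rebuilt as RowMean; a fuller version would dispatch on name.
    aggregators = [metrics.RowMean() for _ in config['aggregators']]
    if config['name'] == 'MinFDEK':
        return metrics.MinFDEK(config['k_to_report'], aggregators)
    if config['name'] == 'MinADEK':
        return metrics.MinADEK(config['k_to_report'], aggregators)
    if config['name'] == 'MissRateTopK':
        return metrics.MissRateTopK(config['k_to_report'], aggregators,
                                    config['tolerance'])
    raise ValueError(f"unsupported metric config: {config['name']}")

# Round trip: serialize() -> build_metric() -> serialize() should be stable.
original = metrics.MissRateTopK([1, 5, 10], [metrics.RowMean()], 2)
assert build_metric(original.serialize()).serialize() == original.serialize()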
Example #4
    def test_flatten_metrics(self):
        results = {
            "MinFDEK": {
                "RowMean": [5.92, 6.1, 7.2]
            },
            "MinADEK": {
                "RowMean": [2.48, 3.29, 3.79]
            },
            "MissRateTopK_2": {
                "RowMean": [0.37, 0.45, 0.55]
            }
        }

        metric_functions = [
            metrics.MinFDEK([1, 5, 10], aggregators=[metrics.RowMean()]),
            metrics.MinADEK([1, 5, 10], aggregators=[metrics.RowMean()]),
            metrics.MissRateTopK([1, 5, 10],
                                 tolerance=2,
                                 aggregators=[metrics.RowMean()])
        ]

        flattened = metrics.flatten_metrics(results, metric_functions)

        answer = {
            'MinFDEK_1': 5.92,
            'MinFDEK_5': 6.1,
            'MinFDEK_10': 7.2,
            'MinADEK_1': 2.48,
            'MinADEK_5': 3.29,
            'MinADEK_10': 3.79,
            'MissRateTopK_2_1': 0.37,
            'MissRateTopK_2_5': 0.45,
            'MissRateTopK_2_10': 0.55
        }

        self.assertDictEqual(flattened, answer)
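The test above pins down what flatten_metrics must do: expand each aggregated list into '<metric-name>_<k>' keys, one per k in k_to_report, keying MissRateTopK by its tolerance-suffixed name. The function below is a hypothetical re-implementation of that mapping inferred from the asserted input and output; it is not the devkit's flatten_metrics, and it assumes MinADEK serializes the same fields as MinFDEK.

def flatten_like_test(results, metric_functions):
    # Hypothetical sketch of the flattening behaviour asserted above.
    flattened = {}
    for metric in metric_functions:
        config = metric.serialize()
        key = config['name']
        # results is keyed by 'MissRateTopK_2', while serialize() reports the
        # bare name plus a separate tolerance, so rebuild the suffixed key.
        if 'tolerance' in config:
            key = f"{key}_{config['tolerance']}"
        aggregator = config['aggregators'][0]['name']  # 'RowMean' in this test
        for k, value in zip(config['k_to_report'], results[key][aggregator]):
            flattened[f"{key}_{k}"] = value
    return flattened

Applied to the results and metric_functions defined in the test, this reproduces the answer dictionary the test compares against.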
Example #5
    def _do_test(self, map_name, predictions, answer):
        with patch.object(PredictHelper,
                          'get_map_name_from_sample_token') as get_map_name:
            get_map_name.return_value = map_name
            nusc = NuScenes('v1.0-mini', dataroot=os.environ['NUSCENES'])
            helper = PredictHelper(nusc)

            off_road_rate = metrics.OffRoadRate(helper, [metrics.RowMean()])

            # Equal probability (1 / 3) for each of the three predicted modes.
            probabilities = np.array([1 / 3] * predictions.shape[0])
            prediction = Prediction('foo-instance', 'foo-sample', predictions,
                                    probabilities)

            # The off-road rate should equal the expected fraction of
            # predicted trajectories that leave the drivable area.
            np.testing.assert_allclose(off_road_rate(np.array([]), prediction),
                                       np.array([answer]))
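Conceptually, the value OffRoadRate returns for an agent is the fraction of its predicted modes that leave the drivable area; the metric appears to ignore the ground truth, which is why the test can pass an empty array for it. The snippet below is only a standalone illustration of that fraction, not the devkit implementation, which derives the per-mode flags from the maps' drivable-area masks.

import numpy as np

# Illustration only: if two of three predicted modes leave the drivable area,
# the off-road rate for that agent is 2/3.
mode_off_road = np.array([False, True, True])
assert np.isclose(mode_off_road.mean(), 2 / 3)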
Example #6
    def test_RowMean(self):
        rm = metrics.RowMean()
        value = rm(np.arange(20).reshape(2, 10))
        self.assertListEqual(list(value), [5, 6, 7, 8, 9, 10, 11, 12, 13, 14])

        self.assertDictEqual(rm.serialize(), {'name': 'RowMean'})
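The expected values in this test are simply the column means of the 2x10 input, so for this input RowMean behaves like a mean over the first axis; a quick standalone NumPy check of the same numbers:

import numpy as np

# Mean of [[0..9], [10..19]] along axis 0 gives [5, 6, ..., 14],
# matching the list asserted in the test above.
expected = np.mean(np.arange(20).reshape(2, 10), axis=0)
assert list(expected) == [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]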