Example #1
    def _mock_results(nsamples, ngt, npred, detection_name):

        def random_attr():
            """
            This is the most straight-forward way to generate a random attribute.
            Not currently used b/c we want the test fixture to be back-wards compatible.
            """
            # Get relevant attributes.
            rel_attributes = detection_name_to_rel_attributes(detection_name)

            if len(rel_attributes) == 0:
                # Empty string for classes without attributes.
                return ''
            else:
                # Pick a random attribute otherwise.
                return rel_attributes[np.random.randint(0, len(rel_attributes))]

        pred = EvalBoxes()
        gt = EvalBoxes()

        for sample_itt in range(nsamples):

            this_gt = []

            for box_itt in range(ngt):

                this_gt.append(EvalBox(
                    sample_token=str(sample_itt),
                    translation=tuple(list(np.random.rand(2)*15) + [0.0]),
                    size=tuple(np.random.rand(3)*4),
                    rotation=tuple(np.random.rand(4)),
                    velocity=tuple(np.random.rand(3)[:2]*4),
                    detection_name=detection_name,
                    detection_score=random.random(),
                    attribute_name=random_attr(),
                    ego_dist=random.random()*10,
                ))
            gt.add_boxes(str(sample_itt), this_gt)

        for sample_itt in range(nsamples):
            this_pred = []

            for box_itt in range(npred):

                this_pred.append(EvalBox(
                    sample_token=str(sample_itt),
                    translation=tuple(list(np.random.rand(2) * 10) + [0.0]),
                    size=tuple(np.random.rand(3) * 4),
                    rotation=tuple(np.random.rand(4)),
                    velocity=tuple(np.random.rand(3)[:2] * 4),
                    detection_name=detection_name,
                    detection_score=random.random(),
                    attribute_name=random_attr(),
                    ego_dist=random.random() * 10,
                ))

            pred.add_boxes(str(sample_itt), this_pred)

        return gt, pred
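A minimal usage sketch for the mock helper above (hypothetical parameter values; assumes the helper is in scope, `np` and `random` are the usual `numpy` and `random` imports, and `EvalBox`/`EvalBoxes`/`detection_name_to_rel_attributes` come from the nuScenes devkit detection eval code):

    # Hypothetical usage: build mock GT and predictions for 30 samples of the 'car' class.
    np.random.seed(42)
    random.seed(42)
    gt, pred = _mock_results(nsamples=30, ngt=5, npred=6, detection_name='car')
    print(len(gt.sample_tokens), len(pred.sample_tokens))  # 30 sample tokens in each EvalBoxes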
Example #2
    def test_serialization(self):
        """ Test that instance serialization protocol works with json encoding. """
        boxes = EvalBoxes()
        for i in range(10):
            boxes.add_boxes(str(i), [EvalBox(), EvalBox(), EvalBox()])

        recovered = EvalBoxes.deserialize(
            json.loads(json.dumps(boxes.serialize())))
        self.assertEqual(boxes, recovered)
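The same serialize/deserialize round trip can be used to persist boxes to disk. A small sketch, assuming (as the test above does) that `EvalBoxes.serialize()` returns a JSON-compatible dict and that equality is defined on `EvalBoxes`:

    # Sketch: round-trip an EvalBoxes instance through a JSON file (file name is illustrative).
    boxes = EvalBoxes()
    boxes.add_boxes('sample-0', [EvalBox(sample_token='sample-0')])
    with open('boxes.json', 'w') as f:
        json.dump(boxes.serialize(), f)
    with open('boxes.json') as f:
        recovered = EvalBoxes.deserialize(json.load(f))
    assert recovered == boxes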
Example #3
def get_metric_data(gts: Dict[str, List[Dict]],
                    preds: Dict[str, List[Dict]],
                    detection_name: str,
                    dist_th: float) -> MetricData:
        """
        Calculate and check the AP value.
        :param gts: Ground truth data.
        :param preds: Predictions.
        :param detection_name: Name of the class we are interested in.
        :param dist_th: Distance threshold for matching.
        """

        # Some or all of the defaults will be overridden by the values given in gts/preds.
        defaults = {'trans': (0, 0, 0), 'size': (1, 1, 1), 'rot': (0, 0, 0, 0),
                    'vel': (0, 0), 'attr': 'vehicle.parked', 'score': -1.0, 'name': 'car'}
        # Create GT EvalBoxes instance.
        gt_eval_boxes = EvalBoxes()
        for sample_token, data in gts.items():
            gt_boxes = []
            for gt in data:
                gt = {**defaults, **gt}  # The defaults will be replaced by gt if given.
                eb = EvalBox(sample_token=sample_token, translation=gt['trans'], size=gt['size'], rotation=gt['rot'],
                             detection_name=gt['name'], attribute_name=gt['attr'], velocity=gt['vel'])
                gt_boxes.append(eb)

            gt_eval_boxes.add_boxes(sample_token, gt_boxes)

        # Create Predictions EvalBoxes instance.
        pred_eval_boxes = EvalBoxes()
        for sample_token, data in preds.items():
            pred_boxes = []
            for pred in data:
                pred = {**defaults, **pred}
                eb = EvalBox(sample_token=sample_token, translation=pred['trans'], size=pred['size'],
                             rotation=pred['rot'], detection_name=pred['name'], detection_score=pred['score'],
                             velocity=pred['vel'], attribute_name=pred['attr'])
                pred_boxes.append(eb)
            pred_eval_boxes.add_boxes(sample_token, pred_boxes)

        metric_data = accumulate(gt_eval_boxes, pred_eval_boxes, class_name=detection_name,
                                 dist_fcn_name='center_distance', dist_th=dist_th)

        return metric_data
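A hypothetical call to the helper above, with one sample containing a single ground-truth car and a single nearby prediction; the dict keys follow the `defaults` dict used inside the function, and the returned `MetricData` is whatever `accumulate()` produces for this class and threshold:

    # Hypothetical input: one sample with one GT car and one nearby predicted car.
    gts = {'sample-0': [{'trans': (1.0, 1.0, 0.0), 'name': 'car'}]}
    preds = {'sample-0': [{'trans': (1.2, 1.0, 0.0), 'name': 'car', 'score': 0.9}]}
    md = get_metric_data(gts, preds, detection_name='car', dist_th=2.0)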
Example #4
def load_gt(nusc, eval_split: str, verbose: bool = False) -> EvalBoxes:
    """ Loads ground truth boxes from DB. """

    # Init.
    attribute_map = {a['token']: a['name'] for a in nusc.attribute}

    if verbose:
        print('Loading annotations for {} split from nuScenes version: {}'.
              format(eval_split, nusc.version))
    # Read out all sample_tokens in DB.
    sample_tokens_all = [s['token'] for s in nusc.sample]
    assert len(sample_tokens_all) > 0, "Error: Database has no samples!"

    # Only keep samples from this split.
    splits = create_splits_scenes()

    # Check compatibility of split with nusc_version.
    version = nusc.version
    if eval_split in {'train', 'val', 'train_detect', 'train_track'}:
        assert version.endswith('trainval'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    elif eval_split in {'mini_train', 'mini_val'}:
        assert version.endswith('mini'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    elif eval_split == 'test':
        assert version.endswith('test'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    else:
        raise ValueError(
            'Error: Requested split {} which this function cannot map to the correct NuScenes version.'
            .format(eval_split))

    if eval_split == 'test':
        # Check that you aren't trying to cheat :).
        assert len(nusc.sample_annotation) > 0, \
            'Error: You are trying to evaluate on the test set but you do not have the annotations!'

    sample_tokens = []
    for sample_token in sample_tokens_all:
        scene_token = nusc.get('sample', sample_token)['scene_token']
        scene_record = nusc.get('scene', scene_token)
        if scene_record['name'] in splits[eval_split]:
            sample_tokens.append(sample_token)

    all_annotations = EvalBoxes()

    # Load annotations and filter predictions and annotations.
    for sample_token in tqdm.tqdm(sample_tokens):

        sample = nusc.get('sample', sample_token)
        sample_annotation_tokens = sample['anns']

        sample_boxes = []
        for sample_annotation_token in sample_annotation_tokens:

            # Get label name in detection task and filter unused labels.
            sample_annotation = nusc.get('sample_annotation',
                                         sample_annotation_token)
            detection_name = category_to_detection_name(
                sample_annotation['category_name'])
            if detection_name is None:
                continue

            # Get attribute_name.
            attr_tokens = sample_annotation['attribute_tokens']
            attr_count = len(attr_tokens)
            if attr_count == 0:
                attribute_name = ''
            elif attr_count == 1:
                attribute_name = attribute_map[attr_tokens[0]]
            else:
                raise Exception(
                    'Error: GT annotations must not have more than one attribute!'
                )

            sample_boxes.append(
                EvalBox(
                    sample_token=sample_token,
                    translation=sample_annotation['translation'],
                    size=sample_annotation['size'],
                    rotation=sample_annotation['rotation'],
                    velocity=nusc.box_velocity(sample_annotation['token'])[:2],
                    detection_name=detection_name,
                    detection_score=-1.0,  # GT samples do not have a score.
                    attribute_name=attribute_name,
                    num_pts=sample_annotation['num_lidar_pts'] +
                    sample_annotation['num_radar_pts']))
        all_annotations.add_boxes(sample_token, sample_boxes)

    if verbose:
        print("Loaded ground truth annotations for {} samples.".format(
            len(all_annotations.sample_tokens)))

    return all_annotations
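A hedged usage sketch for `load_gt`, assuming a local copy of the v1.0-mini split; the `NuScenes` constructor call and the `NUSCENES` environment-variable convention mirror the test in Example #5 below:

    # Sketch: load ground-truth EvalBoxes for the mini_val split.
    nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
    gt_boxes = load_gt(nusc, eval_split='mini_val', verbose=True)
    print('Loaded GT for', len(gt_boxes.sample_tokens), 'samples')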
Example #5
    def test_filter_eval_boxes(self):
        """
        This tests runs the evaluation for an arbitrary random set of predictions.
        This score is then captured in this very test such that if we change the eval code,
        this test will trigger if the results changed.
        """

        # Get the maximum distance from the config
        this_dir = os.path.dirname(os.path.abspath(__file__))
        cfg_name = 'cvpr_2019.json'
        cfg_path = os.path.join(this_dir, '..', 'configs', cfg_name)
        with open(cfg_path, 'r') as f:
            cfg = DetectionConfig.deserialize(json.load(f))
        max_dist = cfg.class_range

        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

        nusc = NuScenes(version='v1.0-mini',
                        dataroot=os.environ['NUSCENES'],
                        verbose=False)

        sample_token = '0af0feb5b1394b928dd13d648de898f5'
        # This sample has a bike rack instance 'bfe685042aa34ab7b2b2f24ee0f1645f' with these parameters
        # 'translation': [683.681, 1592.002, 0.809],
        # 'size': [1.641, 14.465, 1.4],
        # 'rotation': [0.3473693995546558, 0.0, 0.0, 0.9377283723195315]

        # Test bicycle filtering by creating a box at the same position as the bike rack.
        box1 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='bicycle')

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)

        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         0)  # box1 should be filtered.

        # Test motorcycle filtering by creating a box at the same position as the bike rack.
        box2 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='motorcycle')

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1, box2])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)

        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         0)  # both box1 and box2 should be filtered.

        # Now create a car at the same position as the bike rack.
        box3 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='car')

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1, box2, box3])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)

        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         1)  # box1 and box2 to be filtered. box3 to stay.
        self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name,
                         'car')

        # Now add a bike outside the bike rack.
        box4 = EvalBox(sample_token=sample_token,
                       translation=(68.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='bicycle')

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1, box2, box3, box4])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)

        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         2)  # box1, box2 to be filtered. box3, box4 to stay.
        self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name,
                         'car')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name,
                         'bicycle')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0],
                         68.681)

        # Add another bike on the bike rack center but set the ego_dist higher than what's defined in max_dist
        box5 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='bicycle',
                       ego_dist=100.0)

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1, box2, box3, box4, box5])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         2)  # box1, box2, box5 filtered. box3, box4 to stay.
        self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name,
                         'car')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name,
                         'bicycle')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0],
                         68.681)

        # Add another bike on the bike rack center but set the num_pts to be zero so that it gets filtered.
        box6 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='bicycle',
                       num_pts=0)

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token,
                             [box1, box2, box3, box4, box5, box6])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         2)  # box1, box2, box5, box6 filtered. box3, box4 stay
        self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name,
                         'car')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name,
                         'bicycle')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0],
                         68.681)

        # Check for a sample where there are no bike racks. Everything should be filtered correctly.
        sample_token = 'ca9a282c9e77460f8360f564131a8af5'  # This sample has no bike-racks.

        box1 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='bicycle',
                       ego_dist=25.0)

        box2 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='motorcycle',
                       ego_dist=45.0)

        box3 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='car',
                       ego_dist=45.0)

        box4 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='car',
                       ego_dist=55.0)

        box5 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='bicycle',
                       num_pts=1)

        box6 = EvalBox(sample_token=sample_token,
                       translation=(683.681, 1592.002, 0.809),
                       size=(1, 1, 1),
                       detection_name='bicycle',
                       num_pts=0)

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token,
                             [box1, box2, box3, box4, box5, box6])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         3)  # box2, box4, box6 filtered. box1, box3, box5 stay
        self.assertEqual(filtered_boxes.boxes[sample_token][0].ego_dist, 25.0)
        self.assertEqual(filtered_boxes.boxes[sample_token][1].ego_dist, 45.0)
        self.assertEqual(filtered_boxes.boxes[sample_token][2].num_pts, 1)
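Note that `max_dist` in this test is the per-class range dictionary taken from `cfg.class_range`. For a quick experiment the filter can also be driven by a hand-written dict keyed by detection name; the thresholds below are illustrative placeholders, not the official cvpr_2019 values:

    # Illustrative per-class distance thresholds (placeholder values, not the official config).
    max_dist = {'car': 50.0, 'bicycle': 40.0, 'motorcycle': 40.0}
    filtered = filter_eval_boxes(nusc, eval_boxes, max_dist)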