Example #1
import tqdm

from nuscenes import NuScenes
from nuscenes.eval.common.data_classes import EvalBoxes
from nuscenes.eval.detection.data_classes import DetectionBox
from nuscenes.eval.detection.utils import category_to_detection_name
from nuscenes.eval.tracking.data_classes import TrackingBox
from nuscenes.eval.tracking.utils import category_to_tracking_name
from nuscenes.utils.splits import create_splits_scenes


def load_gt(nusc: NuScenes,
            eval_split: str,
            box_cls,
            verbose: bool = False) -> EvalBoxes:
    """
    Loads ground truth boxes from DB.
    :param nusc: A NuScenes instance.
    :param eval_split: The evaluation split for which we load GT boxes.
    :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.
    :param verbose: Whether to print messages to stdout.
    :return: The GT boxes.
    """
    # Init.
    if box_cls == DetectionBox:
        attribute_map = {a['token']: a['name'] for a in nusc.attribute}

    if verbose:
        print('Loading annotations for {} split from nuScenes version: {}'.
              format(eval_split, nusc.version))
    # Read out all sample_tokens in DB.
    sample_tokens_all = [s['token'] for s in nusc.sample]
    assert len(sample_tokens_all) > 0, "Error: Database has no samples!"

    # Only keep samples from this split.
    splits = create_splits_scenes()

    # Check compatibility of split with nusc_version.
    version = nusc.version
    if eval_split in {'train', 'val', 'train_detect', 'train_track'}:
        assert version.endswith('trainval'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    elif eval_split in {'mini_train', 'mini_val'}:
        assert version.endswith('mini'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    elif eval_split == 'test':
        assert version.endswith('test'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    else:
        raise ValueError(
            'Error: Requested split {} which this function cannot map to the correct NuScenes version.'
            .format(eval_split))

    if eval_split == 'test':
        # Check that you aren't trying to cheat :).
        assert len(nusc.sample_annotation) > 0, \
            'Error: You are trying to evaluate on the test set but you do not have the annotations!'

    sample_tokens = []
    for sample_token in sample_tokens_all:
        scene_token = nusc.get('sample', sample_token)['scene_token']
        scene_record = nusc.get('scene', scene_token)
        if scene_record['name'] in splits[eval_split]:
            sample_tokens.append(sample_token)

    all_annotations = EvalBoxes()

    # Load annotations and filter predictions and annotations.
    tracking_id_set = set()
    for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):

        sample = nusc.get('sample', sample_token)
        sample_annotation_tokens = sample['anns']

        sample_boxes = []
        for sample_annotation_token in sample_annotation_tokens:

            sample_annotation = nusc.get('sample_annotation',
                                         sample_annotation_token)
            if box_cls == DetectionBox:
                # Get label name in detection task and filter unused labels.
                detection_name = category_to_detection_name(
                    sample_annotation['category_name'])
                if detection_name is None:
                    continue

                # Get attribute_name.
                attr_tokens = sample_annotation['attribute_tokens']
                attr_count = len(attr_tokens)
                if attr_count == 0:
                    attribute_name = ''
                elif attr_count == 1:
                    attribute_name = attribute_map[attr_tokens[0]]
                else:
                    raise Exception(
                        'Error: GT annotations must not have more than one attribute!'
                    )

                sample_boxes.append(
                    box_cls(
                        sample_token=sample_token,
                        translation=sample_annotation['translation'],
                        size=sample_annotation['size'],
                        rotation=sample_annotation['rotation'],
                        velocity=nusc.box_velocity(
                            sample_annotation['token'])[:2],
                        num_pts=sample_annotation['num_lidar_pts'] +
                        sample_annotation['num_radar_pts'],
                        detection_name=detection_name,
                        detection_score=-1.0,  # GT samples do not have a score.
                        attribute_name=attribute_name))
            elif box_cls == TrackingBox:
                # Use nuScenes token as tracking id.
                tracking_id = sample_annotation['instance_token']
                tracking_id_set.add(tracking_id)

                # Get label name in detection task and filter unused labels.
                tracking_name = category_to_tracking_name(
                    sample_annotation['category_name'])
                if tracking_name is None:
                    continue

                sample_boxes.append(
                    box_cls(
                        sample_token=sample_token,
                        translation=sample_annotation['translation'],
                        size=sample_annotation['size'],
                        rotation=sample_annotation['rotation'],
                        velocity=nusc.box_velocity(
                            sample_annotation['token'])[:2],
                        num_pts=sample_annotation['num_lidar_pts'] +
                        sample_annotation['num_radar_pts'],
                        tracking_id=tracking_id,
                        tracking_name=tracking_name,
                        tracking_score=-1.0  # GT samples do not have a score.
                    ))
            else:
                raise NotImplementedError('Error: Invalid box_cls %s!' %
                                          box_cls)

        all_annotations.add_boxes(sample_token, sample_boxes)

    if verbose:
        print("Loaded ground truth annotations for {} samples.".format(
            len(all_annotations.sample_tokens)))

    return all_annotations
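
A minimal usage sketch (the dataroot is a placeholder; assumes the v1.0-mini split is installed locally):

if __name__ == '__main__':
    # Load detection GT for the mini validation split and report the sample count.
    nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=False)
    gt_boxes = load_gt(nusc, 'mini_val', DetectionBox, verbose=True)
    print('Samples with GT boxes:', len(gt_boxes.sample_tokens))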
Example #2
    @staticmethod
    def _mock_submission(nusc: NuScenes, split: str) -> Dict[str, dict]:
        """
        Creates "reasonable" submission (results and metadata) by looping through the mini-val set, adding 1 GT
        prediction per sample. Predictions will be permuted randomly along all axes.
        """
        def random_class(category_name: str) -> str:
            # Alter 10% of the valid labels.
            class_names = sorted(DETECTION_NAMES)
            tmp = category_to_detection_name(category_name)
            if tmp is not None and np.random.rand() < .9:
                return tmp
            else:
                # randint's upper bound is exclusive, so use len(class_names)
                # to allow every class to be drawn.
                return class_names[np.random.randint(0, len(class_names))]

        def random_attr(name: str) -> str:
            """
            This is the most straight-forward way to generate a random attribute.
            Not currently used b/c we want the test fixture to be back-wards compatible.
            """
            # Get relevant attributes.
            rel_attributes = detection_name_to_rel_attributes(name)

            if len(rel_attributes) == 0:
                # Empty string for classes without attributes.
                return ''
            else:
                # Pick a random attribute otherwise.
                return rel_attributes[np.random.randint(
                    0, len(rel_attributes))]

        mock_meta = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }
        mock_results = {}
        splits = create_splits_scenes()
        val_samples = []
        for sample in nusc.sample:
            if nusc.get('scene',
                        sample['scene_token'])['name'] in splits[split]:
                val_samples.append(sample)

        for sample in tqdm(val_samples, leave=False):
            sample_res = []
            for ann_token in sample['anns']:
                ann = nusc.get('sample_annotation', ann_token)
                detection_name = random_class(ann['category_name'])
                sample_res.append({
                    'sample_token': sample['token'],
                    'translation': list(np.array(ann['translation']) +
                                        5 * (np.random.rand(3) - 0.5)),
                    'size': list(np.array(ann['size']) * 2 *
                                 (np.random.rand(3) + 0.5)),
                    'rotation': list(np.array(ann['rotation']) +
                                     (np.random.rand(4) - 0.5) * .1),
                    'velocity': list(nusc.box_velocity(ann_token)[:2] *
                                     (np.random.rand(3)[:2] + 0.5)),
                    'detection_name': detection_name,
                    'detection_score': random.random(),
                    'attribute_name': random_attr(detection_name)
                })
            mock_results[sample['token']] = sample_res
        mock_submission = {'meta': mock_meta, 'results': mock_results}
        return mock_submission
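
A hedged usage sketch, assuming _mock_submission is exposed as a @staticmethod on a test class (the class name TestMain, the dataroot and the output file name are placeholders; re-uses the NuScenes import from Example #1):

import json

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes')
submission = TestMain._mock_submission(nusc, 'mini_val')
# Serialize the mocked submission in the JSON layout the evaluator consumes.
with open('nusc_eval.json', 'w') as f:
    json.dump(submission, f, indent=2)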
Example #3
    @staticmethod
    def _mock_submission(nusc: NuScenes,
                         split: str,
                         add_errors: bool = False) -> Dict[str, dict]:
        """
        Creates "reasonable" submission (results and metadata) by looping through the mini-val set, adding 1 GT
        prediction per sample. Predictions will be permuted randomly along all axes.
        :param nusc: NuScenes instance.
        :param split: Dataset split to use.
        :param add_errors: Whether to use GT or add errors to it.
        """

        def random_class(category_name: str, _add_errors: bool = False) -> Optional[str]:
            # Alter 10% of the valid labels.
            class_names = sorted(TRACKING_NAMES)
            tmp = category_to_tracking_name(category_name)

            if tmp is None:
                return None
            else:
                if not _add_errors or np.random.rand() < .9:
                    return tmp
                else:
                    # randint's upper bound is exclusive, so use len(class_names)
                    # to allow every class to be drawn.
                    return class_names[np.random.randint(0, len(class_names))]

        def random_id(instance_token: str, _add_errors: bool = False) -> str:
            # Alter 10% of the valid ids to be a random string, which hopefully corresponds to a new track.
            if not _add_errors or np.random.rand() < .9:
                _tracking_id = instance_token + '_pred'
            else:
                _tracking_id = str(np.random.randint(0, sys.maxsize))

            return _tracking_id

        mock_meta = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }
        mock_results = {}

        # Get all samples in the current evaluation split.
        splits = create_splits_scenes()
        val_samples = []
        for sample in nusc.sample:
            if nusc.get('scene', sample['scene_token'])['name'] in splits[split]:
                val_samples.append(sample)

        # Prepare results.
        instance_to_score = dict()
        for sample in tqdm(val_samples, leave=False):
            sample_res = []
            for ann_token in sample['anns']:
                ann = nusc.get('sample_annotation', ann_token)
                translation = np.array(ann['translation'])
                size = np.array(ann['size'])
                rotation = np.array(ann['rotation'])
                velocity = nusc.box_velocity(ann_token)[:2]
                tracking_id = random_id(ann['instance_token'], _add_errors=add_errors)
                tracking_name = random_class(ann['category_name'], _add_errors=add_errors)

                # Skip annotations for classes not part of the tracking challenge.
                if tracking_name is None:
                    continue

                # Skip annotations with 0 lidar/radar points.
                num_pts = ann['num_lidar_pts'] + ann['num_radar_pts']
                if num_pts == 0:
                    continue

                # If we randomly assigned a score in [0, 1] to each box and later averaged over the boxes in a track,
                # the average score would be around 0.5 and hardly any prediction would end up above that.
                # Therefore we assign the same base score to every box in a track (plus a small per-box jitter below).
                if ann['instance_token'] not in instance_to_score:
                    instance_to_score[ann['instance_token']] = random.random()
                tracking_score = instance_to_score[ann['instance_token']]
                tracking_score = np.clip(tracking_score + random.random() * 0.3, 0, 1)

                if add_errors:
                    translation += 4 * (np.random.rand(3) - 0.5)
                    size *= (np.random.rand(3) + 0.5)
                    rotation += (np.random.rand(4) - 0.5) * .1
                    velocity *= np.random.rand(3)[:2] + 0.5

                sample_res.append({
                    'sample_token': sample['token'],
                    'translation': list(translation),
                    'size': list(size),
                    'rotation': list(rotation),
                    'velocity': list(velocity),
                    'tracking_id': tracking_id,
                    'tracking_name': tracking_name,
                    'tracking_score': tracking_score
                })
            mock_results[sample['token']] = sample_res
        mock_submission = {
            'meta': mock_meta,
            'results': mock_results
        }
        return mock_submission
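
A sketch of how the add_errors flag might be exercised (the class name TrackingEvalTest and the dataroot are placeholders):

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes')
clean = TrackingEvalTest._mock_submission(nusc, 'mini_val', add_errors=False)
noisy = TrackingEvalTest._mock_submission(nusc, 'mini_val', add_errors=True)
# The clean submission reproduces the GT tracks; the noisy one perturbs
# every box pose and re-labels roughly 10% of classes and track ids.
print(len(clean['results']), len(noisy['results']))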
Example #4
import mmcv


def load_merge_from_pkl(nusc: NuScenes,
                        pkl_path: str,
                        box_cls,
                        verbose: bool = False) -> EvalBoxes:
    """
    Loads GT boxes for the samples listed in an mmcv-style infos .pkl file,
    keeps only the annotations visible in the CAM_FRONT image and merges the
    detection classes into coarser categories.
    :param nusc: A NuScenes instance.
    :param pkl_path: Path to the .pkl file whose 'infos' entries name the samples to load.
    :param box_cls: Type of box to load; only DetectionBox is supported.
    :param verbose: Whether to print messages to stdout.
    :return: The GT boxes.
    """
    # Init.
    if box_cls == DetectionBox:
        attribute_map = {a['token']: a['name'] for a in nusc.attribute}

    if verbose:
        print('Loading annotations for samples listed in {} from nuScenes version: {}'.
              format(pkl_path, nusc.version))

    infos = mmcv.load(pkl_path)['infos']
    samples = []
    for info in infos:
        samples.append(nusc.get('sample', info['token']))
    all_annotations = EvalBoxes()

    # Map the 10 detection classes onto 4 coarse categories:
    # vehicle, pedestrian, bike and traffic_boundary.
    merge_map = dict(car='vehicle',
                     truck='vehicle',
                     bus='vehicle',
                     trailer='vehicle',
                     construction_vehicle='vehicle',
                     pedestrian='pedestrian',
                     motorcycle='bike',
                     bicycle='bike',
                     traffic_cone='traffic_boundary',
                     barrier='traffic_boundary')
    for sample in tqdm.tqdm(samples, leave=verbose):
        sample_token = sample['token']
        cam_token = sample['data']['CAM_FRONT']
        _, boxes_cam, _ = nusc.get_sample_data(cam_token)
        sample_annotation_tokens = [box.token for box in boxes_cam]

        # Unlike load_gt, which evaluates all boxes in sample['anns'],
        # only the annotations visible in the CAM_FRONT image are kept here.

        sample_boxes = []
        for sample_annotation_token in sample_annotation_tokens:

            sample_annotation = nusc.get('sample_annotation',
                                         sample_annotation_token)
            if box_cls == DetectionBox:
                # Get label name in detection task and filter unused labels.
                detection_name = category_to_detection_name(
                    sample_annotation['category_name'])
                if detection_name is None:
                    continue
                detection_name = merge_map[detection_name]

                # Get attribute_name.
                attr_tokens = sample_annotation['attribute_tokens']
                attr_count = len(attr_tokens)
                if attr_count == 0:
                    attribute_name = ''
                elif attr_count == 1:
                    attribute_name = attribute_map[attr_tokens[0]]
                else:
                    raise Exception(
                        'Error: GT annotations must not have more than one attribute!'
                    )

                sample_boxes.append(
                    box_cls(
                        sample_token=sample_token,
                        translation=sample_annotation['translation'],
                        size=sample_annotation['size'],
                        rotation=sample_annotation['rotation'],
                        velocity=nusc.box_velocity(
                            sample_annotation['token'])[:2],
                        num_pts=sample_annotation['num_lidar_pts'] +
                        sample_annotation['num_radar_pts'],
                        detection_name=detection_name,
                        detection_score=-1.0,  # GT samples do not have a score.
                        attribute_name=attribute_name))
            else:
                raise NotImplementedError('Error: Invalid box_cls %s!' %
                                          box_cls)

        all_annotations.add_boxes(sample_token, sample_boxes)

    if verbose:
        print("Loaded ground truth annotations for {} samples.".format(
            len(all_annotations.sample_tokens)))

    return all_annotations
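
A hypothetical invocation (the infos path follows mmdetection3d's naming and is an assumption, as is the dataroot; imports as in Example #1):

nusc = NuScenes(version='v1.0-trainval', dataroot='/data/sets/nuscenes')
merged_gt = load_merge_from_pkl(nusc, 'data/nuscenes/nuscenes_infos_val.pkl',
                                DetectionBox, verbose=True)
print('Loaded merged GT for', len(merged_gt.sample_tokens), 'samples')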