Example #1
    def lyft_eval(self, det_annos, class_names, iou_thresholds=[0.5]):
        from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft
        from . import lyft_utils
        # from lyft_dataset_sdk.eval.detection.mAP_evaluation import get_average_precisions
        from .lyft_mAP_eval.lyft_eval import get_average_precisions

        lyft = Lyft(json_path=self.root_path / 'data',
                    data_path=self.root_path,
                    verbose=True)

        det_lyft_boxes, sample_tokens = lyft_utils.convert_det_to_lyft_format(
            lyft, det_annos)
        gt_lyft_boxes = lyft_utils.load_lyft_gt_by_tokens(lyft, sample_tokens)

        average_precisions = get_average_precisions(gt_lyft_boxes,
                                                    det_lyft_boxes,
                                                    class_names,
                                                    iou_thresholds)

        ap_result_str, ap_dict = lyft_utils.format_lyft_results(
            average_precisions,
            class_names,
            iou_thresholds,
            version=self.dataset_cfg.VERSION)

        return ap_result_str, ap_dict
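
How this evaluator might be called (a sketch; the enclosing dataset class is not shown in the snippet, so `dataset`, `det_annos`, and the class list below are assumptions):

# Hypothetical call site: `dataset` is an instance of the class that defines
# lyft_eval, and `det_annos` holds its detection results in that class's format.
ap_result_str, ap_dict = dataset.lyft_eval(
    det_annos,
    class_names=['car', 'pedestrian', 'bicycle'],
    iou_thresholds=[0.5, 0.7])
print(ap_result_str)  # formatted per-class AP table
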
Example #2
def export_2d_annotation(root_path, info_path, version):
    """Export 2d annotation from the info file and raw data.

    Args:
        root_path (str): Root path of the raw data.
        info_path (str): Path of the info file.
        version (str): Dataset version.
    """
    # `warnings` is the standard-library module (the snippet originally called
    # `warning.warn`, which is not a valid API); requires `import warnings`.
    warnings.warn('2D annotations are not used on the Lyft dataset. '
                  'The function export_2d_annotation will be deprecated.',
                  DeprecationWarning)
    # get bbox annotations for camera
    camera_types = [
        'CAM_FRONT',
        'CAM_FRONT_RIGHT',
        'CAM_FRONT_LEFT',
        'CAM_BACK',
        'CAM_BACK_LEFT',
        'CAM_BACK_RIGHT',
    ]
    lyft_infos = mmcv.load(info_path)['infos']
    lyft = Lyft(data_path=osp.join(root_path, version),
                json_path=osp.join(root_path, version, version),
                verbose=True)
    # info_2d_list = []
    cat2Ids = [
        dict(id=lyft_categories.index(cat_name), name=cat_name)
        for cat_name in lyft_categories
    ]
    coco_ann_id = 0
    coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
    for info in mmcv.track_iter_progress(lyft_infos):
        for cam in camera_types:
            cam_info = info['cams'][cam]
            coco_infos = get_2d_boxes(lyft,
                                      cam_info['sample_data_token'],
                                      visibilities=['', '1', '2', '3', '4'])
            (height, width, _) = mmcv.imread(cam_info['data_path']).shape
            coco_2d_dict['images'].append(
                dict(file_name=cam_info['data_path'],
                     id=cam_info['sample_data_token'],
                     width=width,
                     height=height))
            for coco_info in coco_infos:
                if coco_info is None:
                    continue
                # add an empty key for coco format
                coco_info['segmentation'] = []
                coco_info['id'] = coco_ann_id
                coco_2d_dict['annotations'].append(coco_info)
                coco_ann_id += 1
    mmcv.dump(coco_2d_dict, f'{info_path[:-4]}.coco.json')
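
A minimal sketch of invoking the converter (the paths are placeholders, not from the snippet):

# Hypothetical invocation; adjust the paths to your local layout.
export_2d_annotation(root_path='./data/lyft',
                     info_path='./data/lyft/lyft_infos_train.pkl',
                     version='v1.01-train')
# The COCO-style result is dumped next to the info file, here as
# ./data/lyft/lyft_infos_train.coco.json.
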
Example #3
    def __init__(self, dataset_path, out_path, version='v1.01-train'):

        assert version in ('v1.01-train', 'v1.01-test', 'sample')
        # note: 'sample' passes the assert but is not handled below
        self.is_test = 'test' in version
        self.out_path = out_path

        self.lyft = Lyft(data_path=join(dataset_path, version),
                         json_path=join(dataset_path, version, 'data'),
                         verbose=True)

        if version == 'v1.01-train':
            # use context managers so the split files are closed after reading
            with open(join(dirname(__file__),
                           '../ml3d/datasets/_resources/lyft/train.txt')) as f:
                train_scenes = f.read().split('\n')
            with open(join(dirname(__file__),
                           '../ml3d/datasets/_resources/lyft/val.txt')) as f:
                val_scenes = f.read().split('\n')
        elif version == 'v1.01-test':
            with open(join(dirname(__file__),
                           '../ml3d/datasets/_resources/lyft/test.txt')) as f:
                train_scenes = f.read().split('\n')
            val_scenes = []
        else:
            raise ValueError(f'unsupported version: {version}')

        self.version = version
        self.mapping = self.get_mapping()

        available_scenes = self.get_available_scenes()
        names = [sc['name'] for sc in available_scenes]
        train_scenes = list(filter(lambda x: x in names, train_scenes))
        val_scenes = list(filter(lambda x: x in names, val_scenes))

        train_scenes = {
            available_scenes[names.index(s)]['token'] for s in train_scenes
        }
        val_scenes = {
            available_scenes[names.index(s)]['token'] for s in val_scenes
        }

        if not self.is_test:
            print(f"train_scenes : {len(train_scenes)}, "
                  f"val_scenes : {len(val_scenes)}")
        else:
            # for the test split the scene tokens are kept in train_scenes
            print(f"test scenes : {len(train_scenes)}")

        self.train_scenes = train_scenes
        self.val_scenes = val_scenes
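
The snippet shows only the constructor; a usage sketch, assuming the enclosing class is a preprocessing converter (the class name `LyftProcess` and the paths are assumptions):

# Hypothetical: the enclosing class name is not visible in the snippet.
converter = LyftProcess(dataset_path='./data/lyft',
                        out_path='./data/lyft_processed',
                        version='v1.01-train')
print(len(converter.train_scenes), len(converter.val_scenes))
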
Example #4
    def _evaluate_single(self,
                         result_path,
                         logger=None,
                         metric='bbox',
                         result_name='pts_bbox'):
        """Evaluation for a single model in Lyft protocol.

        Args:
            result_path (str): Path of the result file.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            metric (str): Metric name used for evaluation. Default: 'bbox'.
            result_name (str): Result name in the metric prefix.
                Default: 'pts_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """

        # equivalent to osp.join(*osp.split(result_path)[:-1])
        output_dir = osp.dirname(result_path)
        lyft = Lyft(data_path=osp.join(self.data_root, self.version),
                    json_path=osp.join(self.data_root, self.version,
                                       self.version),
                    verbose=True)
        eval_set_map = {
            'v1.01-train': 'val',
        }
        metrics = lyft_eval(lyft, self.data_root, result_path,
                            eval_set_map[self.version], output_dir, logger)

        # record metrics
        detail = dict()
        metric_prefix = f'{result_name}_Lyft'

        for i, name in enumerate(metrics['class_names']):
            AP = float(metrics['mAPs_cate'][i])
            detail[f'{metric_prefix}/{name}_AP'] = AP

        detail[f'{metric_prefix}/mAP'] = metrics['Final mAP']
        return detail
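
A sketch of calling this method directly (in practice it is usually reached through a public evaluate entry point; `dataset` and the result path are assumptions):

# Hypothetical call; the result file would normally be produced by the
# dataset's result-formatting step beforehand.
detail = dataset._evaluate_single('./work_dir/results/pts_bbox/results.json')
for key, value in detail.items():
    print(key, value)  # keys look like 'pts_bbox_Lyft/car_AP', 'pts_bbox_Lyft/mAP'
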
Example #5
def create_lyft_infos(root_path,
                      info_prefix,
                      version='v1.01-train',
                      max_sweeps=10):
    """Create info file of lyft dataset.

    Given the raw data, generate its related info file in pkl format.

    Args:
        root_path (str): Path of the data root.
        info_prefix (str): Prefix of the info file to be generated.
        version (str): Version of the data.
            Default: 'v1.01-train'
        max_sweeps (int): Max number of sweeps.
            Default: 10
    """
    lyft = Lyft(data_path=osp.join(root_path, version),
                json_path=osp.join(root_path, version, version),
                verbose=True)
    available_vers = ['v1.01-train', 'v1.01-test']
    assert version in available_vers
    if version == 'v1.01-train':
        train_scenes = mmcv.list_from_file('data/lyft/train.txt')
        val_scenes = mmcv.list_from_file('data/lyft/val.txt')
    elif version == 'v1.01-test':
        train_scenes = mmcv.list_from_file('data/lyft/test.txt')
        val_scenes = []
    else:
        # unreachable given the assert above; kept for defensive clarity
        raise ValueError(f'unknown version: {version}')

    # filter existing scenes.
    available_scenes = get_available_scenes(lyft)
    available_scene_names = [s['name'] for s in available_scenes]
    train_scenes = list(
        filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = {
        available_scenes[available_scene_names.index(s)]['token']
        for s in train_scenes
    }
    val_scenes = {
        available_scenes[available_scene_names.index(s)]['token']
        for s in val_scenes
    }

    test = 'test' in version
    if test:
        # for the test split the scene tokens are kept in train_scenes
        print(f'test scene: {len(train_scenes)}')
    else:
        # adjacent f-strings avoid the stray whitespace that the original
        # backslash continuation embedded in the printed message
        print(f'train scene: {len(train_scenes)}, '
              f'val scene: {len(val_scenes)}')
    train_lyft_infos, val_lyft_infos = _fill_trainval_infos(
        lyft, train_scenes, val_scenes, test, max_sweeps=max_sweeps)

    metadata = dict(version=version)
    if test:
        print(f'test sample: {len(train_lyft_infos)}')
        data = dict(infos=train_lyft_infos, metadata=metadata)
        info_name = f'{info_prefix}_infos_test'
        info_path = osp.join(root_path, f'{info_name}.pkl')
        mmcv.dump(data, info_path)
    else:
        print(f'train sample: {len(train_lyft_infos)}, '
              f'val sample: {len(val_lyft_infos)}')
        data = dict(infos=train_lyft_infos, metadata=metadata)
        train_info_name = f'{info_prefix}_infos_train'
        info_path = osp.join(root_path, f'{train_info_name}.pkl')
        mmcv.dump(data, info_path)
        data['infos'] = val_lyft_infos
        val_info_name = f'{info_prefix}_infos_val'
        info_val_path = osp.join(root_path, f'{val_info_name}.pkl')
        mmcv.dump(data, info_val_path)
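
A minimal invocation sketch (paths are placeholders; note that the scene-split files are read from the hard-coded 'data/lyft/*.txt' paths relative to the working directory):

# Hypothetical invocation; adjust root_path to your local layout.
create_lyft_infos(root_path='./data/lyft',
                  info_prefix='lyft',
                  version='v1.01-train',
                  max_sweeps=10)
# Writes ./data/lyft/lyft_infos_train.pkl and ./data/lyft/lyft_infos_val.pkl.
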