Example #1
    def __init__(self, imgset, classwise, ann_file, img_prefix, *args,
                 **kwargs):
        self.imgset = imgset
        if classwise:
            HRSCDataset.CLASSES = bt.get_classes('hrsc_cls')
        else:
            HRSCDataset.CLASSES = bt.get_classes('hrsc')
        super(HRSCDataset, self).__init__(*args,
                                          ann_file=ann_file,
                                          img_prefix=img_prefix,
                                          **kwargs)
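
A minimal instantiation sketch; the paths and imgset file below are placeholders, and the remaining keyword arguments (such as pipeline) follow the parent CustomDataset signature:

# Hypothetical paths; classwise=True would switch CLASSES to the
# fine-grained 'hrsc_cls' list before the parent constructor runs.
dataset = HRSCDataset(imgset='data/HRSC2016/ImageSets/trainval.txt',
                      classwise=False,
                      ann_file='data/HRSC2016/FullDataSet/Annotations',
                      img_prefix='data/HRSC2016/FullDataSet/AllImages',
                      pipeline=[])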
Example #2
    @classmethod
    def get_classes(cls, classes=None):
        if classes is None:
            cls.custom_classes = False
            return cls.CLASSES

        cls.custom_classes = True
        return bt.get_classes(classes)
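
A short usage sketch of this classmethod (the SomeDataset name is hypothetical; the 'dior' alias is one that bt.get_classes is known to accept, see Example #4):

classes = SomeDataset.get_classes()        # falls back to the class-level CLASSES
custom = SomeDataset.get_classes('dior')   # resolved by bt.get_classes;
                                           # also sets custom_classes = True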
Example #3
import json
import os
import os.path as osp
from functools import partial
from multiprocessing import Manager, Pool
from random import shuffle

import matplotlib.pyplot as plt
import numpy as np

import BboxToolkit as bt  # helper package providing the load/vis utilities below

# `parse_args` and `single_vis` are defined elsewhere in this script.


def main():
    args = parse_args()

    print(f'{args.load_type} loading!')
    load_func = getattr(bt.datasets, 'load_' + args.load_type)
    contents, classes = load_func(img_dir=args.img_dir,
                                  ann_dir=args.ann_dir,
                                  classes=args.classes,
                                  nproc=args.load_nproc)
    if args.prior_annfile is not None:
        prior, _ = bt.load_pkl(args.prior_annfile, classes=classes)
        bt.merge_prior_contents(contents, prior, merge_type=args.merge_type)

    shown_names = classes if args.shown_names is None \
            else bt.get_classes(args.shown_names)
    assert len(shown_names) == len(classes)

    if isinstance(args.ids, (list, type(None))):
        ids = args.ids
    elif isinstance(args.ids, str):
        if osp.isfile(args.ids):
            with open(args.ids, 'r') as f:
                ids = [l.strip() for l in f]
        else:
            ids = args.ids.split('|')
    else:
        raise TypeError(f'Unsupported type for `ids`: {type(args.ids)}')

    tasks, max_label = [], 0
    for content in contents:
        if ids is not None and content['id'] not in ids:
            continue

        imgpath = osp.join(args.img_dir, content['filename'])
        out_file = osp.join(args.save_dir, content['filename']) \
                if args.save_dir else None
        if 'ann' in content:
            ann = content['ann']
            bboxes = ann['bboxes']
            labels = ann['labels']
            scores = ann.get('scores', None)
        else:
            bboxes = np.zeros((0, 4), dtype=np.float64)
            labels = np.zeros((0, ), dtype=np.int64)
            scores = None

        if (scores is not None) and (args.score_thr > 0):
            keep = scores > args.score_thr
            bboxes = bboxes[keep]
            labels = labels[keep]
            scores = scores[keep]

        if args.skip_empty and bboxes.size == 0:
            continue

        if labels.size > 0:
            max_label = max(max_label, labels.max())
        tasks.append((imgpath, out_file, bboxes, labels, scores))

    if args.colors == 'random':
        args.colors = bt.random_colors(max_label + 1)

    if args.random_vis:
        shuffle(tasks)

    if args.save_dir and (not osp.exists(args.save_dir)):
        os.makedirs(args.save_dir)

    if args.show_off:
        plt.switch_backend('Agg')

    manager = Manager()
    _vis_func = partial(single_vis,
                        btype=args.shown_btype,
                        class_names=shown_names,
                        colors=args.colors,
                        thickness=args.thickness,
                        text_off=args.text_off,
                        font_size=args.font_size,
                        show_off=args.show_off,
                        wait_time=args.wait_time,
                        lock=manager.Lock(),
                        prog=manager.Value('i', 0),
                        total=len(tasks))
    if args.show_off and args.vis_nproc > 1:
        pool = Pool(args.vis_nproc)
        pool.map(_vis_func, tasks)
        pool.close()
    else:
        list(map(_vis_func, tasks))

    if args.save_dir:
        arg_dict = vars(args)
        arg_dict.pop('base_json', None)
        with open(osp.join(args.save_dir, 'vis_config.json'), 'w') as f:
            json.dump(arg_dict, f, indent=4)
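
For reference, the `ids` branch above accepts several input forms; a sketch with hypothetical image ids:

args.ids = None                  # no filtering, visualize every image
args.ids = ['P0001', 'P0003']    # an explicit list of ids
args.ids = 'P0001|P0003'         # a '|'-separated string, split at runtime
args.ids = 'ids.txt'             # a text file with one id per line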
Example #4
class DIORDataset(CustomDataset):

    CLASSES = bt.get_classes('dior')

    def __init__(self,
                 xmltype,
                 imgset,
                 ann_file,
                 img_prefix,
                 *args,
                 **kwargs):
        assert xmltype in ['hbb', 'obb']
        self.xmltype = xmltype
        self.imgset = imgset
        super(DIORDataset, self).__init__(*args,
                                          ann_file=ann_file,
                                          img_prefix=img_prefix,
                                          **kwargs)

    @classmethod
    def get_classes(cls, classes=None):
        if classes is None:
            cls.custom_classes = False
            return cls.CLASSES

        cls.custom_classes = True
        return bt.get_classes(classes)

    def load_annotations(self, ann_file):
        contents, _ = bt.load_dior(
            img_dir=self.img_prefix,
            ann_dir=ann_file,
            classes=self.CLASSES,
            xmltype=self.xmltype)
        if self.imgset is not None:
            contents = bt.split_imgset(contents, self.imgset)
        return contents

    def pre_pipeline(self, results):
        results['cls'] = self.CLASSES
        super().pre_pipeline(results)

    def format_results(self, results, save_dir=None, **kwargs):
        assert len(results) == len(self.data_infos)
        contents = []
        for result, data_info in zip(results, self.data_infos):
            info = copy.deepcopy(data_info)
            info.pop('ann')

            ann, bboxes, labels, scores = dict(), list(), list(), list()
            for i, dets in enumerate(result):
                bboxes.append(dets[:, :-1])
                scores.append(dets[:, -1])
                labels.append(np.zeros((dets.shape[0], ), dtype=np.int64) + i)
            ann['bboxes'] = np.concatenate(bboxes, axis=0)
            ann['labels'] = np.concatenate(labels, axis=0)
            ann['scores'] = np.concatenate(scores, axis=0)
            info['ann'] = ann
            contents.append(info)

        if save_dir is not None:
            bt.save_pkl(save_dir, contents, self.CLASSES)
        return contents

    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 iou_thr=0.5,
                 scale_ranges=None,
                 use_07_metric=True,
                 proposal_nums=(100, 300, 1000)):

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == 'mAP':
            assert isinstance(iou_thr, float)
            mean_ap, _ = eval_arb_map(
                results,
                annotations,
                scale_ranges=scale_ranges,
                iou_thr=iou_thr,
                use_07_metric=use_07_metric,
                dataset=self.CLASSES,
                logger=logger)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_arb_recalls(
                gt_bboxes, results, True, proposal_nums, iou_thr, logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
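
The `results` layout that format_results and evaluate consume can be read off the loop above: one entry per image, each holding one (n, k) array per class whose last column is the score. A minimal sketch (the 6-column shape assumes 5 box parameters plus a score; adjust to the box type in use):

import numpy as np

num_classes = len(DIORDataset.CLASSES)
# One image: a single detection for class 0, every other class empty.
per_class = [np.zeros((0, 6), dtype=np.float32) for _ in range(num_classes)]
per_class[0] = np.array([[50., 50., 20., 10., 0.3, 0.95]], dtype=np.float32)
results = [per_class]
# dataset.format_results(results, save_dir='work_dirs/dior_dets.pkl')  # hypothetical path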
Example #5
def print_map_summary(mean_ap,
                      results,
                      dataset=None,
                      scale_ranges=None,
                      logger=None):
    """Print mAP and results of each class.

    A table will be printed to show the gts/dets/recall/AP of each class and
    the mAP.

    Args:
        mean_ap (float): Calculated from `eval_map()`.
        results (list[dict]): Calculated from `eval_map()`.
        dataset (list[str] | str | None): Dataset name or dataset classes.
        scale_ranges (list[tuple] | None): Range of scales to be evaluated.
        logger (logging.Logger | str | None): The way to print the mAP
            summary. See `mmdet.utils.print_log()` for details. Default: None.
    """

    if logger == 'silent':
        return

    if isinstance(results[0]['ap'], np.ndarray):
        num_scales = len(results[0]['ap'])
    else:
        num_scales = 1

    if scale_ranges is not None:
        assert len(scale_ranges) == num_scales

    num_classes = len(results)

    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
        aps[:, i] = cls_result['ap']
        num_gts[:, i] = cls_result['num_gts']

    if dataset is None:
        label_names = [str(i) for i in range(num_classes)]
    elif mmcv.is_str(dataset):
        label_names = bt.get_classes(dataset)
    else:
        label_names = dataset

    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]

    header = ['class', 'gts', 'dets', 'recall', 'ap']
    for i in range(num_scales):
        if scale_ranges is not None:
            print_log(f'Scale range {scale_ranges[i]}', logger=logger)
        table_data = [header]
        for j in range(num_classes):
            row_data = [
                label_names[j], num_gts[i, j], results[j]['num_dets'],
                f'{recalls[i, j]:.4f}', f'{aps[i, j]:.4f}'
            ]
            table_data.append(row_data)
        table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.4f}'])
        table = AsciiTable(table_data)
        table.inner_footing_row_border = True
        print_log('\n' + table.table, logger=logger)
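
A self-contained call sketch, with the dict keys taken from how the function indexes `results` above (the numbers are made up):

import numpy as np

results = [
    dict(recall=np.array([0.75]), ap=0.62, num_gts=40, num_dets=55),
    dict(recall=np.array([0.90]), ap=0.81, num_gts=25, num_dets=30),
]
print_map_summary(mean_ap=0.715, results=results, dataset=['ship', 'plane'])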
Example #6
class RCTW17Dataset(CustomDataset):

    CLASSES = bt.get_classes('RCTW-17')

    def __init__(self, ann_file, img_prefix, imgset=None, *args, **kwargs):
        self.imgset = imgset
        super(RCTW17Dataset, self).__init__(*args,
                                            ann_file=ann_file,
                                            img_prefix=img_prefix,
                                            **kwargs)

    @classmethod
    def get_classes(cls, classes=None):
        if classes is None:
            cls.custom_classes = False
            return cls.CLASSES

        cls.custom_classes = True
        return bt.get_classes(classes)

    def load_annotations(self, ann_file):
        contents, _ = bt.load_rctw_17(img_dir=self.img_prefix,
                                      ann_dir=ann_file,
                                      classes=self.CLASSES)
        if self.imgset is not None:
            contents = bt.split_imgset(contents, self.imgset)
        return contents

    def pre_pipeline(self, results):
        results['cls'] = self.CLASSES
        super().pre_pipeline(results)

    def format_results(self, results, save_dir=None, **kwargs):
        assert len(results) == len(self.data_infos)
        id_list = [info['id'] for info in self.data_infos]
        if save_dir is not None:
            bt.save_rctw_17(save_dir, id_list, results)
        return results

    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 iou_thr=0.5,
                 scale_ranges=None,
                 use_07_metric=True,
                 proposal_nums=(100, 300, 1000)):

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == 'mAP':
            assert isinstance(iou_thr, float)
            mean_ap, _ = eval_arb_map(results,
                                      annotations,
                                      scale_ranges=scale_ranges,
                                      iou_thr=iou_thr,
                                      use_07_metric=use_07_metric,
                                      dataset=self.CLASSES,
                                      logger=logger)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_arb_recalls(gt_bboxes,
                                       results,
                                       True,
                                       proposal_nums,
                                       iou_thr,
                                       logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
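
Typical evaluation calls, using only the signature above (`dataset` is assumed to be an already constructed RCTW17Dataset, and `results` to follow the per-image, per-class layout shown after Example #4):

metrics = dataset.evaluate(results, metric='mAP', iou_thr=0.5)
recalls = dataset.evaluate(results, metric='recall',
                           iou_thr=0.5, proposal_nums=(100, 300))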