def main():
    """Evaluate detection results against ground truth with VOC-style mAP.

    Loads ground-truth and result annotations via the ``bt.datasets``
    loaders named by the CLI arguments, aligns the result class order to
    the ground-truth order, converts the per-image annotations into the
    per-class format expected by ``bt.eval_map``, and runs the evaluation.
    """
    args = parse_args()
    print('Loading ground truth and results information')
    gt_load_func = getattr(bt.datasets, 'load_' + args.gt_type)
    res_load_func = getattr(bt.datasets, 'load_' + args.res_type)
    gt_infos, gt_cls = gt_load_func(
        img_dir=args.img_dir,
        ann_dir=args.gt_ann_dir,
        classes=args.classes,
        nproc=args.nproc)
    res_infos, res_cls = res_load_func(
        img_dir=args.img_dir,
        ann_dir=args.res_ann_dir,
        classes=args.classes,
        nproc=args.nproc)
    # Remap result labels so they index into the ground-truth class list.
    bt.change_cls_order(res_infos, res_cls, gt_cls)

    print('Parsing ground truth and results information')
    id_mapper = {info['id']: i for i, info in enumerate(res_infos)}
    gts, res = [], []
    for gt_info in gt_infos:
        img_id = gt_info['id']
        res_info = res_infos[id_mapper[img_id]]
        # BUGFIX: original message was "f{args.res_type} ..." — a plain
        # string with a literal 'f', not an f-string, so the result type
        # was never interpolated into the error text.
        assert 'scores' in res_info['ann'], \
            f"{args.res_type} don't have scores information"
        res_bboxes = res_info['ann']['bboxes']
        res_labels = res_info['ann']['labels']
        res_scores = res_info['ann']['scores']
        # Append the score as a trailing column, then split the detections
        # into one array per ground-truth class.
        res_dets = np.concatenate([res_bboxes, res_scores[..., None]], axis=1)
        res_dets = [res_dets[res_labels == i] for i in range(len(gt_cls))]
        res.append(res_dets)

        gt_bboxes = gt_info['ann']['bboxes']
        gt_labels = gt_info['ann']['labels']
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the documented replacement is the builtin int.
        diffs = gt_info['ann'].get(
            'diffs', np.zeros(gt_bboxes.shape[0], dtype=int))
        gt_ann = {}
        if args.ign_diff > 0:
            # Route 'difficult' objects to the ignore lists so eval_map
            # neither rewards nor penalizes detections that match them.
            gt_ann['bboxes_ignore'] = gt_bboxes[diffs == 1]
            gt_ann['labels_ignore'] = gt_labels[diffs == 1]
            gt_bboxes = gt_bboxes[diffs == 0]
            gt_labels = gt_labels[diffs == 0]
        gt_ann['bboxes'] = gt_bboxes
        gt_ann['labels'] = gt_labels
        gts.append(gt_ann)

    print('Starting calculating mAP')
    bt.eval_map(
        res,
        gts,
        iou_thr=args.iou_thr,
        use_07_metric=args.voc_metric == '07',
        nproc=args.nproc,
        dataset=gt_cls)
def get_subset_by_classes(self):
    """Reorder annotation labels from ``ori_CLASSES`` to ``CLASSES``.

    Delegates to ``bt.change_cls_order``, which updates
    ``self.data_infos`` in place, and returns that same list.
    """
    infos = self.data_infos
    bt.change_cls_order(infos, self.ori_CLASSES, self.CLASSES)
    return infos