Example #1
def kitti_eval(det_results, dataset, iou_thr=0.5):
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
        # if i>10:
        # 	break
    if not gt_ignore:
        gt_ignore = None

    dataset_name = 'kitti'

    eval_map(det_results,
             gt_bboxes,
             gt_labels,
             gt_ignore=gt_ignore,
             scale_ranges=None,
             iou_thr=iou_thr,
             dataset=dataset_name,
             print_summary=True)
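
A minimal driver for the snippet above, assuming the detections were dumped to a .pkl by mmdetection's test script and the KITTI test split is described by a config file; both paths below are placeholders.

import mmcv
from mmdet.datasets import build_dataset

cfg = mmcv.Config.fromfile('configs/kitti/my_test_cfg.py')  # placeholder config path
dataset = build_dataset(cfg.data.test)
det_results = mmcv.load('work_dirs/kitti/results.pkl')      # placeholder results file
kitti_eval(det_results, dataset, iou_thr=0.5)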
Example #2
def voc_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []

    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        #if 'bboxes_ignore' in ann:
        #ignore = np.concatenate([
        #    np.zeros(bboxes.shape[0], dtype=np.bool),
        #    np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
        #])
        #gt_ignore.append(ignore)
        #bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
        #print(ann)
        #labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)

    if not gt_ignore:
        gt_ignore = None

    eval_map(det_results,
             gt_bboxes,
             gt_labels,
             gt_ignore=gt_ignore,
             scale_ranges=None,
             iou_thr=iou_thr,
             dataset=dataset.CLASSES,
             print_summary=True)
Example #3
def voc_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            print("!")
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(det_results,
             gt_bboxes,
             gt_labels,
             gt_ignore=gt_ignore,
             scale_ranges=None,
             iou_thr=iou_thr,
             dataset=dataset_name,
             print_summary=True)
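
For reference, the ignore-mask pattern used above (and repeated in most of the snippets below) on tiny synthetic arrays. Plain bool is used here because NumPy 1.24 removed the np.bool alias that the original snippets rely on.

import numpy as np

bboxes = np.array([[0, 0, 10, 10], [5, 5, 20, 20]], dtype=np.float32)
bboxes_ignore = np.array([[30, 30, 40, 40]], dtype=np.float32)
# zeros mark regular GT boxes, ones mark the appended ignore boxes
ignore = np.concatenate([
    np.zeros(bboxes.shape[0], dtype=bool),
    np.ones(bboxes_ignore.shape[0], dtype=bool)
])
bboxes = np.vstack([bboxes, bboxes_ignore])  # stacked GT, shape (3, 4)
print(ignore)                                # [False False  True]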
Example #4
def voc_eval(result_file, dataset, iou_thr=0.5, nproc=4):
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES

    eval_map(det_results, annotations, scale_ranges=None, iou_thr=iou_thr, dataset=dataset_name)
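
Example #4 uses the newer eval_map interface, which takes a list of per-image annotation dicts instead of separate gt_bboxes / gt_labels / gt_ignore lists. A self-contained sketch with one image, one class and synthetic boxes (assumes mmdet 2.x, where labels are 0-based):

import numpy as np
from mmdet.core import eval_map

det_results = [[
    np.array([[10, 10, 50, 50, 0.9]], dtype=np.float32)  # class 0: (k, 5) boxes with scores
]]
annotations = [{
    'bboxes': np.array([[12, 12, 48, 48]], dtype=np.float32),  # (n, 4)
    'labels': np.array([0])                                    # (n, )
}]
mean_ap, per_class = eval_map(det_results, annotations, iou_thr=0.5)
print(mean_ap)  # 1.0 for this toy case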
Example #5
def voc_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    if iou_thr == -1:
        print("Start COCO-style evaluation")
        mean_ap_list = {}
        for iou_thr in np.arange(0.5, 1.0, 0.05):
            mean_ap, eval_results = eval_map(det_results,
                                             gt_bboxes,
                                             gt_labels,
                                             gt_ignore=gt_ignore,
                                             scale_ranges=None,
                                             iou_thr=iou_thr,
                                             dataset=dataset_name,
                                             print_summary=False)
            mean_ap_list[iou_thr] = mean_ap
        mean_ap = np.mean(list(mean_ap_list.values()))
        mean_ap_list[-1] = mean_ap
        print("Path: {} mAP: {}".format(result_file, mean_ap_list))
        return mean_ap_list
    else:
        mean_ap, eval_results = eval_map(det_results,
                                         gt_bboxes,
                                         gt_labels,
                                         gt_ignore=gt_ignore,
                                         scale_ranges=None,
                                         iou_thr=iou_thr,
                                         dataset=dataset_name,
                                         print_summary=True)
        print("mAP: {}".format({iou_thr: mean_ap}))
        return {iou_thr: mean_ap}
Example #6
def voc_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=True)
    # added by WSK
    # this is added to compute the COCO-style AP
    iou_thr_list = [0.5, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95]
    mean_ap_list = []
    ap_COCO = 0
    for thr in iou_thr_list:
        mean_ap, eval_results = eval_map(
            det_results,
            gt_bboxes,
            gt_labels,
            gt_ignore=gt_ignore,
            scale_ranges=None,
            iou_thr=thr,
            dataset=dataset_name,
            print_summary=True)
        ap_COCO = ap_COCO + mean_ap*0.1
        mean_ap_list.append(mean_ap)
    print('the ap_COCO is ', ap_COCO)
    print('the mean_ap_list is ', mean_ap_list)
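
A compact variant of the COCO-style averaging done in the two snippets above, assuming the same det_results / gt_* variables and the older eval_map signature are already in scope.

import numpy as np

mean_aps = []
for thr in np.arange(0.5, 1.0, 0.05):
    mean_ap, _ = eval_map(det_results,
                          gt_bboxes,
                          gt_labels,
                          gt_ignore=gt_ignore,
                          scale_ranges=None,
                          iou_thr=thr,
                          dataset=dataset_name,
                          print_summary=False)
    mean_aps.append(mean_ap)
print('COCO-style mAP@[.5:.95]:', np.mean(mean_aps))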
Example #7
def voc_eval(result_file, dataset, cfg, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        #print(ann.keys())
        print(ann['filename'])
        filename = ann['filename']
        bboxes = ann['bboxes']
        labels = ann['labels']
        #print("bboxes----")
        #print(bboxes)
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)

        # resize back
        for class_id in range(len(det_results[i])):
            det_bboxes = det_results[i][class_id]
            det_bboxes[:, 0] = det_bboxes[:, 0] / cfg.input_size * ann['width']
            det_bboxes[:,
                       1] = det_bboxes[:, 1] / cfg.input_size * ann['height']
            det_bboxes[:, 2] = det_bboxes[:, 2] / cfg.input_size * ann['width']
            det_bboxes[:,
                       3] = det_bboxes[:, 3] / cfg.input_size * ann['height']
            det_results[i][class_id] = det_bboxes

    # the ignore masks collected above are discarded before evaluation
    gt_ignore = None
    #print(len(gt_ignore))
    print("*************************************")
    #print(det_results)
    dataset_name = 'aicourt'
    eval_map(det_results,
             gt_bboxes,
             gt_labels,
             gt_ignore=gt_ignore,
             scale_ranges=None,
             iou_thr=iou_thr,
             dataset=dataset_name,
             print_summary=True)
Example #8
def eval(result_file, dataset, iou_thr=0.5, dataset_name='voc', split='16_4'):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None

    mean_ap, eval_results = eval_map(det_results,
                                     gt_bboxes,
                                     gt_labels,
                                     gt_ignore=gt_ignore,
                                     scale_ranges=None,
                                     iou_thr=iou_thr,
                                     dataset=dataset.CLASSES,
                                     dataset_name=dataset_name,
                                     print_summary=True,
                                     split=split)

    return mean_ap, eval_results
Example #9
 def evaluate(self,
              results,
              metric='mAP',
              logger=None,
              iou_thr=0.5,
              nproc=4,
              **kwargs):
     if not isinstance(metric, str):
         assert len(metric) == 1
         metric = metric[0]
     if 'logger' in kwargs.keys():
         logger = kwargs['logger']
     ds_name = self.CLASSES
     allowed_metrics = ['mAP']
     if metric not in allowed_metrics:
         raise KeyError('metric {} is not supported'.format(metric))
     annotations = [self.get_ann_info(i) for i in range(len(self))]
     eval_results = {}
     assert isinstance(iou_thr, float)
     mean_ap, _ = eval_map(results,
                           annotations,
                           scale_ranges=None,
                           iou_thr=iou_thr,
                           dataset=ds_name,
                           logger=logger)
     eval_results['mAP'] = mean_ap
     return eval_results
Example #10
def voc_eval_with_return(result_file,
                         dataset,
                         iou_thr=0.5,
                         logger='print',
                         only_ap=True):
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    mean_ap, eval_results = eval_map(
        det_results,
        annotations,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        logger=logger)

    if only_ap:
        eval_results = [{
            'ap': eval_results[i]['ap']
        } for i in range(len(eval_results))]

    return mean_ap, eval_results
Example #11
    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = [
            'mAP',
        ]
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')

        annotations = [self.get_ann_info(i) for i in range(len(self))]

        eval_results = {}

        if metric == 'mAP':
            assert isinstance(iou_thr, float)

            for stage in range(3):

                if stage == 0:
                    ds_name = self.dataset.CLASSES_1
                    label = 'category'
                else:
                    ds_name = self.dataset.CLASSES_2
                    label = 'pests'

                stage_annotations = []
                for anno in annotations:
                    anno['labels'] = anno[label]
                    stage_annotations.append(anno)
                '''
                result (temp) : [[det_results_1], [det_results_2], [det_results_3]]
                det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
                    The outer list indicates images, and the inner list indicates
                    per-class detected bboxes.
                annotations (list[dict]): Ground truth annotations where each item of
                    the list indicates an image. Keys of annotations are:

                - `bboxes`: numpy array of shape (n, 4)
                - `labels`: numpy array of shape (n, )
                - `bboxes_ignore` (optional): numpy array of shape (k, 4)
                - `labels_ignore` (optional): numpy array of shape (k, )
        
                '''
                print('eval stage %d mAP from outputs' % (stage + 1))
                mean_ap, _ = eval_map(results[stage],
                                      stage_annotations,
                                      scale_ranges=None,
                                      iou_thr=iou_thr,
                                      dataset=ds_name,
                                      logger=logger)
                eval_results['mAP'] = mean_ap

        return eval_results
Example #12
    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
                Default: None.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
        if metric == 'mAP':
            assert isinstance(iou_thrs, list)
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
                mean_ap, _ = eval_map(results,
                                      annotations,
                                      scale_ranges=scale_ranges,
                                      iou_thr=iou_thr,
                                      dataset=self.CLASSES,
                                      logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes,
                                   results,
                                   proposal_nums,
                                   iou_thr,
                                   logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
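
A hedged call site for the method above, assuming dataset is an instance of this class and results came out of a test run; passing a list of thresholds yields per-threshold APs plus their mean.

eval_results = dataset.evaluate(results,
                                metric='mAP',
                                iou_thr=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75])
print(eval_results['AP50'], eval_results['mAP'])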
Example #13
    def evaluate(self,
                 results,
                 metric=None,
                 iou_thr=(0.25, 0.5),
                 iou_thr_2d=(0.5, ),
                 logger=None,
                 show=False,
                 out_dir=None):

        # evaluate 3D detection performance
        if isinstance(results[0], dict):
            return super().evaluate(results, metric, iou_thr, logger, show,
                                    out_dir)
        # evaluate 2D detection performance
        else:
            eval_results = OrderedDict()
            annotations = [self.get_ann_info(i) for i in range(len(self))]
            iou_thr_2d = (iou_thr_2d, ) if isinstance(iou_thr_2d,
                                                      float) else iou_thr_2d
            for iou_thr_2d_single in iou_thr_2d:
                mean_ap, _ = eval_map(results,
                                      annotations,
                                      scale_ranges=None,
                                      iou_thr=iou_thr_2d_single,
                                      dataset=self.CLASSES,
                                      logger=logger)
                eval_results['mAP_' + str(iou_thr_2d_single)] = mean_ap
            return eval_results
Example #14
File: custom.py Project: zyg11/TSD
    def evaluate(
        self,
        results,
        metric="mAP",
        logger=None,
        proposal_nums=(100, 300, 1000),
        iou_thr=0.5,
        scale_ranges=None,
    ):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. It must be a float
                when evaluating mAP, and can be a list when evaluating recall.
                Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
                Default: None.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ["mAP", "recall"]
        if metric not in allowed_metrics:
            raise KeyError("metric {} is not supported".format(metric))
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == "mAP":
            assert isinstance(iou_thr, float)
            mean_ap, _ = eval_map(
                results,
                annotations,
                scale_ranges=scale_ranges,
                iou_thr=iou_thr,
                dataset=self.CLASSES,
                logger=logger,
            )
            eval_results["mAP"] = mean_ap
        elif metric == "recall":
            gt_bboxes = [ann["bboxes"] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_recalls(
                gt_bboxes, results, proposal_nums, iou_thr, logger=logger
            )
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results["recall@{}@{}".format(num, iou)] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results["AR@{}".format(num)] = ar[i]
        return eval_results
Example #15
def get_iou_recall_curve(cfg_path, pth_path, out_path):
    cfg = mmcv.Config.fromfile(cfg_path)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.

    distributed = False

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = init_detector(cfg_path, pth_path, device='cuda:0')

    results = [None for _ in range(len(dataset))]
    print(dataset)
    for idx in range(0, len(dataset), 1):
        data = dataset[idx]['img'][0].numpy()
        data = data.transpose((1, 2, 0))

        # compute output
        result = inference_detector(model, data)
        results[idx] = result

    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    out_list = []
    for thr in range(0, 21):
        mean_ap, eval_results = eval_map(
                                        results,
                                        annotations,
                                        scale_ranges=None,
                                        iou_thr=thr/20,
                                        dataset=None,
                                        logger=None,
                                        nproc=1)

        out_list.append([thr/20, eval_results[0]['ap'][-1], eval_results[1]['ap'][-1],mean_ap])
    print(out_list)
    f = open(out_path, 'w')
    for ln in out_list:
        f.write(str(ln[0]))
        f.write(',')
        f.write(str(ln[1]))
        f.write(',')
        f.write(str(ln[2]))
        f.write('\n')
    f.close()
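
Each entry of out_list above holds four values (threshold, two per-class APs, mAP), but the manual file writing only stores the first three. A sketch with the csv module that keeps every column, assuming the same out_list layout.

import csv

with open(out_path, 'w', newline='') as f:
    csv.writer(f).writerows(out_list)  # iou_thr, ap_class0, ap_class1, mean_ap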
Example #16
File: vid_eval.py Project: youthHan/HVRNet
def vid_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = ('airplane', 'antelope', 'bear', 'bicycle', 'bird',
                        'bus', 'car', 'cattle', 'dog', 'domestic_cat',
                        'elephant', 'fox', 'giant_panda', 'hamster', 'horse',
                        'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
                        'red_panda', 'sheep', 'snake', 'squirrel', 'tiger',
                        'train', 'turtle', 'watercraft', 'whale', 'zebra')
        # dataset_name = dataset.CLASSES
    eval_map(det_results,
             gt_bboxes,
             gt_labels,
             gt_ignore=gt_ignore,
             scale_ranges=None,
             iou_thr=iou_thr,
             dataset=dataset_name,
             print_summary=True)
Example #17
    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError('metric {} is not supported'.format(metric))
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == 'mAP':
            assert isinstance(iou_thr, float)
            if self.year == 2007:
                ds_name = 'voc07'
            else:

                #ds_name = self.dataset.CLASSES
                ds_name = (
                    'Benign',
                    'Malignant',
                )
            mean_ap, _ = eval_map(results,
                                  annotations,
                                  scale_ranges=None,
                                  iou_thr=iou_thr,
                                  dataset=ds_name,
                                  logger=logger)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_recalls(gt_bboxes,
                                   results,
                                   proposal_nums,
                                   iou_thr,
                                   logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results['recall@{}@{}'.format(num, iou)] = recalls[i,
                                                                            j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
        return eval_results
Example #18
def chips_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    gt_masks = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        masks = ann['masks']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            # masks = np.vstack([bboxes, ann['masks_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
        gt_masks.append(masks)
    if not gt_ignore:
        gt_ignore = None
    print(len(det_results[1][1]))
    print(det_results[1][1])
    dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        gt_bboxes,
        # gt_masks,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=True)
Example #19
File: voc.py Project: zyg11/TSD
 def evaluate(
         self,
         results,
         metric="mAP",
         logger=None,
         proposal_nums=(100, 300, 1000),
         iou_thr=0.5,
         scale_ranges=None,
 ):
     if not isinstance(metric, str):
         assert len(metric) == 1
         metric = metric[0]
     allowed_metrics = ["mAP", "recall"]
     if metric not in allowed_metrics:
         raise KeyError("metric {} is not supported".format(metric))
     annotations = [self.get_ann_info(i) for i in range(len(self))]
     eval_results = {}
     if metric == "mAP":
         assert isinstance(iou_thr, float)
         if self.year == 2007:
             ds_name = "voc07"
         else:
             ds_name = self.dataset.CLASSES
         mean_ap, _ = eval_map(
             results,
             annotations,
             scale_ranges=None,
             iou_thr=iou_thr,
             dataset=ds_name,
             logger=logger,
         )
         eval_results["mAP"] = mean_ap
     elif metric == "recall":
         gt_bboxes = [ann["bboxes"] for ann in annotations]
         if isinstance(iou_thr, float):
             iou_thr = [iou_thr]
         recalls = eval_recalls(gt_bboxes,
                                results,
                                proposal_nums,
                                iou_thr,
                                logger=logger)
         for i, num in enumerate(proposal_nums):
             for j, iou in enumerate(iou_thr):
                 eval_results["recall@{}@{}".format(num, iou)] = recalls[i,
                                                                         j]
         if recalls.shape[1] > 1:
             ar = recalls.mean(axis=1)
             for i, num in enumerate(proposal_nums):
                 eval_results["AR@{}".format(num)] = ar[i]
     return eval_results
Example #20
def txt_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        # for k, v in ann.items():
        #     print(k)
        if ('bboxes_ignore' in ann) and ('labels_ignore' in ann):
            print('1')
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)

    if not gt_ignore:
        gt_ignore = None

    dataset_name = dataset.CLASSES
    print(dataset_name)
    eval_map(det_results,
             gt_bboxes,
             gt_labels,
             gt_ignore=None,
             scale_ranges=None,
             iou_thr=iou_thr,
             dataset=dataset_name,
             print_summary=True)
Example #21
    def evaluate(self,
                 results,
                 metric=None,
                 iou_thr=(0.25, 0.5),
                 iou_thr_2d=(0.5, ),
                 logger=None,
                 show=False,
                 out_dir=None,
                 pipeline=None):
        """Evaluate.

        Evaluation in indoor protocol.

        Args:
            results (list[dict]): List of results.
            metric (str | list[str]): Metrics to be evaluated.
            iou_thr (list[float]): AP IoU thresholds.
            iou_thr_2d (list[float]): AP IoU thresholds for 2d evaluation.
            show (bool): Whether to visualize.
                Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.

        Returns:
            dict: Evaluation results.
        """
        # evaluate 3D detection performance
        if isinstance(results[0], dict):
            return super().evaluate(results, metric, iou_thr, logger, show,
                                    out_dir, pipeline)
        # evaluate 2D detection performance
        else:
            eval_results = OrderedDict()
            annotations = [self.get_ann_info(i) for i in range(len(self))]
            iou_thr_2d = (iou_thr_2d, ) if isinstance(iou_thr_2d,
                                                      float) else iou_thr_2d
            for iou_thr_2d_single in iou_thr_2d:
                mean_ap, _ = eval_map(
                    results,
                    annotations,
                    scale_ranges=None,
                    iou_thr=iou_thr_2d_single,
                    dataset=self.CLASSES,
                    logger=logger)
                eval_results['mAP_' + str(iou_thr_2d_single)] = mean_ap
            return eval_results
Example #22
File: holly_head.py Project: lhwcv/mmdet
    def evaluate(self,
                 results,
                 metric=['mAP'],
                 logger=None,
                 proposal_nums=(4, 10, 50),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. It must be a float
                when evaluating mAP, and can be a list when evaluating recall.
                Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
                Default: None.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['mAP']

        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}

        for metric in metrics:
            if metric == 'mAP':
                assert isinstance(iou_thr, float)
                mean_ap, _ = eval_map(results,
                                      annotations,
                                      scale_ranges=scale_ranges,
                                      iou_thr=iou_thr,
                                      dataset=self.CLASSES,
                                      logger=logger)
                eval_results['mAP'] = mean_ap
        return eval_results
Example #23
File: voc_eval.py Project: 13282803166/-
def voc_eval(result_file, dataset):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES

    ious = [0.1, 0.3, 0.5]
    mean_aps = 0
    for iou in ious:
        mean_ap, _ = eval_map(det_results,
                              gt_bboxes,
                              gt_labels,
                              gt_ignore=gt_ignore,
                              scale_ranges=None,
                              iou_thr=iou,
                              dataset=dataset_name,
                              print_summary=True)
        mean_aps += mean_ap

    print("\naverage mean_ap = {}\n".format(mean_aps / 3))
Example #24
def evaluate(result_file, dataset, iou_thresholds):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES

    mean_aps = []
    for iou_thr in iou_thresholds:
        mean_ap, _ = eval_map(det_results,
                              gt_bboxes,
                              gt_labels,
                              gt_ignore=gt_ignore,
                              scale_ranges=None,
                              iou_thr=iou_thr,
                              dataset=dataset_name,
                              print_summary=False)
        mean_aps.append(mean_ap)
    print(f'MAP: {np.mean(mean_aps)}')
Example #25
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    if args.out is None:
        dataset_name = dataset.name if hasattr(dataset, 'name') else 'val'
        if hasattr(cfg.data.test, 'task'):
            dataset_name = dataset_name + '_' + cfg.data.test.task
        model_name = os.path.basename(args.checkpoint).split('.')[0]
        model_dir = os.path.dirname(args.checkpoint)
        args.out = os.path.join(model_dir, 'raw_results',
                                dataset_name + '_' + model_name + '.pkl')
    elif not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    mmcv.mkdir_or_exist(os.path.dirname(args.out))

    rank, _ = get_dist_info()
    eval_types = args.eval
    if not os.path.isfile(args.out):
        # build the model and load checkpoint
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        checkpoint = load_checkpoint(
            model, args.checkpoint, map_location='cpu')
        # old versions did not save class info in checkpoints, this workaround is
        # for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES

        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show)
        else:
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)

        if rank == 0:
            if hasattr(dataset, 'raw_annotations'):
                filenames = [
                    dataset.raw_annotations[dataset.ids[i]]['filename']
                    for i in range(len(dataset))
                ]
            else:
                filenames = [
                    img_info['filename'] for img_info in dataset.img_infos
                ]

            print('\nwriting results to {}'.format(args.out))
            results = {
                'file_names': filenames,
                'outputs': outputs,
            }
            mmcv.dump(results, args.out, protocol=2)
    elif rank == 0:
        results = mmcv.load(args.out, encoding='latin1')
        outputs = results['outputs']

    if eval_types and rank == 0:
        print('Starting to evaluate {}'.format(' and '.join(eval_types)))
        if not hasattr(dataset, 'coco'):
            if hasattr(dataset, 'raw_annotations'):
                gt_bboxes = [
                    dataset.raw_annotations[dataset.ids[i]]['ann']['bboxes']
                    for i in range(len(dataset))
                ]
                gt_labels = [
                    dataset.raw_annotations[dataset.ids[i]]['ann']['classes']
                    for i in range(len(dataset))
                ]

                if cfg.data.test.with_ignore:
                    gt_ignores = [l <= 0 for l in gt_labels]
                else:
                    gt_ignores = [l == 0 for l in gt_labels]
                gt_labels = [np.abs(l) for l in gt_labels]
                if 'corners' in eval_types:
                    gt_corners = [
                        dataset.raw_annotations[dataset.ids[i]]['ann']
                        ['corners'] for i in range(len(dataset))
                    ]
                    gt_poses = [
                        dataset.raw_annotations[dataset.ids[i]]['ann']['poses']
                        for i in range(len(dataset))
                    ]
                    eval_corners(
                        outputs,
                        gt_corners,
                        gt_poses,
                        gt_labels,
                        gt_ignores,
                        gt_bboxes=gt_bboxes,
                        display=True)
                    det_bboxes = corners2bboxes(outputs,
                                                len(dataset.CLASSES) - 1)
                    eval_map(
                        det_bboxes, gt_bboxes, gt_labels, gt_ignore=gt_ignores)
                else:
                    eval_map(
                        outputs, gt_bboxes, gt_labels, gt_ignore=gt_ignores)
            else:
                gt_bboxes = [
                    img_info['ann']['bboxes'] for img_info in dataset.img_infos
                ]
                gt_labels = [
                    img_info['ann']['labels'] for img_info in dataset.img_infos
                ]
                if len(outputs[0]) == 5:
                    outputs = corners2bboxes(outputs, len(dataset.classes) - 1)
                eval_map(outputs, gt_bboxes, gt_labels, iou_thr=0.4)
        else:
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco, CLASSES=dataset.CLASSES)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco, CLASSES=dataset.CLASSES, show=True)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco, CLASSES=dataset.CLASSES, show=True)
Example #26
    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. It must be a float
                when evaluating mAP, and can be a list when evaluating recall.
                Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """
        if not isinstance(metric, str):  # check whether metric is a str
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        if metric == 'mAP':
            assert isinstance(iou_thr, float)
            if self.year == 2007:
                ds_name = 'voc07'
            else:
                ds_name = self.CLASSES
            mean_ap, _ = eval_map(results,
                                  annotations,
                                  scale_ranges=None,
                                  iou_thr=iou_thr,
                                  dataset=ds_name,
                                  logger=logger)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_recalls(gt_bboxes,
                                   results,
                                   proposal_nums,
                                   iou_thr,
                                   logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
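
A hedged usage sketch for the recall branch of the method above. For metric='recall' the per-image results are expected as plain proposal arrays of shape (k, 4) or (k, 5), not per-class lists; the variable names are assumptions.

recalls = dataset.evaluate(proposal_results,  # one (k, 5) proposal array per image
                           metric='recall',
                           proposal_nums=(100, 300, 1000),
                           iou_thr=[0.5, 0.75])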
Example #27
    # merge
    results = []
    iter = 1
    for img_id in tqdm(img_list):
        iter += 1
        img_name = img_id + '.jpg'
        det_nms = []
        if img_name in detecions:
            det = np.array(detecions[img_name])
            det = det[det[:, -2] > args.score_thr]
            for i in range(args.nclass):
                det_nms.append(nms(det[det[:, -1] == i, :5], iou_thr=0.5)[0])
        else:
            det_nms = [np.array([]).reshape(0, 5) for i in range(args.nclass)]
        results.append(det_nms)

        # ground truth
        xml_file = osp.join(source_anno, img_id + '.xml')
        bboxes, labels = getGtFromXml(xml_file)
        annotations.append({"bboxes": bboxes, "labels": labels})

        # show
        img_file = osp.join(source_img, img_name)
        # model.show_result(img_file, det_nms, out_file='source_result.jpg')

    # voc metric
    eval_results = eval_map(results, annotations, iou_thr=0.5,
                            logger="print")[0]
    pass
Example #28
    def evaluate(self,
                 results,
                 metric='track',
                 logger=None,
                 resfile_path=None,
                 bbox_iou_thr=0.5,
                 track_iou_thr=0.5):
        eval_results = dict()
        if isinstance(metric, list):
            metrics = metric
        elif isinstance(metric, str):
            metrics = [metric]
        else:
            raise TypeError('metric must be a list or a str.')
        allowed_metrics = ['bbox', 'track']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported.')

        if 'track' in metrics:
            resfiles, names, tmp_dir = self.format_results(
                results, resfile_path, metrics, logger)
            print_log('Evaluate CLEAR MOT results.', logger=logger)
            distth = 1 - track_iou_thr

            accs = []
            for name in names:
                if 'half-train' in self.ann_file:
                    gt_file = osp.join(self.img_prefix,
                                       f'{name}/gt/gt_half-train.txt')
                elif 'half-val' in self.ann_file:
                    gt_file = osp.join(self.img_prefix,
                                       f'{name}/gt/gt_half-val.txt')
                else:
                    gt_file = osp.join(self.img_prefix, f'{name}/gt/gt.txt')
                res_file = osp.join(resfiles['track'], f'{name}.txt')
                gt = mm.io.loadtxt(gt_file)
                res = mm.io.loadtxt(res_file)
                ini_file = osp.join(self.img_prefix, f'{name}/seqinfo.ini')
                if osp.exists(ini_file):
                    acc, ana = mm.utils.CLEAR_MOT_M(
                        gt, res, ini_file, distth=distth)
                else:
                    acc = mm.utils.compare_to_groundtruth(
                        gt, res, distth=distth)
                accs.append(acc)

            mh = mm.metrics.create()
            summary = mh.compute_many(
                accs,
                names=names,
                metrics=mm.metrics.motchallenge_metrics,
                generate_overall=True)
            str_summary = mm.io.render_summary(
                summary,
                formatters=mh.formatters,
                namemap=mm.io.motchallenge_metric_names)
            print(str_summary)

            eval_results.update({
                mm.io.motchallenge_metric_names[k]: v['OVERALL']
                for k, v in summary.to_dict().items()
            })

            if tmp_dir is not None:
                tmp_dir.cleanup()

        if 'bbox' in metrics:
            if isinstance(results, dict):
                bbox_results = results['bbox_results']
            elif isinstance(results, list):
                bbox_results = results
            else:
                raise TypeError('results must be a dict or a list.')
            annotations = [self.get_ann_info(info) for info in self.data_infos]
            mean_ap, _ = eval_map(
                bbox_results,
                annotations,
                iou_thr=bbox_iou_thr,
                dataset=self.CLASSES,
                logger=logger)
            eval_results['mAP'] = mean_ap

        for k, v in eval_results.items():
            if isinstance(v, float):
                eval_results[k] = float(f'{(v):.3f}')

        return eval_results
Example #29
def main():
    args = parse_args()

    # assert args.show or args.json_out, \
    #     ('Please specify at least one operation (save or show the results) '
    #      'with the argument "--out" or "--show" or "--json_out"')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)

    checkpoint_file = args.checkpoint
    if not checkpoint_file:

        def _epoch_num(name):
            return int(
                re.findall('epoch_[0-9]*.pth',
                           name)[0].replace('epoch_', '').replace('.pth', ''))

        pths = sorted(glob.glob(os.path.join(cfg.work_dir, 'epoch_*.pth')),
                      key=_epoch_num)
        if len(pths) > 0:
            print("Found {}, use it as checkpoint by default.".format(
                pths[-1]))
            checkpoint_file = pths[-1]
    if not checkpoint_file:
        raise ValueError("Checkpoints not found, check work_dir non empty.")
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=args.shuffle)  # TODO: hack shuffle True

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, checkpoint_file, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    num_evals = args.num_evals
    if num_evals < 0:
        num_evals = len(data_loader)
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, num_evals, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, num_evals, args.tmpdir)

    rank, _ = get_dist_info()
    if rank == 0:
        gt_bboxes, gt_labels, gt_ignore, dataset_name = get_seq_gts(dataset)
        print('\nStarting to evaluate {}'.format(dataset_name))
        eval_map(outputs,
                 gt_bboxes,
                 gt_labels,
                 gt_ignore,
                 scale_ranges=None,
                 iou_thr=0.5,
                 dataset=dataset_name,
                 print_summary=True)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #30
    def evaluate(self,
                 results,
                 metric='track',
                 logger=None,
                 resfile_path=None,
                 bbox_iou_thr=0.5,
                 track_iou_thr=0.5):
        """Evaluation in MOT Challenge.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'track'. Defaults to 'track'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            resfile_path (str, optional): Path to save the formatted results.
                Defaults to None.
            bbox_iou_thr (float, optional): IoU threshold for detection
                evaluation. Defaults to 0.5.
            track_iou_thr (float, optional): IoU threshold for tracking
                evaluation. Defaults to 0.5.

        Returns:
            dict[str, float]: MOTChallenge style evaluation metric.
        """
        eval_results = dict()
        if isinstance(metric, list):
            metrics = metric
        elif isinstance(metric, str):
            metrics = [metric]
        else:
            raise TypeError('metric must be a list or a str.')
        allowed_metrics = ['bbox', 'track']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported.')

        if 'track' in metrics:
            resfiles, names, tmp_dir = self.format_results(
                results, resfile_path, metrics)
            print_log('Evaluate CLEAR MOT results.', logger=logger)
            distth = 1 - track_iou_thr

            accs = []
            for name in names:
                if 'half-train' in self.ann_file:
                    gt_file = osp.join(self.img_prefix,
                                       f'{name}/gt/gt_half-train.txt')
                elif 'half-val' in self.ann_file:
                    gt_file = osp.join(self.img_prefix,
                                       f'{name}/gt/gt_half-val.txt')
                else:
                    gt_file = osp.join(self.img_prefix, f'{name}/gt/gt.txt')
                res_file = osp.join(resfiles['track'], f'{name}.txt')
                gt = mm.io.loadtxt(gt_file)
                res = mm.io.loadtxt(res_file)
                ini_file = osp.join(self.img_prefix, f'{name}/seqinfo.ini')
                if osp.exists(ini_file) and 'MOT15' not in self.img_prefix:
                    acc, ana = mm.utils.CLEAR_MOT_M(gt,
                                                    res,
                                                    ini_file,
                                                    distth=distth)
                else:
                    acc = mm.utils.compare_to_groundtruth(gt,
                                                          res,
                                                          distth=distth)
                accs.append(acc)

            mh = mm.metrics.create()
            summary = mh.compute_many(accs,
                                      names=names,
                                      metrics=mm.metrics.motchallenge_metrics,
                                      generate_overall=True)
            str_summary = mm.io.render_summary(
                summary,
                formatters=mh.formatters,
                namemap=mm.io.motchallenge_metric_names)
            print(str_summary)

            eval_results.update({
                mm.io.motchallenge_metric_names[k]: v['OVERALL']
                for k, v in summary.to_dict().items()
            })

            if tmp_dir is not None:
                tmp_dir.cleanup()

        if 'bbox' in metrics:
            if isinstance(results, dict):
                bbox_results = results['bbox_results']
            elif isinstance(results, list):
                bbox_results = results
            else:
                raise TypeError('results must be a dict or a list.')
            annotations = [self.get_ann_info(info) for info in self.data_infos]
            mean_ap, _ = eval_map(bbox_results,
                                  annotations,
                                  iou_thr=bbox_iou_thr,
                                  dataset=self.CLASSES,
                                  logger=logger)
            eval_results['mAP'] = mean_ap

        for k, v in eval_results.items():
            if isinstance(v, float):
                eval_results[k] = float(f'{(v):.3f}')

        return eval_results
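
A hedged call for the MOT-style evaluate above, assuming a MOTChallenge-style dataset instance and results in the format its format_results expects; the output path is a placeholder.

eval_results = dataset.evaluate(results,
                                metric=['bbox', 'track'],
                                resfile_path='work_dirs/mot_eval',  # placeholder
                                bbox_iou_thr=0.5,
                                track_iou_thr=0.5)
print(eval_results.get('mAP'), eval_results.get('MOTA'))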