Example #1
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            if self._mask_eval_class == 'all':
                metrics = np.hstack((coco_metrics, mcoco_eval.stats))
            else:
                mask_coco_metrics = mcoco_eval.category_stats
                val_catg_idx = np.isin(mcoco_eval.params.catIds,
                                       self._eval_categories)
                # Gather the valid evaluation of the eval categories.
                if np.any(val_catg_idx):
                    mean_val_metrics = []
                    for mid in range(len(self._metric_names) // 2):
                        mean_val_metrics.append(
                            np.nanmean(mask_coco_metrics[mid][val_catg_idx]))

                    mean_val_metrics = np.array(mean_val_metrics)
                else:
                    mean_val_metrics = np.zeros(len(self._metric_names) // 2)
                metrics = np.hstack((coco_metrics, mean_val_metrics))
        else:
            metrics = coco_metrics

        # Cleans up the internal variables in order for a fresh eval next time.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)
        return metrics_dict
Example #2
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            logging.info('There is no annotation_file in COCOEvaluator.')
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            logging.info('Using annotation file: %s', self._annotation_file)
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            mask_coco_metrics = mcoco_eval.stats

        if self._include_mask:
            metrics = np.hstack((coco_metrics, mask_coco_metrics))
        else:
            metrics = coco_metrics

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)

        # Adds metrics per category.
        if self._per_category_metrics:
            metrics_dict.update(self._retrieve_per_category_metrics(coco_eval))

            if self._include_mask:
                metrics_dict.update(
                    self._retrieve_per_category_metrics(mcoco_eval,
                                                        prefix='mask'))

        return metrics_dict
Example #3
def get_metrics(
    labels_path: pathlib.Path,
    predictions_path: pathlib.Path,
    metrics: List[float] = [30, 50, 75],
) -> dict:
    iou_thresholds = np.array(metrics) / 100

    coco_gt = coco.COCO(labels_path)
    coco_predicted = coco_gt.loadRes(str(predictions_path))
    cocoEval = cocoeval.COCOeval(coco_gt, coco_predicted, "bbox")
    cocoEval.params.iouThrs = iou_thresholds
    cocoEval.params.areaRngLbl = "all"
    cocoEval.params.maxDets = [100]
    cocoEval.evaluate()
    cocoEval.accumulate()

    results = {}
    for eval_type in ["ap", "ar"]:
        for iou in metrics:
            results[f"{eval_type}{iou}"] = _summarize(
                cocoEval.eval,
                iou / 100,
                iou_thresholds,
                average_precision=(eval_type == "ap"),
            )
    return results
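The `_summarize` helper called above is not shown in this example. As a rough, hypothetical sketch of what it might do, modeled on the internal summarize logic of pycocotools and assuming the standard layout of `cocoEval.eval` after `accumulate()`:

import numpy as np

def _summarize(eval_result, iou, iou_thresholds, average_precision=True):
    # eval_result is cocoEval.eval after accumulate():
    # 'precision' has shape [T, R, K, A, M] and 'recall' has shape [T, K, A, M]
    # (T = IoU thresholds, R = recall thresholds, K = categories,
    #  A = area ranges, M = max-detection settings).
    t = np.where(np.isclose(iou_thresholds, iou))[0]
    if average_precision:
        values = eval_result["precision"][t, :, :, 0, -1]  # area 'all', last maxDets
    else:
        values = eval_result["recall"][t, :, 0, -1]
    values = values[values > -1]  # -1 marks cells with no ground truth
    return float(np.mean(values)) if values.size else float("nan")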
Example #4
    def score_coco(dmet):
        from pycocotools import coco
        from pycocotools import cocoeval
        # The original pycocotools API prints too much; suppress it
        with util.SupressPrint(coco, cocoeval):
            cocoGt = dmet.true._aspycoco()
            cocoDt = dmet.pred._aspycoco()

            for ann in cocoGt.dataset['annotations']:
                w, h = ann['bbox'][-2:]
                ann['ignore'] = ann['weight'] < .5
                ann['area'] = w * h
                ann['iscrowd'] = False

            for ann in cocoDt.dataset['annotations']:
                w, h = ann['bbox'][-2:]
                ann['area'] = w * h

            evaler = cocoeval.COCOeval(cocoGt, cocoDt, iouType='bbox')
            evaler.evaluate()
            evaler.accumulate()
            evaler.summarize()
            coco_ap = evaler.stats[1]
            coco_scores = {
                'mAP': coco_ap,
            }
        return coco_scores
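Note that `evaler.stats[1]`, reported as 'mAP' here, is AP at IoU=0.50; the COCO-primary metric averaged over IoU=0.50:0.95 is `stats[0]`. For reference, the 12-entry `stats` vector produced by `summarize()` for 'bbox'/'segm' evaluation is laid out as follows:

# Standard layout of COCOeval.stats for iouType 'bbox' or 'segm' (pycocotools):
COCO_STATS_NAMES = [
    'AP @ IoU=0.50:0.95 | area=all | maxDets=100',   # stats[0]
    'AP @ IoU=0.50',                                 # stats[1]
    'AP @ IoU=0.75',                                 # stats[2]
    'AP (small)', 'AP (medium)', 'AP (large)',       # stats[3:6]
    'AR @ maxDets=1', 'AR @ maxDets=10', 'AR @ maxDets=100',  # stats[6:9]
    'AR (small)', 'AR (medium)', 'AR (large)',       # stats[9:12]
]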
Example #5
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            mask_coco_metrics = mcoco_eval.stats

        if self._include_mask:
            metrics = np.hstack((coco_metrics, mask_coco_metrics))
        else:
            metrics = coco_metrics

        # Cleans up the internal variables in order for a fresh eval next time.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)
        return metrics_dict
Example #6
def bbox_evaluate(coco_gt, res_file):
    coco_evaluator = coco_eval.COCOeval(cocoGt=coco_gt,
                                        cocoDt=coco_gt.loadRes(res_file),
                                        iouType='bbox')
    coco_evaluator.evaluate()
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    return coco_evaluator.stats
Example #7
def coco_bbox_eval(result_file, annotation_file):

    ann_type = 'bbox'
    coco_gt = COCO.COCO(annotation_file)
    coco_dt = coco_gt.loadRes(result_file)
    cocoevaler = COCOeval.COCOeval(coco_gt, coco_dt, ann_type)
    cocoevaler.evaluate()
    cocoevaler.accumulate()
    cocoevaler.summarize()
Example #8
def test_single_scale():
    cocoGt = cc.COCO("annotations/person_keypoints_val2017.json")
    cocoDt = cocoGt.loadRes("coco_result.json")
    cocoEval = ce.COCOeval(cocoGt, cocoDt, 'keypoints')
    cocoEval.params.imgIds = cocoGt.getImgIds()
    cocoEval.evaluate()
    cocoEval.accumulate()
    print("Single Scale")
    cocoEval.summarize()
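With iouType='keypoints', `summarize()` uses the OKS-based keypoint summarizer, so `cocoEval.stats` has 10 entries (computed with maxDets=20) instead of the 12-entry box/segm vector. Per the stock pycocotools summarizer, the layout is:

# Layout of cocoEval.stats for iouType='keypoints' (OKS-based, maxDets=20):
COCO_KEYPOINT_STATS_NAMES = [
    'AP @ OKS=0.50:0.95', 'AP @ OKS=0.50', 'AP @ OKS=0.75',
    'AP (medium)', 'AP (large)',
    'AR @ OKS=0.50:0.95', 'AR @ OKS=0.50', 'AR @ OKS=0.75',
    'AR (medium)', 'AR (large)',
]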
Example #9
    def evaluate(self, coco_gt):
        if not self.is_master_rank:
            return

        val_file_path = os.path.join(self.val_path, 'val.json')
        if len(json.load(open(val_file_path, 'r'))) == 0:
            print('no prediction!')
        else:
            coco_evaluator = coco_eval.COCOeval(cocoGt=coco_gt, cocoDt=coco_gt.loadRes(val_file_path), iouType='bbox')
            coco_evaluator.evaluate()
            coco_evaluator.accumulate()
            coco_evaluator.summarize()
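The empty-file check above is needed because pycocotools' `loadRes()` indexes the first element of the results list to sniff the annotation type, so it raises on an empty list. A minimal sketch of the same guard, reusing the example's `val_file_path` and `coco_gt`:

import json

with open(val_file_path, 'r') as f:
    predictions = json.load(f)
if predictions:  # loadRes() cannot handle an empty list of detections
    coco_dt = coco_gt.loadRes(val_file_path)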
Example #10
def retinanet_coco_test(imgs, annotations, score_threshold, limit):
    '''
    Test pre-trained PyTorch RetinaNet against COCO.

    Loads up a RetinaNet, runs data through it according to the provided annotation file
    and evaluates the results using pycocotools COCOeval.
    I used this to make sure that the PyTorch-provided RetinaNet works fine
    and to familiarize myself with it!
    '''
    model = tmodels.detection.retinanet_resnet50_fpn(pretrained=True).cuda()
    model.eval()

    data = dsets.CocoDetection(root=imgs,
                               annFile=annotations,
                               transform=tforms.ToTensor())

    evaluated = []
    results = []
    for i, datum in enumerate(data):
        if i % 100 == 0:
            print(i)
        if limit >= 0 and i >= limit:
            break

        img, anns = datum
        predictions = model(img.unsqueeze(0).cuda())[0]

        keep = predictions['scores'] > score_threshold
        boxes = [[x1, y1, x2 - x1, y2 - y1]
                 for x1, y1, x2, y2 in utils.recall_tensor(predictions['boxes'][keep])]
        scores = utils.recall_tensor(predictions['scores'][keep])
        labels = utils.recall_tensor(predictions['labels'][keep])

        img_id = data.ids[i]
        results += [{
            'image_id': img_id,
            'bbox': box,
            'score': score,
            'category_id': label
        } for box, score, label in zip(boxes, scores, labels)]
        evaluated.append(img_id)

    evl = cocoeval.COCOeval(data.coco, data.coco.loadRes(results), 'bbox')
    evl.params.imgIds = evaluated
    evl.evaluate()
    evl.accumulate()
    evl.summarize()

    ann_boxes = [ann['bbox'] for ann in anns]
    ann_labels = [ann['category_id'] for ann in anns]
    utils.show(img, boxes, ann_boxes, labels, ann_labels)
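The list comprehension that builds `boxes` converts torchvision's [x1, y1, x2, y2] boxes into the [x, y, width, height] format that COCO result files expect; factored into a (hypothetical) helper it would read:

def xyxy_to_xywh(box):
    # COCO results store boxes as [x, y, width, height].
    x1, y1, x2, y2 = box
    return [x1, y1, x2 - x1, y2 - y1]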
Example #11
def evaluate(args):
    data_dir = args.dataset_root
    data_type = 'val2014'
    ann_file = "{0}/annotations/{1}_{2}.cars.json".format(data_dir, 'instances', data_type)
    results_file = "predictions.json"

    coco_gt = coco.COCO(ann_file)
    coco_det = coco_gt.loadRes(results_file)

    coco_eval = cocoeval.COCOeval(coco_gt, coco_det, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
Example #12
    def _test_model(self,
                    model_name,
                    expected_ap=None,
                    expected_ar=None,
                    resample=Image.NEAREST):
        engine = DetectionEngine(test_utils.test_data_path(model_name))
        ground_truth_file = 'coco/annotations/instances_val2017.json'
        coco_gt = coco.COCO(test_utils.test_data_path(ground_truth_file))
        detection_results = []
        print('Running inference for model %s...' % model_name)
        for _, img in coco_gt.imgs.items():
            with test_utils.test_image('coco', 'val2017',
                                       img['file_name']) as image:
                ret = engine.detect_with_image(image.convert('RGB'),
                                               threshold=0,
                                               top_k=100,
                                               relative_coord=False,
                                               resample=resample)
                for detection in ret:
                    detection_results.append({
                        'image_id':
                        img['id'],
                        # Model label id and ground truth label id are 1 off.
                        'category_id':
                        detection.label_id + 1,
                        'bbox':
                        self.absolute_to_relative_bbox(
                            detection.bounding_box.flatten().tolist()),
                        'score':
                        detection.score.item()
                    })

        detection_file = '/tmp/%s.json' % model_name
        with open(detection_file, 'w') as f:
            json.dump(detection_results, f, separators=(',', ':'))

        coco_dt = coco_gt.loadRes(detection_file)
        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, 'bbox')
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        if expected_ap is not None:
            self.assertGreaterEqual(coco_eval.stats[0], expected_ap)
        if expected_ar is not None:
            self.assertGreaterEqual(coco_eval.stats[6], expected_ar)
Example #13
    def calculate_ap(self, gt_result_json, dt_result):
        """ COCOeval을 사용한 AP계산. 중간 과정으로 gt와 dt에 대한 json파일이 out_dir에 생성됨

        Arguments:
            gt_result_json (dict): test 내에 존재하는 모든 도면에 대한 images, annotation, category 정보를 coco json 형태로 저장한 dict
            dt_result (dict): 도면 이름을 key로, box들을 value로 갖는 dt dict
        Returns:
            result_str (string): COCOeval의 계산 결과 summary 저장한 문자열
        """
        # First, write the gt json to a file
        gt_outpath = os.path.join(self.output_dir, "test_gt_global.json")
        coco_json_write(gt_outpath, gt_result_json)

        # Convert dt_result to COCO format and write it to a file (note: dt must be the pre-NMS results)
        test_dt_global = []
        for filename, bboxes in dt_result.items():
            for box in bboxes:
                box["image_id"] = self.get_gt_img_id_from_filename(
                    filename, gt_result_json)
                test_dt_global.append(box)

        dt_outpath = os.path.join(self.output_dir, "test_dt_global.json")
        coco_json_write(dt_outpath, test_dt_global)

        # Load the gt and dt files and compute AP
        cocoGT = coco.COCO(gt_outpath)
        cocoDt = cocoGT.loadRes(dt_outpath)
        annType = 'bbox'
        cocoEval = cocoeval.COCOeval(cocoGT, cocoDt, annType)
        cocoEval.evaluate()
        cocoEval.accumulate()

        original_stdout = sys.stdout
        string_stdout = io.StringIO()
        sys.stdout = string_stdout
        cocoEval.summarize()
        sys.stdout = original_stdout

        result_str = string_stdout.getvalue()

        return result_str
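The manual `sys.stdout` swap above works but leaves stdout redirected if `summarize()` raises. An equivalent sketch using the standard-library `contextlib.redirect_stdout`, which restores stdout on exit:

import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    cocoEval.summarize()
result_str = buf.getvalue()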
Example #14
    def score_coco(dmet, verbose=0):
        """
        score using ms-coco method

        Example:
            >>> # xdoctest: +REQUIRES(--pycocotools)
            >>> dmet = DetectionMetrics.demo(
            >>>     nimgs=100, nboxes=(0, 3), n_fp=(0, 1), nclasses=8)
            >>> print(dmet.score_coco()['mAP'])
            0.711016...
        """
        from pycocotools import coco
        from pycocotools import cocoeval
        # The original pycocotools API prints too much; suppress it
        import netharn as nh

        pred, true = dmet._to_coco()

        quiet = verbose == 0
        with nh.util.SupressPrint(coco, cocoeval, enabled=quiet):
            cocoGt = true._aspycoco()
            cocoDt = pred._aspycoco()

            for ann in cocoGt.dataset['annotations']:
                w, h = ann['bbox'][-2:]
                ann['ignore'] = ann['weight'] < .5
                ann['area'] = w * h
                ann['iscrowd'] = False

            for ann in cocoDt.dataset['annotations']:
                w, h = ann['bbox'][-2:]
                ann['area'] = w * h

            evaler = cocoeval.COCOeval(cocoGt, cocoDt, iouType='bbox')
            evaler.evaluate()
            evaler.accumulate()
            evaler.summarize()
            coco_ap = evaler.stats[1]
            coco_scores = {'mAP': coco_ap, 'evalar_stats': evaler.stats}
        return coco_scores
Example #15
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

        Returns:
            coco_metric: float numpy array with shape [24] representing the
              coco-style evaluation metrics (box and mask).
        """
        if not self._annotation_file:
            logger.info('There is no annotation_file in COCOEvaluator.')
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(eval_type='box', gt_dataset=gt_dataset)
        else:
            logger.info('Using annotation file: %s', self._annotation_file)
            coco_gt = self._coco_gt

        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(self._predictions)
        coco_dt = coco_gt.load_res(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        metrics = coco_metrics

        # Cleans up the internal variables in order for a fresh eval next time.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)

        return metrics_dict
Example #16
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            logging.info('There is no annotation_file in COCOEvaluator.')
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            logging.info('Using annotation file: %s', self._annotation_file)
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            mask_coco_metrics = mcoco_eval.stats

        if self._include_mask:
            metrics = np.hstack((coco_metrics, mask_coco_metrics))
        else:
            metrics = coco_metrics

        # Cleans up the internal variables in order for a fresh eval next time.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)

        # Adds metrics per category.
        if self._per_category_metrics and hasattr(coco_eval, 'category_stats'):
            for category_index, category_id in enumerate(
                    coco_eval.params.catIds):
                metrics_dict['Precision mAP ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[0][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory@50IoU/{}'.format(
                    category_id
                )] = coco_eval.category_stats[1][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory@75IoU/{}'.format(
                    category_id
                )] = coco_eval.category_stats[2][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory (small) /{}'.format(
                    category_id
                )] = coco_eval.category_stats[3][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory (medium) /{}'.format(
                    category_id
                )] = coco_eval.category_stats[4][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory (large) /{}'.format(
                    category_id
                )] = coco_eval.category_stats[5][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR@1 ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[6][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR@10 ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[7][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR@100 ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[8][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR (small) ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[9][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR (medium) ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[10][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR (large) ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[11][category_index].astype(
                    np.float32)
        return metrics_dict
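`category_stats`, used here and in Example #1, is not defined by stock pycocotools; these examples appear to rely on a patched COCOeval. With the stock API, a comparable per-category AP can be read from the accumulated precision array. A minimal sketch, assuming `coco_eval` has already run `evaluate()` and `accumulate()`:

import numpy as np

def per_category_ap(coco_eval):
    # coco_eval.eval['precision'] has shape [T, R, K, A, M]:
    # IoU thresholds x recall thresholds x categories x area ranges x maxDets.
    precision = coco_eval.eval['precision']
    results = {}
    for k, cat_id in enumerate(coco_eval.params.catIds):
        p = precision[:, :, k, 0, -1]  # area range 'all', largest maxDets
        p = p[p > -1]                  # -1 marks cells with no ground truth
        results[cat_id] = float(np.mean(p)) if p.size else float('nan')
    return results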
Example #17
def evaluate(model=None, weights_file=None, data_loader_val=None):
    """This function performs AP evaluation using coco_eval"""

    train_res_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  constants.RES_LOC,
                                  constants.TRAIN_RES_FILENAME)

    if weights_file is None and model is None:
        # Get model weights from config
        weights_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    config.MODEL_WEIGHTS_FILENAME)

    if model is None:
        # Get model corresponding to the one selected in config
        model = models.get_model()
        # Load model weights
        model.load_state_dict(torch.load(weights_file))
    # Set device
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    # Empty cache
    torch.cuda.empty_cache()
    # Model to device
    model.to(device)

    if data_loader_val is None:
        # Data loader is in constants.DATA_LOADERS_LOC/constants.DATA_LOADER_VAL_FILENAME by default
        data_loader_val = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            constants.DATA_LOADERS_LOC, constants.DATA_LOADER_VAL_FILENAME_OBJ)

        # If DATA_LOADER is None in config then use default dataloader=data_loader_val as defined above
        data_loader_val = data_loader_val if config.DATA_LOADER is None else config.DATA_LOADER

        # Load dataloader
        data_loader_val = torch.load(data_loader_val)

    # Calculate mIoU
    average_iou = get_mIoU(model, data_loader_val)

    sys.stdout = open(train_res_file, 'a+')
    print("SemSeg mIoU = ", average_iou)
    # Annotation file is by default located under
    # constants.COCO_ANN_LOC/constants.ANN_VAL_DEFAULT_NAME
    val_ann_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    constants.COCO_ANN_LOC,
                                    constants.ANN_VAL_DEFAULT_NAME_OBJ)

    # Make coco api from annotation file
    coco_gt = COCO(val_ann_filename)

    # Get categories
    categories = list(coco_gt.cats)

    # res_filename will contain the predictions to be used later for evaluation
    res_filename = constants.COCO_RES_JSON_FILENAME
    # Export the predictions as a json file
    __export_res(model, data_loader_val, res_filename, categories)

    # Load res with coco.loadRes
    coco_dt = coco_gt.loadRes(res_filename)
    # Get the list of images
    img_ids = sorted(coco_gt.getImgIds())

    for iou_type in config.IOU_TYPES:
        sys.stdout = open(train_res_file, 'a+')
        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iou_type)
        coco_eval.params.imgIds = img_ids  # pycocotools expects 'imgIds'; setting 'img_ids' has no effect
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
Example #18
#!/usr/bin/env python

import sys
import pycocotools.coco as coco
import pycocotools.cocoeval as cocoeval

if len(sys.argv) < 2 or len(sys.argv) > 3:
    print("usage: {} det_file.json [gt_file.json]".format(sys.argv[0]))
    sys.exit(1)
det_file = sys.argv[1]
if len(sys.argv) == 3:
    gt_file = sys.argv[2]
else:
    gt_file = "/fastwork/voigtlaender/mywork/data/coco/annotations/instances_valid.json"

coco_gt = coco.COCO(gt_file)
coco_det = coco_gt.loadRes(det_file)
e = cocoeval.COCOeval(coco_gt, coco_det, "bbox")
e.evaluate()
e.accumulate()
e.summarize()
Example #19
    def __init__(self, gt_path, det_paths, theta0=0.5):
        self.coco_gt = coco.COCO(gt_path)
        self.coco_dts = [self.coco_gt.loadRes(det_path) for det_path in det_paths]
        self.coco_evals = [cocoeval.COCOeval(self.coco_gt, coco_dt, iouType='bbox')
                           for coco_dt in self.coco_dts]
        self.num_candidates = len(det_paths)
        self.theta0 = theta0
Example #20
def test_inference():
    """
    With a probability ("score") threshold of 0.2 and an NMS IoU threshold of
    0.3, the standard yolov3 model yields a "bbox" mAP @[IoU=0.50:0.95] of
    0.33983872 on the 9 sample images from the COCO validation set, as
    computed by pycocotools. This test checks that the mAP is similar.
    """
    model = "yolov3"
    model_dir = "models"
    config_path = os.path.join(model_dir, model + ".cfg")
    weights_path = os.path.join(model_dir, model + ".weights")

    net = yolov3.Darknet(config_path, device="cpu")
    net.load_weights(weights_path)
    net.eval()

    image_dir = os.path.join("sample_dataset", "images")
    fnames = os.listdir(image_dir)

    images = []
    for fname in fnames:
        fpath = os.path.join(image_dir, fname)
        images.append(cv2.imread(fpath))

    # Accumulate images instead of batching; helps run on systems (including
    # Travis CI) with lower amounts of RAM.
    results = []
    for image in images:
        results.extend(
            yolov3.inference(net,
                             image,
                             device="cpu",
                             prob_thresh=0.2,
                             nms_iou_thresh=0.3))

    with open("models/coco.names", "r") as f:
        class_names = [line.strip() for line in f.readlines()]

    pred_dataset = yolov3.to_coco(fnames, results, class_names)
    truth_dataset = coco_util.load_coco_dataset("sample_dataset/sample.json")

    # Match predicted COCO dataset image ids and cat ids with original
    # ground truth dataset.
    coco_util.match_ids(pred_dataset, truth_dataset)

    # Ground truth COCO API dataset.
    gt_coco = coco.COCO()
    gt_coco.dataset = truth_dataset
    gt_coco.createIndex()

    # Detections COCO API dataset.
    dt_coco = coco.COCO()
    dt_coco.dataset = pred_dataset
    dt_coco.createIndex()

    eval_ = cocoeval.COCOeval(gt_coco, dt_coco)
    eval_.params.iouType = "bbox"
    eval_.evaluate()
    eval_.accumulate()
    eval_.summarize()

    assert np.isclose(eval_.stats[0], 0.33983872, atol=0.0015)
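COCOeval defaults to iouType='segm' when no type is passed, which is why this example overrides `params.iouType` afterwards; passing the type to the constructor is equivalent and more direct:

eval_ = cocoeval.COCOeval(gt_coco, dt_coco, iouType="bbox")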
Example #21
def _devcheck_voc_consistency():
    """
    # CHECK FOR ISSUES WITH MY MAP COMPUTATION

    TODO:
        Check how cocoeval works
        https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py
    """
    import pandas as pd
    import kwcoco as nh
    # method = 'voc2012'
    method = 'voc2007'

    bias = 0
    bias = 0

    # classes = [0, 1, 2]
    classes = [0]

    classname = 0
    # nimgs = 5
    # nboxes = 2
    nimgs = 5
    nboxes = 5
    nbad = 1

    bg_weight = 1.0
    iou_thresh = 0.5
    bg_cls = -1

    xdata = []
    ydatas = ub.ddict(list)
    for noise in np.linspace(0, 5, 10):
        recs = {}
        lines = []
        confusions = []
        rng = np.random.RandomState(0)

        detmetrics = DetectionMetrics()

        true_coco = nh.data.coco_api.CocoDataset()
        pred_coco = nh.data.coco_api.CocoDataset()
        cid = true_coco.add_category('cat1')
        cid = pred_coco.add_category('cat1')
        for imgname in range(nimgs):

            # Create voc style data
            imgname = str(imgname)
            import kwimage
            true_boxes = kwimage.Boxes.random(num=nboxes,
                                              scale=100.,
                                              rng=rng,
                                              format='cxywh')
            pred_boxes = true_boxes.copy()
            # np.float was removed from NumPy; the builtin float behaves the same here.
            pred_boxes.data = pred_boxes.data.astype(float) + (rng.rand() * noise)
            if nbad:
                pred_boxes.data = np.vstack([
                    pred_boxes.data,
                    kwimage.Boxes.random(num=nbad,
                                         scale=100.,
                                         rng=rng,
                                         format='cxywh').data
                ])

            true_cxs = rng.choice(classes, size=len(true_boxes))
            pred_cxs = true_cxs.copy()

            change = rng.rand(len(true_cxs)) < (noise / 5)
            pred_cxs_swap = rng.choice(classes, size=len(pred_cxs))
            pred_cxs[change] = pred_cxs_swap[change]
            if nbad:
                pred_cxs = np.hstack(
                    [pred_cxs, rng.choice(classes, size=nbad)])

            np.array([0] * len(true_boxes))
            pred_cxs = np.array([0] * len(pred_boxes))

            recs[imgname] = []
            for bbox in true_boxes.to_tlbr().data:
                recs[imgname].append({
                    'bbox': bbox,
                    'difficult': False,
                    'name': classname
                })

            for bbox, score in zip(pred_boxes.to_tlbr().data,
                                   np.arange(len(pred_boxes))):
                lines.append([imgname, score] + list(bbox))
                # lines.append('{} {} {} {} {} {}'.format(imgname, score, *bbox))

            # Create MS-COCO style data
            gid = true_coco.add_image(imgname)
            gid = pred_coco.add_image(imgname)
            for bbox in true_boxes.to_xywh():
                true_coco.add_annotation(gid,
                                         cid,
                                         bbox=bbox,
                                         iscrowd=False,
                                         ignore=0,
                                         area=bbox.area[0])
            for bbox, score in zip(pred_boxes.to_xywh(),
                                   np.arange(len(pred_boxes))):
                pred_coco.add_annotation(gid,
                                         cid,
                                         bbox=bbox,
                                         iscrowd=False,
                                         ignore=0,
                                         score=score,
                                         area=bbox.area[0])

            # Create kwcoco style confusion data
            true_weights = np.array([1] * len(true_boxes))
            pred_scores = np.arange(len(pred_boxes))

            y = pd.DataFrame(
                detection_confusions(true_boxes,
                                     true_cxs,
                                     true_weights,
                                     pred_boxes,
                                     pred_scores,
                                     pred_cxs,
                                     bg_weight=1.0,
                                     iou_thresh=0.5,
                                     bg_cls=-1,
                                     bias=bias))
            y['gx'] = int(imgname)
            y = (y)
            confusions.append(y)

        from pycocotools import cocoeval as coco_score
        cocoGt = true_coco._aspycoco()
        cocoDt = pred_coco._aspycoco()

        evaler = coco_score.COCOeval(cocoGt, cocoDt, iouType='bbox')
        evaler.evaluate()
        evaler.accumulate()
        evaler.summarize()
        coco_ap = evaler.stats[1]

        y = pd.concat(confusions)

        mine_ap = score_detection_assignment(y, method=method)['ap']
        voc_rec, voc_prec, voc_ap = voc_eval(lines,
                                             recs,
                                             classname,
                                             iou_thresh=0.5,
                                             method=method,
                                             bias=bias)
        eav_prec, eav_rec, eav_ap1 = _multiclass_ap(y)

        eav_ap2 = _ave_precision(eav_rec, eav_prec, method=method)
        voc_ap2 = _ave_precision(voc_rec, voc_prec, method=method)

        eav_ap = eav_ap2

        print('noise = {!r}'.format(noise))
        print('mine_ap = {!r}'.format(mine_ap.values.mean()))
        print('voc_ap = {!r}'.format(voc_ap))
        print('eav_ap = {!r}'.format(eav_ap))
        print('---')
        xdata.append(noise)
        ydatas['voc'].append(voc_ap)
        ydatas['eav'].append(eav_ap)
        ydatas['kwcoco'].append(mine_ap.values.mean())
        ydatas['coco'].append(coco_ap)

    ydf = pd.DataFrame(ydatas)
    print(ydf)

    import kwplot
    kwplot.autompl()
    kwplot.multi_plot(xdata=xdata, ydata=ydatas, fnum=1, doclf=True)