Example #1
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            if self._mask_eval_class == 'all':
                metrics = np.hstack((coco_metrics, mcoco_eval.stats))
            else:
                mask_coco_metrics = mcoco_eval.category_stats
                val_catg_idx = np.isin(mcoco_eval.params.catIds,
                                       self._eval_categories)
                # Gather the per-category stats for the requested eval categories.
                if np.any(val_catg_idx):
                    mean_val_metrics = []
                    for mid in range(len(self._metric_names) // 2):
                        mean_val_metrics.append(
                            np.nanmean(mask_coco_metrics[mid][val_catg_idx]))

                    mean_val_metrics = np.array(mean_val_metrics)
                else:
                    mean_val_metrics = np.zeros(len(self._metric_names) // 2)
                metrics = np.hstack((coco_metrics, mean_val_metrics))
        else:
            metrics = coco_metrics

        # Cleans up the internal variables in order for a fresh eval next time.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)
        return metrics_dict
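
The class-filtered branch in Example #1 reduces to selecting a subset of columns from category_stats and averaging each metric row while ignoring NaNs. Below is a minimal, self-contained sketch of that selection logic; the category IDs, the 12-row category_stats array, and the eval_categories list are made-up stand-ins for the evaluator's internals.

import numpy as np

# Stand-ins: 12 per-category metric rows (as in len(_metric_names) // 2)
# over 4 hypothetical categories.
num_metrics = 12
cat_ids = np.array([1, 2, 3, 4])
category_stats = np.random.rand(num_metrics, len(cat_ids))
category_stats[0, 2] = np.nan  # a category with no valid detections

eval_categories = [2, 4]  # the subset of categories to report on

# Boolean mask over the category axis, mirroring np.isin(params.catIds, ...).
valid = np.isin(cat_ids, eval_categories)

if np.any(valid):
    # Average each metric row over the selected categories, skipping NaNs.
    mean_val_metrics = np.array(
        [np.nanmean(category_stats[mid][valid]) for mid in range(num_metrics)])
else:
    mean_val_metrics = np.zeros(num_metrics)

print(mean_val_metrics.shape)  # (12,)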
Example #2
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            logging.info('There is no annotation_file in COCOEvaluator.')
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            logging.info('Using annotation file: %s', self._annotation_file)
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            mask_coco_metrics = mcoco_eval.stats
            metrics = np.hstack((coco_metrics, mask_coco_metrics))
        else:
            metrics = coco_metrics

        # Cleans up the internal variables in order for a fresh eval next time.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)
        return metrics_dict
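
In Examples #1 and #2, coco_eval.stats and mcoco_eval.stats are the 12 standard COCO summary numbers produced by summarize() (AP, AP50, AP75, APs, APm, APl, then the six AR variants), so np.hstack yields the 24-element vector referred to in the docstring. The sketch below illustrates how _metric_names could be laid out to match that ordering; the exact names configured in the evaluator's constructor are an assumption here.

import numpy as np

# Ordering of COCOeval.stats after summarize().
_BOX_METRIC_NAMES = [
    'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl',
    'ARmax1', 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl',
]
# Illustrative naming only; the 'mask_' prefix is an assumption.
metric_names = _BOX_METRIC_NAMES + ['mask_' + n for n in _BOX_METRIC_NAMES]

# Fake stats vectors standing in for coco_eval.stats / mcoco_eval.stats.
coco_metrics = np.random.rand(12)
mask_coco_metrics = np.random.rand(12)
metrics = np.hstack((coco_metrics, mask_coco_metrics))

metrics_dict = {name: metrics[i].astype(np.float32)
                for i, name in enumerate(metric_names)}
assert len(metrics_dict) == 24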
Example #3
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            logging.info('There is no annotation_file in COCOEvaluator.')
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            logging.info('Using annotation file: %s', self._annotation_file)
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            mask_coco_metrics = mcoco_eval.stats
            metrics = np.hstack((coco_metrics, mask_coco_metrics))
        else:
            metrics = coco_metrics

        # Cleans up the internal variables in order for a fresh eval next time.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)

        # Adds metrics per category.
        if self._per_category_metrics and hasattr(coco_eval, 'category_stats'):
            for category_index, category_id in enumerate(
                    coco_eval.params.catIds):
                metrics_dict['Precision mAP ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[0][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory@50IoU/{}'.format(
                    category_id
                )] = coco_eval.category_stats[1][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory@75IoU/{}'.format(
                    category_id
                )] = coco_eval.category_stats[2][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory (small) /{}'.format(
                    category_id
                )] = coco_eval.category_stats[3][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory (medium) /{}'.format(
                    category_id
                )] = coco_eval.category_stats[4][category_index].astype(
                    np.float32)
                metrics_dict['Precision mAP ByCategory (large) /{}'.format(
                    category_id
                )] = coco_eval.category_stats[5][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR@1 ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[6][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR@10 ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[7][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR@100 ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[8][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR (small) ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[9][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR (medium) ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[10][category_index].astype(
                    np.float32)
                metrics_dict['Recall AR (large) ByCategory/{}'.format(
                    category_id
                )] = coco_eval.category_stats[11][category_index].astype(
                    np.float32)
        return metrics_dict
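
The per-category block in Example #3 repeats the same pattern twelve times. An equivalent, behavior-preserving way to express it is a single loop over the key templates and their row index in category_stats; the helper below is a refactoring sketch (the function name and standalone form are not from the repository).

import numpy as np

# Key templates paired, by position, with their row in category_stats.
_PER_CATEGORY_KEYS = [
    'Precision mAP ByCategory/{}',
    'Precision mAP ByCategory@50IoU/{}',
    'Precision mAP ByCategory@75IoU/{}',
    'Precision mAP ByCategory (small) /{}',
    'Precision mAP ByCategory (medium) /{}',
    'Precision mAP ByCategory (large) /{}',
    'Recall AR@1 ByCategory/{}',
    'Recall AR@10 ByCategory/{}',
    'Recall AR@100 ByCategory/{}',
    'Recall AR (small) ByCategory/{}',
    'Recall AR (medium) ByCategory/{}',
    'Recall AR (large) ByCategory/{}',
]


def add_per_category_metrics(metrics_dict, category_stats, cat_ids):
    """Adds one entry per (metric, category), mirroring the unrolled block."""
    for category_index, category_id in enumerate(cat_ids):
        for stat_index, key_template in enumerate(_PER_CATEGORY_KEYS):
            metrics_dict[key_template.format(category_id)] = (
                category_stats[stat_index][category_index].astype(np.float32))
    return metrics_dict


# Usage with stand-in data in place of coco_eval.category_stats / params.catIds.
fake_stats = np.random.rand(12, 3)
print(len(add_per_category_metrics({}, fake_stats, [1, 2, 3])))  # 36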
Example #4
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

        Returns:
          coco_metric: float numpy array with shape [24] representing the
            coco-style evaluation metrics (box and mask).
        """
        if not self._annotation_file:
            logging.info("There is no annotation_file in COCOEvaluator.")
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=("mask" if self._include_mask else "box"),
                gt_dataset=gt_dataset,
            )
        else:
            logging.info("Using annotation file: %s", self._annotation_file)
            coco_gt = self._coco_gt

        logging.info("Loading predictions...")

        eval_image_sizes = {}
        if self._use_eval_image_sizes:
            for image in coco_gt.dataset["images"]:
                eval_image_sizes[image["id"]] = (image["height"],
                                                 image["width"])

        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions,
            eval_image_sizes,
            score_threshold=self._score_threshold)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann["image_id"] for ann in coco_predictions]

        logging.info("Evaluating bboxes...")

        coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

        if self._per_category_metrics:
            coco_eval.summarize_per_category()

        metrics_dict = self._get_metrics_dict(coco_eval, "bbox")
        metrics_dict["performance/bbox_ap"] = metrics_dict[
            "bbox_performance/AP"]

        if self._include_mask:
            logging.info("Evaluating masks...")

            mcoco_eval = COCOeval(coco_gt, coco_dt, iouType="segm")
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()

            if self._per_category_metrics:
                mcoco_eval.summarize_per_category()

            mask_metrics = self._get_metrics_dict(mcoco_eval, "mask")
            mask_metrics["performance/mask_ap"] = mask_metrics[
                "mask_performance/AP"]

            metrics_dict.update(mask_metrics)

        if self._include_attributes:
            logging.info("Evaluating attributes...")
            attribute_metrics = evaluate_attributes(
                coco_gt.dataset["annotations"], coco_dt.dataset["annotations"])
            metrics_dict.update(attribute_metrics)

        # Cleans up the internal variables in order for a fresh eval next time.
        self.reset()

        return metrics_dict
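
Example #4 delegates the name-to-value mapping to a _get_metrics_dict helper (and a summarize_per_category extension) that are not shown here. Below is a hypothetical sketch of such a helper, based only on the keys visible above ("bbox_performance/AP", "mask_performance/AP") and on the standard ordering of COCOeval.stats; the real helper may populate additional keys.

import numpy as np
from types import SimpleNamespace

# Standard COCOeval.stats ordering after summarize().
_SUMMARY_NAMES = ["AP", "AP50", "AP75", "APs", "APm", "APl",
                  "ARmax1", "ARmax10", "ARmax100", "ARs", "ARm", "ARl"]


def get_metrics_dict(coco_eval, prefix):
    """Hypothetical helper: maps the 12 summary stats to prefixed keys."""
    return {
        "{}_performance/{}".format(prefix, name): np.float32(value)
        for name, value in zip(_SUMMARY_NAMES, coco_eval.stats)
    }


# Minimal usage with a stand-in object in place of a real COCOeval instance.
fake_eval = SimpleNamespace(stats=np.random.rand(12))
print(get_metrics_dict(fake_eval, "bbox")["bbox_performance/AP"])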