Code example #1
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            if self._mask_eval_class == 'all':
                metrics = np.hstack((coco_metrics, mcoco_eval.stats))
            else:
                mask_coco_metrics = mcoco_eval.category_stats
                val_catg_idx = np.isin(mcoco_eval.params.catIds,
                                       self._eval_categories)
                # Average the per-category mask metrics over the requested
                # eval categories.
                if np.any(val_catg_idx):
                    mean_val_metrics = []
                    for mid in range(len(self._metric_names) // 2):
                        mean_val_metrics.append(
                            np.nanmean(mask_coco_metrics[mid][val_catg_idx]))

                    mean_val_metrics = np.array(mean_val_metrics)
                else:
                    mean_val_metrics = np.zeros(len(self._metric_names) // 2)
                metrics = np.hstack((coco_metrics, mean_val_metrics))
        else:
            metrics = coco_metrics

        # Clean up the internal state so that the next evaluation starts fresh.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)
        return metrics_dict
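
Usage sketch (not part of the example above): assuming the surrounding class is the COCOEvaluator whose constructor appears in Code example #3, and assuming it exposes an update()-style method that appends per-batch model outputs to self._predictions, a driver loop would look roughly like this. The annotation path and the prediction_batches iterable are placeholders.

    # Hypothetical driver loop; update() is an assumed accumulation method.
    evaluator = COCOEvaluator(
        annotation_file='/path/to/instances_val.json',  # placeholder path
        include_mask=True)
    for predictions in prediction_batches:   # placeholder iterable of model outputs
        evaluator.update(predictions)         # assumed to append to self._predictions
    metrics = evaluator.evaluate()
    print(metrics['AP'], metrics['mask_AP'])  # keys come from self._metric_names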
Code example #2
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            logging.info('There is no annotation_file in COCOEvaluator.')
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            logging.info('Using annotation file: %s', self._annotation_file)
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]
        # Class manipulation: for the 'all' split, no annotations are ignored,
        # so set ignored_split = 0 on every groundtruth annotation.
        for idx, _ in enumerate(coco_gt.dataset['annotations']):
            coco_gt.dataset['annotations'][idx]['ignored_split'] = 0
        coco_eval = cocoeval.OlnCOCOevalWrapper(coco_gt,
                                                coco_dt,
                                                iou_type='bbox')
        coco_eval.params.maxDets = [10, 20, 50, 100, 200]
        coco_eval.params.imgIds = image_ids
        coco_eval.params.useCats = 0 if not self._use_category else 1
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.OlnCOCOevalWrapper(coco_gt,
                                                     coco_dt,
                                                     iou_type='segm')
            mcoco_eval.params.maxDets = [10, 20, 50, 100, 200]
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.params.useCats = 0 if not self._use_category else 1
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            mask_coco_metrics = mcoco_eval.stats

        if self._include_mask:
            metrics = np.hstack((coco_metrics, mask_coco_metrics))
        else:
            metrics = coco_metrics

        # Clean up the internal state so that the next evaluation starts fresh.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)
        return metrics_dict
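
For comparison, the class-agnostic setting enabled by useCats = 0 and the enlarged maxDets budget can also be exercised with the plain pycocotools evaluator. A minimal sketch, assuming placeholder annotation and detection files:

    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    coco_gt = COCO('/path/to/instances_val.json')            # placeholder file
    coco_dt = coco_gt.loadRes('/path/to/detections.json')    # placeholder file
    coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
    coco_eval.params.useCats = 0                       # class-agnostic matching
    coco_eval.params.maxDets = [10, 20, 50, 100, 200]  # same budget as above
    coco_eval.evaluate()
    coco_eval.accumulate()
    # The stock summarize() reads its AP/AR settings from fixed positions in
    # params.maxDets, so with a custom budget the printed labels shift;
    # presumably this is why the snippet above uses OlnCOCOevalWrapper instead.
    coco_eval.summarize()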
Code example #3
    def __init__(self,
                 annotation_file,
                 include_mask,
                 need_rescale_bboxes=True):
        """Constructs COCO evaluation class.

        The class provides the interface to metrics_fn in TPUEstimator. The
        _update_op() takes detections from each image and pushes them to
        self.detections. The _evaluate() loads a JSON file in COCO annotation
        format as the groundtruths and runs COCO evaluation.

        Args:
          annotation_file: a JSON file that stores annotations of the eval
            dataset. If `annotation_file` is None, groundtruth annotations will
            be loaded from the dataloader.
          include_mask: a boolean to indicate whether or not to include the mask
            eval.
          need_rescale_bboxes: If true, bboxes in `predictions` will be rescaled
            back to absolute values (`image_info` is needed in this case).
        """
        if annotation_file:
            if annotation_file.startswith('gs://'):
                _, local_val_json = tempfile.mkstemp(suffix='.json')
                tf.io.gfile.remove(local_val_json)

                tf.io.gfile.copy(annotation_file, local_val_json)
                atexit.register(tf.io.gfile.remove, local_val_json)
            else:
                local_val_json = annotation_file
            self._coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if include_mask else 'box'),
                annotation_file=local_val_json)
        self._annotation_file = annotation_file
        self._include_mask = include_mask
        self._metric_names = [
            'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', 'ARmax10',
            'ARmax100', 'ARs', 'ARm', 'ARl'
        ]
        self._required_prediction_fields = [
            'source_id', 'num_detections', 'detection_classes',
            'detection_scores', 'detection_boxes'
        ]
        self._need_rescale_bboxes = need_rescale_bboxes
        if self._need_rescale_bboxes:
            self._required_prediction_fields.append('image_info')
        self._required_groundtruth_fields = [
            'source_id', 'height', 'width', 'classes', 'boxes'
        ]
        if self._include_mask:
            mask_metric_names = ['mask_' + x for x in self._metric_names]
            self._metric_names.extend(mask_metric_names)
            self._required_prediction_fields.extend(['detection_masks'])
            self._required_groundtruth_fields.extend(['masks'])

        self.reset()
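
The constructor supports two ways of supplying groundtruths; a brief construction sketch follows (class name from the snippets above, paths are placeholders):

    # Mode 1: groundtruths read from a COCO-format annotation file.
    # gs:// paths are first copied to a local temp file, as shown above.
    evaluator = COCOEvaluator(
        annotation_file='gs://my-bucket/instances_val.json',  # placeholder
        include_mask=True)

    # Mode 2: no annotation file; groundtruths are accumulated from the
    # dataloader and later converted with convert_groundtruths_to_coco_dataset().
    evaluator = COCOEvaluator(
        annotation_file=None,
        include_mask=False,
        need_rescale_bboxes=False)  # predictions already in absolute coordinates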
Code example #4
    def evaluate(self):
        """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [24] representing the
        coco-style evaluation metrics (box and mask).
    """
        if not self._annotation_file:
            gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
                self._groundtruths)
            coco_gt = coco_utils.COCOWrapper(
                eval_type=('mask' if self._include_mask else 'box'),
                gt_dataset=gt_dataset)
        else:
            coco_gt = self._coco_gt
        coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
            self._predictions)
        coco_dt = coco_gt.loadRes(predictions=coco_predictions)
        image_ids = [ann['image_id'] for ann in coco_predictions]

        coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_metrics = coco_eval.stats

        if self._include_mask:
            mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
            mcoco_eval.params.imgIds = image_ids
            mcoco_eval.evaluate()
            mcoco_eval.accumulate()
            mcoco_eval.summarize()
            mask_coco_metrics = mcoco_eval.stats

        if self._include_mask:
            metrics = np.hstack((coco_metrics, mask_coco_metrics))
        else:
            metrics = coco_metrics

        # Clean up the internal state so that the next evaluation starts fresh.
        self.reset()

        metrics_dict = {}
        for i, name in enumerate(self._metric_names):
            metrics_dict[name] = metrics[i].astype(np.float32)
        return metrics_dict
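
For reference, with include_mask=True the returned metrics_dict holds 24 float32 values; the key order follows self._metric_names as built in Code example #3 and can be reproduced standalone:

    # Reconstruct the metric key order used by the evaluator above.
    box_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl',
                 'ARmax1', 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']
    metric_names = box_names + ['mask_' + n for n in box_names]
    assert len(metric_names) == 24  # matches the box-and-mask metric count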