def build_metrics(self, training: bool = True):
        """Gets streaming metrics for training/validation."""
        metrics = []
        if training and self.task_config.evaluation.report_train_mean_iou:
            metrics.append(
                segmentation_metrics.MeanIoU(
                    name='mean_iou',
                    num_classes=self.task_config.model.num_classes,
                    rescale_predictions=False,
                    dtype=tf.float32))
            if self.task_config.model.get('mask_scoring_head'):
                metrics.append(
                    tf.keras.metrics.MeanSquaredError(name='mask_scores_mse'))
        else:
            self.iou_metric = segmentation_metrics.PerClassIoU(
                name='per_class_iou',
                num_classes=self.task_config.model.num_classes,
                rescale_predictions=(
                    not self.task_config.validation_data.resize_eval_groundtruth
                ),
                dtype=tf.float32)
            if (self.task_config.validation_data.resize_eval_groundtruth and
                    self.task_config.model.get('mask_scoring_head')):
                # Mask scores metric can only be computed if labels are scaled
                # to match the predicted mask scores.
                metrics.append(
                    tf.keras.metrics.MeanSquaredError(name='mask_scores_mse'))

            # Update state on CPU if TPUStrategy due to dynamic resizing.
            self._process_iou_metric_on_cpu = isinstance(
                tf.distribute.get_strategy(), tf.distribute.TPUStrategy)

        return metrics
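
These snippets all assume the surrounding TF Model Garden task class and the usual imports. A minimal sketch of that context follows; the module paths are assumptions based on the tensorflow/models layout, so verify them against your checkout:

# Assumed imports for these snippets (paths are a sketch, not verified):
from typing import List

import tensorflow as tf

from official.vision.evaluation import segmentation_metrics
from official.vision.evaluation import panoptic_quality_evaluator

Note that the IoU metrics are stored on `self` rather than appended to the returned `metrics` list: when `_process_iou_metric_on_cpu` is set under `TPUStrategy`, their state is updated outside the compiled eval step, where the dynamically resized ground truth is available.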
Example #2
    def build_metrics(self,
                      training: bool = True) -> List[tf.keras.metrics.Metric]:
        """Build detection metrics."""
        metrics = []
        num_segmentation_classes = (
            self.task_config.model.segmentation_model.num_classes)
        if training:
            metric_names = [
                'total_loss', 'rpn_score_loss', 'rpn_box_loss',
                'frcnn_cls_loss', 'frcnn_box_loss', 'mask_loss',
                'maskrcnn_loss', 'segmentation_loss', 'model_loss'
            ]
            for name in metric_names:
                metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

            if self.task_config.segmentation_evaluation.report_train_mean_iou:
                self.segmentation_train_mean_iou = segmentation_metrics.MeanIoU(
                    name='train_mean_iou',
                    num_classes=num_segmentation_classes,
                    rescale_predictions=False,
                    dtype=tf.float32)

        else:
            self._build_coco_metrics()

            rescale_predictions = (
                not self.task_config.validation_data.parser
                .segmentation_resize_eval_groundtruth)

            self.segmentation_perclass_iou_metric = (
                segmentation_metrics.PerClassIoU(
                    name='per_class_iou',
                    num_classes=num_segmentation_classes,
                    rescale_predictions=rescale_predictions,
                    dtype=tf.float32))

            # Update state on CPU if TPUStrategy due to dynamic resizing.
            self._process_iou_metric_on_cpu = isinstance(
                tf.distribute.get_strategy(), tf.distribute.TPUStrategy)

            if self.task_config.model.generate_panoptic_masks:
                if not self.task_config.validation_data.parser.include_panoptic_masks:
                    raise ValueError(
                        '`include_panoptic_masks` should be set to True when'
                        ' computing panoptic quality.')
                pq_config = self.task_config.panoptic_quality_evaluator
                self.panoptic_quality_metric = (
                    panoptic_quality_evaluator.PanopticQualityEvaluator(
                        num_categories=pq_config.num_categories,
                        ignored_label=pq_config.ignored_label,
                        max_instances_per_category=(
                            pq_config.max_instances_per_category),
                        offset=pq_config.offset,
                        is_thing=pq_config.is_thing,
                        rescale_predictions=pq_config.rescale_predictions))

        return metrics
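
The `metric_names` loop above registers one streaming `tf.keras.metrics.Mean` per loss term; each train step then pushes that step's loss value into it. A self-contained illustration of the pattern (the values here are made up):

import tensorflow as tf

# One Mean metric per loss term, updated once per train step.
loss_tracker = tf.keras.metrics.Mean('total_loss', dtype=tf.float32)
loss_tracker.update_state(2.0)  # loss from step 1
loss_tracker.update_state(4.0)  # loss from step 2
assert loss_tracker.result().numpy() == 3.0  # running mean across steps
loss_tracker.reset_state()  # typically reset when logs are flushed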
Example #3
  def test_mean_iou_metric(self, rescale_predictions):
    tf.config.run_functions_eagerly(True)
    mean_iou_metric = segmentation_metrics.MeanIoU(
        num_classes=2, rescale_predictions=rescale_predictions)
    y_pred, y_true = self._create_test_data()
    # Disable autograph for correct coverage statistics.
    update_fn = tf.autograph.experimental.do_not_convert(
        mean_iou_metric.update_state)
    update_fn(y_true=y_true, y_pred=y_pred)
    miou = mean_iou_metric.result()
    self.assertAlmostEqual(miou.numpy(), 0.762, places=3)
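
The asserted 0.762 depends on `_create_test_data`, which is not shown here. For intuition about the streaming-IoU arithmetic itself, the stock tf.keras.metrics.MeanIoU (the segmentation_metrics wrapper adds the rescale_predictions handling on top of essentially the same confusion-matrix computation) can be run stand-alone:

import tensorflow as tf

miou = tf.keras.metrics.MeanIoU(num_classes=2)
miou.update_state(y_true=[0, 0, 1, 1], y_pred=[0, 1, 1, 1])
# IoU(class 0) = 1 / (1 + 0 + 1) = 0.5, IoU(class 1) = 2 / (2 + 1 + 0) = 2/3,
# so the mean IoU is (0.5 + 2/3) / 2 ~= 0.583.
print(miou.result().numpy())  # ~0.583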
Example #4
  def build_metrics(self, training: bool = True) -> List[
      tf.keras.metrics.Metric]:
    """Build metrics."""
    eval_config = self.task_config.evaluation
    metrics = []
    if training:
      metric_names = [
          'total_loss',
          'segmentation_loss',
          'instance_center_heatmap_loss',
          'instance_center_offset_loss',
          'model_loss']
      for name in metric_names:
        metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

      if eval_config.report_train_mean_iou:
        self.train_mean_iou = segmentation_metrics.MeanIoU(
            name='train_mean_iou',
            num_classes=self.task_config.model.num_classes,
            rescale_predictions=False,
            dtype=tf.float32)
    else:
      rescale_predictions = (not self.task_config.validation_data.parser
                             .resize_eval_groundtruth)
      self.perclass_iou_metric = segmentation_metrics.PerClassIoU(
          name='per_class_iou',
          num_classes=self.task_config.model.num_classes,
          rescale_predictions=rescale_predictions,
          dtype=tf.float32)

      # Update state on CPU if TPUStrategy due to dynamic resizing.
      self._process_iou_metric_on_cpu = isinstance(
          tf.distribute.get_strategy(), tf.distribute.TPUStrategy)

      if self.task_config.model.generate_panoptic_masks:
        self.panoptic_quality_metric = (
            panoptic_quality_evaluator.PanopticQualityEvaluator(
                num_categories=self.task_config.model.num_classes,
                ignored_label=eval_config.ignored_label,
                max_instances_per_category=(
                    eval_config.max_instances_per_category),
                offset=eval_config.offset,
                is_thing=eval_config.is_thing,
                rescale_predictions=eval_config.rescale_predictions))

    return metrics
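
As a reference for what PanopticQualityEvaluator accumulates, here is a minimal sketch of the panoptic-quality formula from Kirillov et al. (PQ = sum of matched IoUs divided by |TP| + 0.5|FP| + 0.5|FN|). The real evaluator additionally handles per-category accounting, the ignored label, and prediction rescaling; the helper below is purely illustrative:

# Minimal sketch of the PQ formula; matched_ious holds the IoU of each
# matched (true-positive) segment pair, where a match requires IoU > 0.5.
def panoptic_quality(matched_ious, num_fp, num_fn):
  num_tp = len(matched_ious)
  if num_tp + num_fp + num_fn == 0:
    return 0.0
  sq = sum(matched_ious) / num_tp if num_tp else 0.0  # segmentation quality
  rq = num_tp / (num_tp + 0.5 * num_fp + 0.5 * num_fn)  # recognition quality
  return sq * rq  # PQ = SQ * RQ

# Two matched segments with IoUs 0.8 and 0.6, plus one false positive:
print(panoptic_quality([0.8, 0.6], num_fp=1, num_fn=0))  # ~0.56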