    def build_metrics(self, training: bool = True):
        """Gets streaming metrics for training/validation."""
        metrics = []
        if training and self.task_config.evaluation.report_train_mean_iou:
            metrics.append(
                segmentation_metrics.MeanIoU(
                    name='mean_iou',
                    num_classes=self.task_config.model.num_classes,
                    rescale_predictions=False,
                    dtype=tf.float32))
            if self.task_config.model.get('mask_scoring_head'):
                metrics.append(
                    tf.keras.metrics.MeanSquaredError(name='mask_scores_mse'))
        else:
            self.iou_metric = segmentation_metrics.PerClassIoU(
                name='per_class_iou',
                num_classes=self.task_config.model.num_classes,
                rescale_predictions=not self.task_config.validation_data.
                resize_eval_groundtruth,
                dtype=tf.float32)
            if (self.task_config.validation_data.resize_eval_groundtruth and
                    self.task_config.model.get('mask_scoring_head')):
                # Mask scores metric can only be computed if labels are scaled
                # to match the predicted mask scores.
                metrics.append(
                    tf.keras.metrics.MeanSquaredError(name='mask_scores_mse'))

            # Update state on CPU if TPUStrategy due to dynamic resizing.
            self._process_iou_metric_on_cpu = isinstance(
                tf.distribute.get_strategy(), tf.distribute.TPUStrategy)

        return metrics
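
The per-class IoU metric built in the else-branch is stored on the task rather than returned, so a consumer has to drive it explicitly. The sketch below is illustrative only, not the library's actual validation step: the function name and the convention of deferring the update by returning raw tensors are assumptions, and what `PerClassIoU.update_state` actually expects depends on its implementation.

import tensorflow as tf

def validation_step_sketch(self, inputs, model):
  """Illustrative only: drives the side-stored PerClassIoU metric."""
  features, labels = inputs
  outputs = model(features, training=False)
  logs = {'loss': tf.constant(0.0)}  # placeholder for the real losses
  if self._process_iou_metric_on_cpu:
    # On TPU, hand the raw tensors back so the metric can be updated later
    # on the host, where dynamically shaped ground truth is easier to handle.
    logs[self.iou_metric.name] = (labels, outputs)
  else:
    # Labels/outputs are passed through untouched here; the exact tensors
    # expected by the metric are left to the real implementation.
    self.iou_metric.update_state(labels, outputs)
  return logs
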
Example #2
    def build_metrics(self,
                      training: bool = True) -> List[tf.keras.metrics.Metric]:
        """Build detection metrics."""
        metrics = []
        if training:
            metric_names = [
                'total_loss', 'rpn_score_loss', 'rpn_box_loss',
                'frcnn_cls_loss', 'frcnn_box_loss', 'mask_loss',
                'maskrcnn_loss', 'segmentation_loss', 'model_loss'
            ]
            for name in metric_names:
                metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

            if self.task_config.segmentation_evaluation.report_train_mean_iou:
                self.segmentation_train_mean_iou = segmentation_metrics.MeanIoU(
                    name='train_mean_iou',
                    num_classes=self.task_config.model.num_classes,
                    rescale_predictions=False,
                    dtype=tf.float32)

        else:
            self.coco_metric = coco_evaluator.COCOEvaluator(
                annotation_file=self.task_config.annotation_file,
                include_mask=self.task_config.model.include_mask,
                per_category_metrics=self.task_config.per_category_metrics)

            rescale_predictions = (not self.task_config.validation_data.parser.
                                   segmentation_resize_eval_groundtruth)
            self.segmentation_perclass_iou_metric = segmentation_metrics.PerClassIoU(
                name='per_class_iou',
                num_classes=self.task_config.model.num_classes,
                rescale_predictions=rescale_predictions,
                dtype=tf.float32)
        return metrics
Example #3
    def build_metrics(self,
                      training: bool = True) -> List[tf.keras.metrics.Metric]:
        """Build detection metrics."""
        metrics = []
        num_segmentation_classes = self.task_config.model.segmentation_model.num_classes
        if training:
            metric_names = [
                'total_loss', 'rpn_score_loss', 'rpn_box_loss',
                'frcnn_cls_loss', 'frcnn_box_loss', 'mask_loss',
                'maskrcnn_loss', 'segmentation_loss', 'model_loss'
            ]
            for name in metric_names:
                metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

            if self.task_config.segmentation_evaluation.report_train_mean_iou:
                self.segmentation_train_mean_iou = segmentation_metrics.MeanIoU(
                    name='train_mean_iou',
                    num_classes=num_segmentation_classes,
                    rescale_predictions=False,
                    dtype=tf.float32)

        else:
            self.coco_metric = coco_evaluator.COCOEvaluator(
                annotation_file=self.task_config.annotation_file,
                include_mask=self.task_config.model.include_mask,
                per_category_metrics=self.task_config.per_category_metrics)

            rescale_predictions = (not self.task_config.validation_data.parser.
                                   segmentation_resize_eval_groundtruth)

            self.segmentation_perclass_iou_metric = segmentation_metrics.PerClassIoU(
                name='per_class_iou',
                num_classes=num_segmentation_classes,
                rescale_predictions=rescale_predictions,
                dtype=tf.float32)

            if isinstance(tf.distribute.get_strategy(),
                          tf.distribute.TPUStrategy):
                self._process_iou_metric_on_cpu = True
            else:
                self._process_iou_metric_on_cpu = False

            if self.task_config.model.generate_panoptic_masks:
                if not self.task_config.validation_data.parser.include_panoptic_masks:
                    raise ValueError(
                        '`include_panoptic_masks` should be set to True when'
                        ' computing panoptic quality.')
                pq_config = self.task_config.panoptic_quality_evaluator
                self.panoptic_quality_metric = panoptic_quality_evaluator.PanopticQualityEvaluator(
                    num_categories=pq_config.num_categories,
                    ignored_label=pq_config.ignored_label,
                    max_instances_per_category=pq_config.
                    max_instances_per_category,
                    offset=pq_config.offset,
                    is_thing=pq_config.is_thing,
                    rescale_predictions=pq_config.rescale_predictions)

        return metrics
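
The panoptic-quality evaluator constructed above pulls several fields from `task_config.panoptic_quality_evaluator`. As a purely illustrative stand-in (the real project defines this through its own config system), a config object exposing the attributes that code reads might look like:

import dataclasses
from typing import List, Optional

@dataclasses.dataclass
class PanopticQualityEvaluatorConfig:
  """Hypothetical config mirroring the attributes read above."""
  num_categories: int = 2
  ignored_label: int = 0
  max_instances_per_category: int = 256
  offset: int = 256 * 256 * 256
  is_thing: Optional[List[bool]] = None  # per-category thing/stuff flags
  rescale_predictions: bool = False
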
Example #4
  def build_metrics(self, training=True):
    """Gets streaming metrics for training/validation."""
    metrics = []
    if training:
      metrics.append(segmentation_metrics.MeanIoU(
          name='mean_iou',
          num_classes=self.task_config.model.num_classes,
          rescale_predictions=False,
          dtype=tf.float32))
    else:
      self.miou_metric = segmentation_metrics.MeanIoU(
          name='val_mean_iou',
          num_classes=self.task_config.model.num_classes,
          rescale_predictions=not self.task_config.validation_data
          .resize_eval_groundtruth,
          dtype=tf.float32)

    return metrics
Example #5
  def build_metrics(self, training=True):
    """Gets streaming metrics for training/validation."""
    metrics = []
    if training:
      # TODO(arashwan): make MeanIoU tpu friendly.
      if not isinstance(tf.distribute.get_strategy(),
                        tf.distribute.experimental.TPUStrategy):
        metrics.append(segmentation_metrics.MeanIoU(
            name='mean_iou',
            num_classes=self.task_config.model.num_classes,
            rescale_predictions=False))
    else:
      self.miou_metric = segmentation_metrics.MeanIoU(
          name='val_mean_iou',
          num_classes=self.task_config.model.num_classes,
          rescale_predictions=not self.task_config.validation_data
          .resize_eval_groundtruth)

    return metrics
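
Note that this variant tests for the older `tf.distribute.experimental.TPUStrategy` spelling, whereas Example #7 checks `tf.distribute.TPUStrategy`. A quick, standalone way to see which strategy is active:

import tensorflow as tf

strategy = tf.distribute.get_strategy()  # default strategy outside any scope
on_tpu = isinstance(strategy, tf.distribute.TPUStrategy)
print(type(strategy).__name__, 'TPU:', on_tpu)
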
Example #6
  def test_mean_iou_metric(self, rescale_predictions):
    tf.config.experimental_run_functions_eagerly(True)
    mean_iou_metric = segmentation_metrics.MeanIoU(
        num_classes=2, rescale_predictions=rescale_predictions)
    y_pred, y_true = self._create_test_data()
    # Disable autograph for correct coverage statistics.
    update_fn = tf.autograph.experimental.do_not_convert(
        mean_iou_metric.update_state)
    update_fn(y_true=y_true, y_pred=y_pred)
    miou = mean_iou_metric.result()
    self.assertAlmostEqual(miou.numpy(), 0.762, places=3)
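
The test depends on the project-internal `_create_test_data` fixture and the custom metric's rescaling logic, so it is not self-contained. As a rough, self-contained analog, the stock Keras `MeanIoU` shows the same update_state()/result() flow, without any rescaling:

import tensorflow as tf

metric = tf.keras.metrics.MeanIoU(num_classes=2)
y_true = tf.constant([0, 0, 1, 1])
y_pred = tf.constant([0, 1, 1, 1])  # one of four pixels is misclassified
metric.update_state(y_true, y_pred)
print(metric.result().numpy())  # IoU averaged over both classes
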
Example #7
  def build_metrics(self, training: bool = True):
    """Gets streaming metrics for training/validation."""
    metrics = []
    if training and self.task_config.evaluation.report_train_mean_iou:
      metrics.append(segmentation_metrics.MeanIoU(
          name='mean_iou',
          num_classes=self.task_config.model.num_classes,
          rescale_predictions=False,
          dtype=tf.float32))
    else:
      self.iou_metric = segmentation_metrics.PerClassIoU(
          name='per_class_iou',
          num_classes=self.task_config.model.num_classes,
          rescale_predictions=not self.task_config.validation_data
          .resize_eval_groundtruth,
          dtype=tf.float32)

      # Update state on CPU if TPUStrategy due to dynamic resizing.
      self._process_iou_metric_on_cpu = isinstance(
          tf.distribute.get_strategy(), tf.distribute.TPUStrategy)

    return metrics
Example #8
  def build_metrics(self, training: bool = True):
    """Gets streaming metrics for training/validation."""
    metrics = []
    if training and self.task_config.evaluation.report_train_mean_iou:
      metrics.append(
          segmentation_metrics.MeanIoU(
              name='mean_iou',
              num_classes=self.task_config.model.num_classes,
              rescale_predictions=False,
              dtype=tf.float32))
    else:
      self.iou_metric = segmentation_metrics.PerClassIoU(
          name='per_class_iou',
          num_classes=self.task_config.model.num_classes,
          rescale_predictions=not self.task_config.validation_data
          .resize_eval_groundtruth,
          dtype=tf.float32)
    metrics.append(
        segmentation_metrics.Accuracy(
            name='accuracy',
            rescale_predictions=False,
            dtype=tf.float32))
    return metrics
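
In all of the variants above, the list returned by `build_metrics` follows the standard streaming-metric contract: update per step, read at report time, reset between epochs. A generic sketch with `tf.keras.metrics.Mean` stand-ins for the task-specific metrics:

import tensorflow as tf

metrics = [tf.keras.metrics.Mean('total_loss'),
           tf.keras.metrics.Mean('model_loss')]

for step_losses in [(2.0, 1.5), (1.0, 0.8)]:  # fake per-step loss values
  for metric, value in zip(metrics, step_losses):
    metric.update_state(value)

report = {m.name: m.result().numpy() for m in metrics}
print(report)  # {'total_loss': 1.5, 'model_loss': 1.15}

for m in metrics:
  m.reset_state()  # start fresh for the next epoch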