Example #1
 def _build_coco_metrics(self):
   """Build COCO metrics evaluator."""
   if (not self._task_config.model.include_mask
      ) or self._task_config.annotation_file:
     self.coco_metric = coco_evaluator.COCOEvaluator(
         annotation_file=self._task_config.annotation_file,
         include_mask=self._task_config.model.include_mask,
         per_category_metrics=self._task_config.per_category_metrics)
   else:
     # Builds COCO-style annotation file if include_mask is True, and
     # annotation_file isn't provided.
     annotation_path = os.path.join(self._logging_dir, 'annotation.json')
     if tf.io.gfile.exists(annotation_path):
       logging.info(
           'annotation.json file exists, skipping creating the annotation'
           ' file.')
     else:
       if self._task_config.validation_data.num_examples <= 0:
         logging.info('validation_data.num_examples needs to be > 0')
       if not self._task_config.validation_data.input_path:
         logging.info('Can not create annotation file for tfds.')
       logging.info(
           'Creating coco-style annotation file: %s', annotation_path)
       coco_utils.scan_and_generator_annotation_file(
           self._task_config.validation_data.input_path,
           self._task_config.validation_data.file_type,
           self._task_config.validation_data.num_examples,
           self.task_config.model.include_mask, annotation_path,
           regenerate_source_id=self._task_config.validation_data.decoder
           .simple_decoder.regenerate_source_id)
     self.coco_metric = coco_evaluator.COCOEvaluator(
         annotation_file=annotation_path,
         include_mask=self._task_config.model.include_mask,
         per_category_metrics=self._task_config.per_category_metrics)
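The evaluator built by _build_coco_metrics above is then driven like any other metric during evaluation. The sketch below is illustrative only: update_state() and result() are the COCOEvaluator calls shown in the testEval example further down this page, while reset_states(), run_coco_eval, model, and eval_dataset are assumptions introduced here for illustration.

def run_coco_eval(task, model, eval_dataset):
  # Assumption: the evaluator exposes a reset_states() method like a Keras metric.
  task.coco_metric.reset_states()
  for images, _ in eval_dataset:
    predictions = model(images, training=False)
    # groundtruths can be None when the evaluator was constructed from a COCO
    # JSON annotation file (cf. the testEval example below).
    task.coco_metric.update_state(groundtruths=None, predictions=predictions)
  # result() returns a dict of COCO metrics (AP, AP50, ...).
  return task.coco_metric.result()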
Example #2
  def build_metrics(self, training: bool = True):
    """Build detection metrics."""
    metrics = []
    if training:
      metric_names = [
          'total_loss',
          'rpn_score_loss',
          'rpn_box_loss',
          'frcnn_cls_loss',
          'frcnn_box_loss',
          'mask_loss',
          'model_loss'
      ]
      for name in metric_names:
        metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

    else:
      if (not self._task_config.model.include_mask
         ) or self._task_config.annotation_file:
        self.coco_metric = coco_evaluator.COCOEvaluator(
            annotation_file=self._task_config.annotation_file,
            include_mask=self._task_config.model.include_mask,
            per_category_metrics=self._task_config.per_category_metrics)
      else:
        # Builds COCO-style annotation file if include_mask is True, and
        # annotation_file isn't provided.
        annotation_path = os.path.join(self._logging_dir, 'annotation.json')
        if tf.io.gfile.exists(annotation_path):
          logging.info(
              'annotation.json file exists, skipping creating the annotation'
              ' file.')
        else:
          if self._task_config.validation_data.num_examples <= 0:
            logging.info('validation_data.num_examples needs to be > 0')
          if not self._task_config.validation_data.input_path:
            logging.info('Can not create annotation file for tfds.')
          logging.info(
              'Creating coco-style annotation file: %s', annotation_path)
          coco_utils.scan_and_generator_annotation_file(
              self._task_config.validation_data.input_path,
              self._task_config.validation_data.file_type,
              self._task_config.validation_data.num_examples,
              self.task_config.model.include_mask, annotation_path)
        self.coco_metric = coco_evaluator.COCOEvaluator(
            annotation_file=annotation_path,
            include_mask=self._task_config.model.include_mask,
            per_category_metrics=self._task_config.per_category_metrics)

    return metrics
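For the training branch above, the returned tf.keras.metrics.Mean objects are typically fed one value per step. A minimal, illustrative sketch: the losses dict and its keys are placeholders introduced here, while update_state() and result() are standard Keras metric API.

def update_train_metrics(metrics, losses):
  # Feed each Mean metric the loss value that shares its name, if present.
  for metric in metrics:
    if metric.name in losses:
      metric.update_state(losses[metric.name])
  # Snapshot the current running averages for logging.
  return {metric.name: metric.result() for metric in metrics}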
Example #3
  def build_metrics(self, training=True):
    """Build detection metrics."""
    metrics = []

    backbone = self.task_config.model.backbone.get()
    metric_names = collections.defaultdict(list)
    for key in range(backbone.min_level, backbone.max_level + 1):
      key = str(key)
      metric_names[key].append('loss')
      metric_names[key].append('avg_iou')
      metric_names[key].append('avg_obj')

    metric_names['net'].append('box')
    metric_names['net'].append('class')
    metric_names['net'].append('conf')

    for _, key in enumerate(metric_names.keys()):
      metrics.append(task_utils.ListMetrics(metric_names[key], name=key))

    self._metrics = metrics
    if not training:
      annotation_file = self.task_config.annotation_file
      if self._coco_91_to_80:
        annotation_file = None
      self.coco_metric = coco_evaluator.COCOEvaluator(
          annotation_file=annotation_file,
          include_mask=False,
          need_rescale_bboxes=False,
          per_category_metrics=self._task_config.per_category_metrics)

    return metrics
Example #4
    def build_metrics(self,
                      training: bool = True) -> List[tf.keras.metrics.Metric]:
        """Build detection metrics."""
        metrics = []
        if training:
            metric_names = [
                'total_loss', 'rpn_score_loss', 'rpn_box_loss',
                'frcnn_cls_loss', 'frcnn_box_loss', 'mask_loss',
                'maskrcnn_loss', 'segmentation_loss', 'model_loss'
            ]
            for name in metric_names:
                metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

            if self.task_config.segmentation_evaluation.report_train_mean_iou:
                self.segmentation_train_mean_iou = segmentation_metrics.MeanIoU(
                    name='train_mean_iou',
                    num_classes=self.task_config.model.num_classes,
                    rescale_predictions=False,
                    dtype=tf.float32)

        else:
            self.coco_metric = coco_evaluator.COCOEvaluator(
                annotation_file=self.task_config.annotation_file,
                include_mask=self.task_config.model.include_mask,
                per_category_metrics=self.task_config.per_category_metrics)

            rescale_predictions = (not self.task_config.validation_data.parser.
                                   segmentation_resize_eval_groundtruth)
            self.segmentation_perclass_iou_metric = segmentation_metrics.PerClassIoU(
                name='per_class_iou',
                num_classes=self.task_config.model.num_classes,
                rescale_predictions=rescale_predictions,
                dtype=tf.float32)
        return metrics
Example #5
    def build_metrics(self,
                      training: bool = True) -> List[tf.keras.metrics.Metric]:
        """Build detection metrics."""
        metrics = []
        num_segmentation_classes = self.task_config.model.segmentation_model.num_classes
        if training:
            metric_names = [
                'total_loss', 'rpn_score_loss', 'rpn_box_loss',
                'frcnn_cls_loss', 'frcnn_box_loss', 'mask_loss',
                'maskrcnn_loss', 'segmentation_loss', 'model_loss'
            ]
            for name in metric_names:
                metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

            if self.task_config.segmentation_evaluation.report_train_mean_iou:
                self.segmentation_train_mean_iou = segmentation_metrics.MeanIoU(
                    name='train_mean_iou',
                    num_classes=num_segmentation_classes,
                    rescale_predictions=False,
                    dtype=tf.float32)

        else:
            self.coco_metric = coco_evaluator.COCOEvaluator(
                annotation_file=self.task_config.annotation_file,
                include_mask=self.task_config.model.include_mask,
                per_category_metrics=self.task_config.per_category_metrics)

            rescale_predictions = (not self.task_config.validation_data.parser.
                                   segmentation_resize_eval_groundtruth)

            self.segmentation_perclass_iou_metric = segmentation_metrics.PerClassIoU(
                name='per_class_iou',
                num_classes=num_segmentation_classes,
                rescale_predictions=rescale_predictions,
                dtype=tf.float32)

            if isinstance(tf.distribute.get_strategy(),
                          tf.distribute.TPUStrategy):
                self._process_iou_metric_on_cpu = True
            else:
                self._process_iou_metric_on_cpu = False

            if self.task_config.model.generate_panoptic_masks:
                if not self.task_config.validation_data.parser.include_panoptic_masks:
                    raise ValueError(
                        '`include_panoptic_masks` should be set to True when'
                        ' computing panoptic quality.')
                pq_config = self.task_config.panoptic_quality_evaluator
                self.panoptic_quality_metric = panoptic_quality_evaluator.PanopticQualityEvaluator(
                    num_categories=pq_config.num_categories,
                    ignored_label=pq_config.ignored_label,
                    max_instances_per_category=pq_config.
                    max_instances_per_category,
                    offset=pq_config.offset,
                    is_thing=pq_config.is_thing,
                    rescale_predictions=pq_config.rescale_predictions)

        return metrics
Example #6
 def build_metrics(self, training=True):
     #return super().build_metrics(training=training)
     if not training:
         self.coco_metric = coco_evaluator.COCOEvaluator(
             annotation_file=self.task_config.annotation_file,
             include_mask=False,
             need_rescale_bboxes=False,
             per_category_metrics=self._task_config.per_category_metrics)
     return []
Example #7
    def testEval(self, include_mask, use_fake_predictions):
        coco = COCO(annotation_file=_COCO_JSON_FILE)
        index = np.random.randint(len(coco.dataset['images']))
        image_id = coco.dataset['images'][index]['id']
        # image_id = 26564
        # image_id = 324158
        if use_fake_predictions:
            predictions = get_fake_predictions(image_id,
                                               coco,
                                               include_mask=include_mask)
        else:
            predictions = get_predictions(image_id,
                                          coco,
                                          include_mask=include_mask)

        if not predictions:
            logging.info('Empty predictions for index=%d', index)
            return

        predictions = tf.nest.map_structure(
            lambda x: tf.convert_to_tensor(x)
            if x is not None else None, predictions)

        evaluator_w_json = coco_evaluator.COCOEvaluator(
            annotation_file=_COCO_JSON_FILE, include_mask=include_mask)
        evaluator_w_json.update_state(groundtruths=None,
                                      predictions=predictions)
        results_w_json = evaluator_w_json.result()

        dummy_generator = DummyGroundtruthGenerator(include_mask=include_mask,
                                                    image_id=image_id,
                                                    coco=coco)
        coco_utils.generate_annotation_file(dummy_generator,
                                            self._saved_coco_json_file)
        evaluator_no_json = coco_evaluator.COCOEvaluator(
            annotation_file=self._saved_coco_json_file,
            include_mask=include_mask)
        evaluator_no_json.update_state(groundtruths=None,
                                       predictions=predictions)
        results_no_json = evaluator_no_json.result()

        for k, v in results_w_json.items():
            self.assertEqual(v, results_no_json[k])
Example #8
    def build_metrics(self, training=True):
        """Build detection metrics."""
        metrics = []
        metric_names = ['total_loss', 'cls_loss', 'box_loss', 'model_loss']
        for name in metric_names:
            metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

        if not training:
            self.coco_metric = coco_evaluator.COCOEvaluator(
                annotation_file=None, include_mask=False)

        return metrics
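Example #8 passes annotation_file=None, so there is no COCO JSON to read ground truth from. A minimal sketch, assuming that in this configuration the ground truths have to be supplied to update_state() together with the predictions (the testEval example passes groundtruths=None only because a JSON file is provided); groundtruths_batch and predictions_batch are placeholders:

evaluator = coco_evaluator.COCOEvaluator(annotation_file=None, include_mask=False)
# Assumption: without an annotation file, per-batch ground truths are required here.
evaluator.update_state(groundtruths=groundtruths_batch,
                       predictions=predictions_batch)
eval_results = evaluator.result()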
Example #9
    def build_metrics(self, training=True):
        """Build detection metrics."""
        metrics = []
        metric_names = ['cls_loss', 'box_loss', 'giou_loss']
        for name in metric_names:
            metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

        if not training:
            self.coco_metric = coco_evaluator.COCOEvaluator(
                annotation_file='',
                include_mask=False,
                need_rescale_bboxes=True,
                per_category_metrics=self._task_config.per_category_metrics)
        return metrics
Example #10
  def build_metrics(self, training=True):
    metrics = []
    metric_names = self._metric_names

    for name in metric_names:
      metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

    self._metrics = metrics
    if not training:
      self.coco_metric = coco_evaluator.COCOEvaluator(
          annotation_file=self.task_config.annotation_file,
          include_mask=False,
          need_rescale_bboxes=False,
          per_category_metrics=self._task_config.per_category_metrics)
    return metrics
Example #11
    def build_metrics(self, training=True):
        """Build detection metrics."""
        metrics = []
        if training:
            metric_names = [
                'total_loss', 'rpn_score_loss', 'rpn_box_loss',
                'frcnn_cls_loss', 'frcnn_box_loss', 'mask_loss', 'model_loss'
            ]
            for name in metric_names:
                metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

        else:
            self.coco_metric = coco_evaluator.COCOEvaluator(
                annotation_file=self._task_config.annotation_file,
                include_mask=self._task_config.model.include_mask)

        return metrics
Example #12
    def build_metrics(self, training: bool = True):
        """Build detection metrics."""
        metrics = []
        metric_names = ['total_loss', 'cls_loss', 'box_loss', 'model_loss']
        for name in metric_names:
            metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))

        if not training:
            if self.task_config.validation_data.tfds_name and self.task_config.annotation_file:
                raise ValueError(
                    "Can't evaluate using annotation file when TFDS is used.")
            self.coco_metric = coco_evaluator.COCOEvaluator(
                annotation_file=self.task_config.annotation_file,
                include_mask=False,
                per_category_metrics=self.task_config.per_category_metrics)

        return metrics