Code Example #1

evaluate_tflite runs COCO-style AP evaluation of an EfficientDet TFLite model over a tf.data.Dataset. It is a method excerpt: it relies on self._get_evaluator_and_label_map, self._get_metric_dict, self.config, and the surrounding module's eval_tflite, postprocess, and utils helpers.
    def evaluate_tflite(self,
                        tflite_filepath: str,
                        dataset: tf.data.Dataset,
                        steps: int,
                        json_file: Optional[str] = None) -> Dict[str, float]:
        """Evaluate the EfficientDet TFLite model.

    Args:
      tflite_filepath: File path to the TFLite model.
      dataset: tf.data.Dataset used for evaluation.
      steps: Number of steps to evaluate the model.
      json_file: JSON with COCO data format containing golden bounding boxes.
        Used for validation. If None, use the ground truth from the dataloader.
        Refer to
        https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
          for the description of COCO data format.

    Returns:
      A dict contains AP metrics.
    """
        # TODO(b/182441458): Use the task library for evaluation instead once it
        # supports python interface.
        evaluator, label_map = self._get_evaluator_and_label_map(json_file)
        dataset = dataset.take(steps)

        lite_runner = eval_tflite.LiteRunner(tflite_filepath,
                                             only_network=False)
        progbar = tf.keras.utils.Progbar(steps)
        for i, (images, labels) in enumerate(dataset):
            # Get the output result after post-processing NMS op.
            nms_boxes, nms_classes, nms_scores, _ = lite_runner.run(images)

            # CLASS_OFFSET is used since the label_id for `background` is 0 in
            # label_map, while it's not actually included in the model. We don't
            # need to add the offset in the Android application.
            nms_classes += postprocess.CLASS_OFFSET

            # Scale the normalized [0, 1] boxes to the model's input resolution.
            height, width = utils.parse_image_size(self.config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes *= normalize_factor
            # Undo the resize so the boxes are in original-image coordinates.
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes, nms_classes, nms_scores, labels['source_ids'])

            detections = postprocess.transform_detections(detections)
            evaluator.update_state(labels['groundtruth_data'].numpy(),
                                   detections.numpy())
            progbar.update(i + 1)
        print()

        metric_dict = self._get_metric_dict(evaluator, label_map)
        return metric_dict
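
A minimal, hypothetical usage sketch for orientation: the signature comes from the excerpt above, while spec, val_ds, and the file path are placeholder names, not from the original source.

    # Hypothetical usage sketch: `spec` stands in for an EfficientDet model
    # spec object exposing evaluate_tflite(), and `val_ds` for a
    # tf.data.Dataset yielding (images, labels) batches.
    metrics = spec.evaluate_tflite(
        tflite_filepath='model.tflite',  # placeholder path
        dataset=val_ds,
        steps=100,
        json_file=None)  # None: use ground truth from the dataloader
    print(metrics)  # a dict of AP metrics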
Code Example #2

main is a standalone evaluation entry point for an EfficientDet TFLite model. It consumes either the model's built-in NMS outputs or, with the only_network flag, raw class/box outputs post-processed in Python. It assumes the surrounding script defines FLAGS, LiteRunner, and the hparams_config, anchors, utils, label_util, coco_metric, dataloader, and postprocess helpers.
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # Evaluate all examples; don't drop the remainder.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        # Ceiling division: number of batches needed to cover eval_samples.
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
    # Default to 5000 samples (the size of the COCO val2017 split).
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        if not FLAGS.only_network:
            # The TFLite model includes the NMS post-processing op; use its
            # box/class/score outputs directly.
            nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(
                images)
            nms_classes_bs += postprocess.CLASS_OFFSET

            # Scale the normalized [0, 1] boxes to the model's input resolution,
            # then undo the resize to get original-image coordinates.
            height, width = utils.parse_image_size(config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes_bs *= normalize_factor
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes_bs = nms_boxes_bs * tf.cast(scales,
                                                      nms_boxes_bs.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes_bs, nms_classes_bs, nms_scores_bs,
                labels['source_ids'])
        else:
            # Raw network outputs: run the post-processing (including NMS) in
            # Python instead.
            cls_outputs, box_outputs = lite_runner.run(images)
            detections = postprocess.generate_detections(
                config,
                cls_outputs,
                box_outputs,
                labels['image_scales'],
                labels['source_ids'],
                pre_class_nms=FLAGS.pre_class_nms)

        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i + 1)

    # Compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        # Per-class AP values follow the overall metrics in `metrics`.
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
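
A hedged sketch of how this script might be invoked. The flag names are taken from the FLAGS references in the code above; the script name and all values are placeholders.

    # Hypothetical invocation (script name and values are placeholders):
    #
    #   python eval_tflite.py \
    #     --model_name=efficientdet-lite0 \
    #     --tflite_path=model.tflite \
    #     --val_file_pattern='val-*.tfrecord' \
    #     --val_json_file=instances_val2017.json \
    #     --eval_samples=5000
    #
    # With only_network enabled, the script would instead read raw class/box
    # outputs and run NMS in Python, per the else-branch above.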