Example #1
def _get_label_map(label_map):
    """Gets the label map dict."""
    if isinstance(label_map, list):
        label_map_dict = {}
        for i, label in enumerate(label_map):
        # 0 is reserved for background.
            label_map_dict[i + 1] = label
        label_map = label_map_dict
    label_map = label_util.get_label_map(label_map)

    if 0 in label_map and label_map[0] != 'background':
        raise ValueError('0 must be reserved for background.')
    label_map.pop(0, None)
    name_set = set()
    for idx, name in label_map.items():
        if not isinstance(idx, int):
            raise ValueError(
                'The key (label id) in label_map must be an integer.')
        if not isinstance(name, str):
            raise ValueError(
                'The value (label name) in label_map must be a string.')
        if name in name_set:
            raise ValueError(
                'The value %s (label name) can\'t be duplicated.' % name)
        name_set.add(name)
    return label_map
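For context, a minimal sketch of the list-to-dict branch above (the label names are made up): list entries are shifted to start at id 1 because id 0 is reserved for background.

# Minimal sketch of _get_label_map's list handling; label names are made up.
labels = ['person', 'bicycle', 'car']
label_map = {i + 1: name for i, name in enumerate(labels)}  # 0 is background
assert label_map == {1: 'person', 2: 'bicycle', 3: 'car'}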
Example #2
 def set_model(self, model: tf.keras.Model):
   self.model = model
   config = model.config
   self.config = config
   label_map = label_util.get_label_map(config.label_map)
   log_dir = os.path.join(config.model_dir, 'coco')
   self.file_writer = tf.summary.create_file_writer(log_dir)
   self.evaluator = coco_metric.EvaluationMetric(
       filename=config.val_json_file, label_map=label_map)
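set_model is the standard tf.keras.callbacks.Callback hook that Keras invokes when a callback is bound to a model. A hedged sketch of how such a callback might be attached (the COCOCallback name and the training setup are assumptions, not the repository's confirmed code):

import tensorflow as tf

class COCOCallback(tf.keras.callbacks.Callback):
    # Hypothetical wrapper class: Keras calls set_model when the callback is
    # attached to a model; Example #2's version also builds the summary
    # writer and COCO evaluator at that point.
    def set_model(self, model: tf.keras.Model):
        self.model = model

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')
# model.fit(x, y, callbacks=[COCOCallback()])  # triggers set_model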
Example #3
 def _export_labels(self, label_filepath):
     """Export labels to label_filepath."""
     tf.compat.v1.logging.info('Saving labels in %s.', label_filepath)
     num_classes = self.model_spec.config.num_classes
     label_map = label_util.get_label_map(self.model_spec.config.label_map)
     with tf.io.gfile.GFile(label_filepath, 'w') as f:
          # Ignore label_map[0], which is the background class. The labels in
          # the label file for TFLite metadata should start from the actual
          # labels, without the background.
         for i in range(num_classes):
             label = label_map[i + 1] if i + 1 in label_map else '???'
             f.write(label + '\n')
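A pure-Python sketch of the export loop above (the labels are made up), showing how ids missing from label_map fall back to '???':

# Hypothetical label_map with id 2 missing; ids start at 1 (0 is background).
num_classes = 3
label_map = {1: 'cat', 3: 'dog'}
lines = [label_map.get(i + 1, '???') for i in range(num_classes)]
assert lines == ['cat', '???', 'dog']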
Example #4
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path)
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        cls_outputs, box_outputs = lite_runner.run(images)
        detections = postprocess.generate_detections(config, cls_outputs,
                                                     box_outputs,
                                                     labels['image_scales'],
                                                     labels['source_ids'])
        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
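The per-class block at the end relies on a fixed layout of the metrics vector: the overall COCO metrics come first, followed by one AP per category in ascending category-id order. A sketch with made-up numbers:

# Made-up metrics vector: overall metrics first, then per-class APs.
metric_names = ['AP', 'AP50', 'AP75']
metrics = [0.42, 0.61, 0.45, 0.50, 0.34]
label_map = {3: 'car', 1: 'person'}
metric_dict = {name: metrics[i] for i, name in enumerate(metric_names)}
for i, cid in enumerate(sorted(label_map.keys())):
    metric_dict['AP_/%s' % label_map[cid]] = metrics[i + len(metric_names)]
assert metric_dict == {'AP': 0.42, 'AP50': 0.61, 'AP75': 0.45,
                       'AP_/person': 0.50, 'AP_/car': 0.34}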
Example #5
  def _get_evaluator_and_label_map(
      self, json_file: str
  ) -> Tuple[coco_metric.EvaluationMetric, Optional[Dict[int, str]]]:
    """Gets evaluator and label_map for evaluation."""
    label_map = label_util.get_label_map(self.config.label_map)
    # Sorts label_map keys, since pycocotools.cocoeval uses sorted catIds
    # (category ids) in the COCOeval class.
    label_map = _get_ordered_label_map(label_map)

    evaluator = coco_metric.EvaluationMetric(
        filename=json_file, label_map=label_map)

    evaluator.reset_states()
    return evaluator, label_map
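_get_ordered_label_map is not shown in this example; a plausible implementation (an assumption, not necessarily the repository's exact code) re-keys the dict in ascending category-id order to match the sorted catIds used by pycocotools' COCOeval:

import collections

def _get_ordered_label_map(label_map):
    # Hypothetical sketch: return the entries ordered by category id.
    if not label_map:
        return label_map
    ordered_label_map = collections.OrderedDict()
    for cid in sorted(label_map.keys()):
        ordered_label_map[cid] = label_map[cid]
    return ordered_label_map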
Example #6
    def estimator_metric_fn(self, detections, groundtruth_data):
        """Constructs the metric function for tf.TPUEstimator.

    For each metric, we return the evaluation op and an update op; the update op
    is shared across all metrics and simply appends the set of detections to the
    `self.detections` list. The metric op is invoked after all examples have
    been seen and computes the aggregate COCO metrics. Please find details API
    in: https://www.tensorflow.org/api_docs/python/tf/contrib/learn/MetricSpec

    Args:
      detections: Detection results in a tensor with each row representing
        [image_id, x, y, width, height, score, class]
      groundtruth_data: Groundtruth annotations in a tensor with each row
        representing [y1, x1, y2, x2, is_crowd, area, class].
    Returns:
      metrics_dict: A dictionary mapping from evaluation name to a tuple of
        operations (`metric_op`, `update_op`). `update_op` appends the
        detections for the metric to the `self.detections` list.
    """
        with tf.name_scope('coco_metric'):
            if self.testdev_dir:
                update_op = tf.numpy_function(self.update_state,
                                              [groundtruth_data, detections],
                                              [])
                metrics = tf.numpy_function(self.result, [], tf.float32)
                metrics_dict = {'AP': (metrics, update_op)}
                return metrics_dict
            else:
                update_op = tf.numpy_function(self.update_state,
                                              [groundtruth_data, detections],
                                              [])
                metrics = tf.numpy_function(self.result, [], tf.float32)
                metrics_dict = {}
                for i, name in enumerate(self.metric_names):
                    metrics_dict[name] = (metrics[i], update_op)

                if self.label_map:
                    # process per-class AP.
                    label_map = label_util.get_label_map(self.label_map)
                    for i, cid in enumerate(sorted(label_map.keys())):
                        name = 'AP_/%s' % label_map[cid]
                        metrics_dict[name] = (metrics[i +
                                                      len(self.metric_names)],
                                              update_op)
                return metrics_dict
Example #7
def visualize_image(image,
                    boxes,
                    classes,
                    scores,
                    label_map=None,
                    min_score_thresh=0.01,
                    max_boxes_to_draw=1000,
                    line_thickness=2,
                    **kwargs):
    """Visualizes a given image.

  Args:
    image: a image with shape [H, W, C].
    boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].
    classes: a class prediction with shape [N].
    scores: A list of float value with shape [N].
    label_map: a dictionary from class id to name.
    min_score_thresh: minimal score for showing. If claass probability is below
      this threshold, then the object will not show up.
    max_boxes_to_draw: maximum bounding box to draw.
    line_thickness: how thick is the bounding box line.
    **kwargs: extra parameters.

  Returns:
    output_image: an output image with annotated boxes and classes.
  """
    label_map = label_util.get_label_map(label_map or 'coco')
    category_index = {k: {'id': k, 'name': label_map[k]} for k in label_map}
    img = np.array(image)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        img,
        boxes,
        classes,
        scores,
        category_index,
        min_score_thresh=min_score_thresh,
        max_boxes_to_draw=max_boxes_to_draw,
        line_thickness=line_thickness,
        **kwargs)
    return img
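A hedged usage sketch with dummy inputs (the box, class id, and score are made up); passing label_map=None falls back to the built-in 'coco' mapping:

import numpy as np

image = np.zeros((512, 512, 3), dtype=np.uint8)          # blank test image
boxes = np.array([[50., 50., 200., 200.]], np.float32)   # [ymin, xmin, ymax, xmax]
classes = np.array([1], np.int32)                        # COCO id 1 == 'person'
scores = np.array([0.9], np.float32)
annotated = visualize_image(image, boxes, classes, scores)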
Example #8
  def evaluate(self, model, dataset, steps, json_file=None):
    """Evaluate the EfficientDet keras model."""
    label_map = label_util.get_label_map(self.config.label_map)
    # Sorts label_map keys, since pycocotools.cocoeval uses sorted catIds
    # (category ids) in the COCOeval class.
    label_map = _get_ordered_label_map(label_map)

    evaluator = coco_metric.EvaluationMetric(
        filename=json_file, label_map=label_map)

    evaluator.reset_states()
    dataset = dataset.take(steps)

    @tf.function
    def _get_detections(images, labels):
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(self.config, cls_outputs,
                                                   box_outputs,
                                                   labels['image_scales'],
                                                   labels['source_ids'])
      tf.numpy_function(evaluator.update_state, [
          labels['groundtruth_data'],
          postprocess.transform_detections(detections)
      ], [])

    dataset = self.ds_strategy.experimental_distribute_dataset(dataset)
    for (images, labels) in dataset:
      self.ds_strategy.run(_get_detections, (images, labels))

    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
      metric_dict[name] = metrics[i]

    if label_map:
      for i, cid in enumerate(label_map.keys()):
        name = 'AP_/%s' % label_map[cid]
        metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    return metric_dict
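tf.numpy_function is what lets the graph-compiled _get_detections hand tensors to the Python-side evaluator. A minimal standalone sketch of that pattern (the bookkeeping function is made up):

import tensorflow as tf

state = []

def _update(x):
    # Receives a NumPy array even when called from inside a traced function.
    state.append(float(x.sum()))

@tf.function
def step(x):
    tf.numpy_function(_update, [x], [])

step(tf.ones([2, 2]))
assert state == [4.0]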
Example #9
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        if not FLAGS.only_network:
            nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(
                images)
            nms_classes_bs += postprocess.CLASS_OFFSET

            height, width = utils.parse_image_size(config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes_bs *= normalize_factor
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes_bs = nms_boxes_bs * tf.cast(scales,
                                                      nms_boxes_bs.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes_bs, nms_classes_bs, nms_scores_bs,
                labels['source_ids'])
        else:
            cls_outputs, box_outputs = lite_runner.run(images)
            detections = postprocess.generate_detections(
                config,
                cls_outputs,
                box_outputs,
                labels['image_scales'],
                labels['source_ids'],
                pre_class_nms=FLAGS.pre_class_nms)

        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
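A numeric sketch (values are made up) of the box denormalization in the non-only_network branch: normalized NMS boxes are first scaled to model-input pixels, then multiplied by the per-image scale to recover original-image coordinates:

import numpy as np

nms_boxes = np.array([[0.1, 0.2, 0.5, 0.6]], np.float32)  # [ymin, xmin, ymax, xmax]
height, width = 512, 512        # model input size
image_scale = 2.0               # stand-in for labels['image_scales']
boxes_px = nms_boxes * np.array([height, width, height, width], np.float32)
boxes_orig = boxes_px * image_scale
# boxes_px   -> [[ 51.2 102.4 256.  307.2]]
# boxes_orig -> [[102.4 204.8 512.  614.4]]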
Example #10
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    if config.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif config.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    with ds_strategy.scope():
        # Network
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build((None, *config.image_size, 3))
        util_keras.restore_ckpt(model,
                                tf.train.latest_checkpoint(FLAGS.model_dir),
                                config.moving_average_decay,
                                skip_mismatch=False)

        @tf.function
        def model_fn(images, labels):
            cls_outputs, box_outputs = model(images, training=False)
            detections = postprocess.generate_detections(
                config, cls_outputs, box_outputs, labels['image_scales'],
                labels['source_ids'])
            tf.numpy_function(evaluator.update_state, [
                labels['groundtruth_data'],
                postprocess.transform_detections(detections)
            ], [])

        # Evaluator for AP calculation.
        label_map = label_util.get_label_map(config.label_map)
        evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                                 label_map=label_map)

        # dataset
        batch_size = FLAGS.batch_size  # global batch size.
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            max_instances_per_image=config.max_instances_per_image)(
                config, batch_size=batch_size)
        if FLAGS.eval_samples:
            ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
        ds = ds_strategy.experimental_distribute_dataset(ds)

        # evaluate all images.
        eval_samples = FLAGS.eval_samples or 5000
        pbar = tf.keras.utils.Progbar(
            (eval_samples + batch_size - 1) // batch_size)
        for i, (images, labels) in enumerate(ds):
            ds_strategy.run(model_fn, (images, labels))
            pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)