Example #1
def set_model(self, model: tf.keras.Model):
  self.model = model
  config = model.config
  self.config = config
  label_map = label_util.get_label_map(config.label_map)
  log_dir = os.path.join(config.model_dir, 'coco')
  self.file_writer = tf.summary.create_file_writer(log_dir)
  self.evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)
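For context, set_model() is the hook tf.keras invokes on every callback before training or evaluation begins, which is why this snippet performs its evaluator and summary-writer setup there. A minimal, self-contained sketch of the hook; the COCO-specific pieces (model.config, coco_metric, label_util) are repo-specific and omitted here:

import tensorflow as tf

class EvalSetupCallback(tf.keras.callbacks.Callback):
  def set_model(self, model: tf.keras.Model):
    super().set_model(model)  # the base class stores the model on self.model
    # One-time setup that needs the model would go here.
    print('set_model called with', type(model).__name__)

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')
cb = EvalSetupCallback()
cb.set_model(model)  # fit() would call this automatically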
Example #2
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  # Evaluator for AP calculation.
  label_map = label_util.get_label_map(config.label_map)
  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)

  # dataset
  batch_size = 1
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      max_instances_per_image=config.max_instances_per_image)(
          config, batch_size=batch_size)
  eval_samples = FLAGS.eval_samples
  if eval_samples:
    ds = ds.take((eval_samples + batch_size - 1) // batch_size)

  # Network
  lite_runner = LiteRunner(FLAGS.tflite_path)
  eval_samples = FLAGS.eval_samples or 5000
  pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
  for i, (images, labels) in enumerate(ds):
    cls_outputs, box_outputs = lite_runner.run(images)
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    detections = postprocess.transform_detections(detections)
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())
    pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]

  if label_map:
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
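The dataset truncation and progress-bar sizing above both use the ceil-division idiom (n + b - 1) // b to turn a sample count into a batch count. A tiny sketch:

def num_batches(num_samples: int, batch_size: int) -> int:
  # Integer ceil division: smallest number of batches covering all samples.
  return (num_samples + batch_size - 1) // batch_size

assert num_batches(5000, 1) == 5000
assert num_batches(5000, 8) == 625
assert num_batches(5001, 8) == 626  # 625 full batches plus one partial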
Example #3
    def estimator_metric_fn(self,
                            detections,
                            groundtruth_data,
                            label_map=None):
        """Constructs the metric function for tf.TPUEstimator.

    For each metric, we return the evaluation op and an update op; the update op
    is shared across all metrics and simply appends the set of detections to the
    `self.detections` list. The metric op is invoked after all examples have
    been seen and computes the aggregate COCO metrics. Please find details API
    in: https://www.tensorflow.org/api_docs/python/tf/contrib/learn/MetricSpec
    Args:
      detections: Detection results in a tensor with each row representing
        [image_id, x, y, width, height, score, class]
      groundtruth_data: Groundtruth annotations in a tensor with each row
        representing [y1, x1, y2, x2, is_crowd, area, class].
      label_map: optional, a map from class id to name.
    Returns:
      metrics_dict: A dictionary mapping from evaluation name to a tuple of
        operations (`metric_op`, `update_op`). `update_op` appends the
        detections for the metric to the `self.detections` list.
    """
        with tf.name_scope('coco_metric'):
            if self.testdev_dir:
                update_op = tf.numpy_function(self.update_state,
                                              [groundtruth_data, detections],
                                              [])
                metrics = tf.numpy_function(self.result, [], tf.float32)
                metrics_dict = {'AP': (metrics, update_op)}
                return metrics_dict
            else:
                update_op = tf.numpy_function(self.update_state,
                                              [groundtruth_data, detections],
                                              [])
                metrics = tf.numpy_function(self.result, [], tf.float32)
                metrics_dict = {}
                for i, name in enumerate(self.metric_names):
                    metrics_dict[name] = (metrics[i], update_op)
                if label_map:
                    label_map = label_util.get_label_map(label_map)
                    for i, cid in enumerate(sorted(label_map.keys())):
                        name = 'AP_/%s' % label_map[cid]
                        # Per-class APs follow the standard metrics in the
                        # result vector, at offset len(self.metric_names).
                        metrics_dict[name] = (
                            metrics[i + len(self.metric_names)], update_op)
                return metrics_dict
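The (metric, update_op) pairs above rely on tf.numpy_function to call stateful Python methods from graph code. A minimal sketch of that pattern, with a hypothetical MeanTracker standing in for the COCO evaluator:

import numpy as np
import tensorflow as tf

class MeanTracker:
    """Hypothetical stand-in for the evaluator's stateful interface."""

    def __init__(self):
        self.values = []

    def update_state(self, x):
        self.values.append(np.asarray(x))

    def result(self):
        return np.float32(np.mean(np.concatenate(self.values)))

tracker = MeanTracker()

@tf.function
def update_op(batch):
    # Runs purely for its side effect, like the update ops above.
    tf.numpy_function(tracker.update_state, [batch], [])

update_op(tf.constant([1.0, 2.0]))
update_op(tf.constant([3.0]))
metric = tf.numpy_function(tracker.result, [], tf.float32)
print(metric)  # tf.Tensor(2.0, shape=(), dtype=float32)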
Example #4
def visualize_image(image,
                    boxes,
                    classes,
                    scores,
                    label_map=None,
                    min_score_thresh=0.01,
                    max_boxes_to_draw=1000,
                    line_thickness=2,
                    **kwargs):
    """Visualizes a given image.

  Args:
    image: a image with shape [H, W, C].
    boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].
    classes: a class prediction with shape [N].
    scores: A list of float value with shape [N].
    label_map: a dictionary from class id to name.
    min_score_thresh: minimal score for showing. If claass probability is below
      this threshold, then the object will not show up.
    max_boxes_to_draw: maximum bounding box to draw.
    line_thickness: how thick is the bounding box line.
    **kwargs: extra parameters.

  Returns:
    output_image: an output image with annotated boxes and classes.
  """
    label_map = label_util.get_label_map(label_map or 'coco')
    category_index = {k: {'id': k, 'name': label_map[k]} for k in label_map}
    img = np.array(image)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        img,
        boxes,
        classes,
        scores,
        category_index,
        min_score_thresh=min_score_thresh,
        max_boxes_to_draw=max_boxes_to_draw,
        line_thickness=line_thickness,
        **kwargs)
    return img
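A hedged usage sketch for visualize_image, assuming the efficientdet repo is importable and that boxes are absolute [ymin, xmin, ymax, xmax] pixel coordinates:

import numpy as np

image = np.zeros((512, 512, 3), dtype=np.uint8)
boxes = np.array([[50.0, 50.0, 200.0, 200.0]])  # one box, pixel coords
classes = np.array([1])                         # COCO id 1 == 'person'
scores = np.array([0.9])

annotated = visualize_image(image, boxes, classes, scores,
                            min_score_thresh=0.5)
# `annotated` has the same shape as `image`, with the box and label drawn.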
Example #5
    def to_json(self, prediction, **kwargs):
        """predictions to json"""

        boxes = prediction[:, 1:5]
        classes = prediction[:, 6].astype(int)
        scores = prediction[:, 5]

        label_map = label_util.get_label_map(self.label_map or 'coco')
        category_index = {
            k: {
                'id': k,
                'name': label_map[k]
            }
            for k in label_map
        }

        min_score_thresh = kwargs.get('min_score_thresh', 0.01)
        # keep only detections whose score exceeds the threshold
        scores_idx = scores > min_score_thresh

        scores_abv_thres = scores[scores_idx]
        boxes_abv_thres = boxes[scores_idx]
        classes_abv_thres = classes[scores_idx]

        class_names_abv_thresh = []
        for class_abv in classes_abv_thres:
            if class_abv in six.viewkeys(category_index):
                class_name = category_index[class_abv]['name']
                class_names_abv_thresh.append(class_name)
            else:
                class_names_abv_thresh.append('UNKNOWN')

        json_obj = {}
        json_obj['boxes'] = boxes_abv_thres.tolist()
        json_obj['classes_idx'] = classes_abv_thres.tolist()
        json_obj['classes_name'] = class_names_abv_thresh
        json_obj['scores'] = scores_abv_thres.tolist()

        json_str = json.dumps(json_obj, indent=4)
        return json_str
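A small sketch of the slicing to_json() performs, assuming the seven-column detection rows [image_id, <4 box coordinates>, score, class] used throughout these examples:

import numpy as np

prediction = np.array([
    [1, 10.0, 20.0, 110.0, 220.0, 0.87, 3],
    [1, 15.0, 25.0, 115.0, 225.0, 0.004, 5],  # below the 0.01 threshold
])

boxes = prediction[:, 1:5]
scores = prediction[:, 5]
classes = prediction[:, 6].astype(int)

keep = scores > 0.01
print(boxes[keep].tolist(), classes[keep].tolist(), scores[keep].tolist())
# only the first row survives the threshold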
Example #6
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  if config.strategy == 'tpu':
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
    tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
    ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
    logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
  elif config.strategy == 'gpus':
    ds_strategy = tf.distribute.MirroredStrategy()
    logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
  else:
    if tf.config.list_physical_devices('GPU'):
      ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
    else:
      ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

  with ds_strategy.scope():
    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((None, *config.image_size, 3))
    util_keras.restore_ckpt(model,
                            tf.train.latest_checkpoint(FLAGS.model_dir),
                            config.moving_average_decay,
                            skip_mismatch=False)

    @tf.function
    def model_fn(images, labels):
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(config,
                                                   cls_outputs,
                                                   box_outputs,
                                                   labels['image_scales'],
                                                   labels['source_ids'])
      tf.numpy_function(evaluator.update_state,
                        [labels['groundtruth_data'],
                         postprocess.transform_detections(detections)], [])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file, label_map=label_map)

    # dataset
    batch_size = FLAGS.batch_size   # global batch size.
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    if FLAGS.eval_samples:
      ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
    ds = ds_strategy.experimental_distribute_dataset(ds)

    # evaluate all images.
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
      ds_strategy.run(model_fn, (images, labels))
      pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]

  if label_map:
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
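The surrounding scaffolding follows the standard tf.distribute pattern: pick a strategy, build the model under its scope, distribute the dataset, and drive per-replica steps with strategy.run(). A minimal CPU-only sketch:

import tensorflow as tf

strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

with strategy.scope():
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])

@tf.function
def step_fn(x):
  return model(x, training=False)

ds = tf.data.Dataset.from_tensor_slices(tf.ones((4, 3))).batch(2)
ds = strategy.experimental_distribute_dataset(ds)
for batch in ds:
  strategy.run(step_fn, args=(batch,))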
Example #7
File: eval.py  Project: ailabktw/automl
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    base_height, base_width = utils.parse_image_size(config['image_size'])

    if FLAGS.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif FLAGS.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    # in format (height, width, flip)
    augmentations = []
    if FLAGS.enable_tta:
        for size_offset in (0, 128, 256):
            for flip in (False, True):
                augmentations.append((base_height + size_offset,
                                      base_width + size_offset, flip))
    else:
        augmentations.append((base_height, base_width, False))

    all_detections = []
    all_labels = []
    with ds_strategy.scope():
        # Network
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build((config.batch_size, base_height, base_width, 3))
        model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

        first_loop = True
        for height, width, flip in augmentations:
            config.image_size = (height, width)
            # dataset
            ds = dataloader.InputReader(
                FLAGS.val_file_pattern,
                is_training=False,
                use_fake_data=False,
                max_instances_per_image=config.max_instances_per_image)(config)

            # create the function once per augmentation, since it closes over the
            # value of config, which gets updated with the new image size
            @tf.function
            def f(images, labels):
                cls_outputs, box_outputs = model(images, training=False)
                return postprocess.generate_detections(config, cls_outputs,
                                                       box_outputs,
                                                       labels['image_scales'],
                                                       labels['source_ids'],
                                                       flip)

            # inference
            for images, labels in ds:
                if flip:
                    images = tf.image.flip_left_right(images)
                detections = f(images, labels)

                all_detections.append(detections)
                if first_loop:
                    all_labels.append(labels)

            first_loop = False

    # collect the giant list of detections into a map from image id to
    # detections
    detections_per_source = dict()
    for batch in all_detections:
        for d in batch:
            img_id = d[0][0]
            if img_id.numpy() in detections_per_source:
                detections_per_source[img_id.numpy()] = tf.concat(
                    [d, detections_per_source[img_id.numpy()]], 0)
            else:
                detections_per_source[img_id.numpy()] = d

    # collect the groundtruth per image id
    groundtruth_per_source = dict()
    for batch in all_labels:
        for img_id, groundtruth in zip(batch['source_ids'],
                                       batch['groundtruth_data']):
            groundtruth_per_source[img_id.numpy()] = groundtruth

    # calculate the AP scores for all the images; per-class APs are only
    # emitted when the evaluator is constructed with a label_map.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file, label_map=label_map)
    for img_id, d in detections_per_source.items():
        if FLAGS.enable_tta:
            d = wbf.ensemble_detections(config, d, len(augmentations))
        evaluator.update_state(
            tf.stack([groundtruth_per_source[img_id]]).numpy(),
            postprocess.transform_detections(tf.stack([d])).numpy())

    # compute the final eval results.
    if evaluator:
        metrics = evaluator.result()
        metric_dict = {}
        for i, name in enumerate(evaluator.metric_names):
            metric_dict[name] = metrics[i]

        if label_map:
            for i, cid in enumerate(sorted(label_map.keys())):
                name = 'AP_/%s' % label_map[cid]
                # per-class APs follow the standard metrics in the result
                metric_dict[name] = metrics[i + len(evaluator.metric_names)]
        print(metric_dict)
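The per-image accumulation above merges detections from several test-time-augmentation passes, keyed by image id. A stripped-down sketch of that dict-and-concat pattern:

import tensorflow as tf

detections_per_source = dict()

def add(img_id, dets):
    if img_id in detections_per_source:
        detections_per_source[img_id] = tf.concat(
            [dets, detections_per_source[img_id]], 0)
    else:
        detections_per_source[img_id] = dets

add(7, tf.ones((2, 7)))  # first pass: two detections for image 7
add(7, tf.ones((3, 7)))  # second pass: three more for the same image
print(detections_per_source[7].shape)  # (5, 7)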
Example #8
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        if not FLAGS.only_network:
            nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(
                images)
            nms_classes_bs += postprocess.CLASS_OFFSET

            height, width = utils.parse_image_size(config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes_bs *= normalize_factor
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes_bs = nms_boxes_bs * tf.cast(scales,
                                                      nms_boxes_bs.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes_bs, nms_classes_bs, nms_scores_bs,
                labels['source_ids'])
        else:
            cls_outputs, box_outputs = lite_runner.run(images)
            detections = postprocess.generate_detections(
                config,
                cls_outputs,
                box_outputs,
                labels['image_scales'],
                labels['source_ids'],
                pre_class_nms=FLAGS.pre_class_nms)

        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
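The box rescaling in the non-only_network branch assumes the TFLite NMS op emits boxes normalized to [0, 1]; they are scaled by the network input size and then by the dataloader's per-image scale to recover original-image coordinates. A small numeric sketch:

import tensorflow as tf

height, width = 512, 512
nms_boxes = tf.constant([[[0.1, 0.2, 0.5, 0.6]]])  # [batch, num_boxes, 4]
image_scales = tf.constant([1.5])                  # [batch]

factor = tf.constant([height, width, height, width], dtype=tf.float32)
boxes = nms_boxes * factor                         # to input-image pixels
boxes *= tf.cast(tf.expand_dims(tf.expand_dims(image_scales, -1), -1),
                 boxes.dtype)                      # to original-image pixels
print(boxes)  # [[[ 76.8 153.6 384.  460.8]]]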
Example #9
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    base_height, base_width = utils.parse_image_size(config['image_size'])

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, base_height, base_width, 3))
    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

    @tf.function
    def f(imgs, labels, flip):
        cls_outputs, box_outputs = model(imgs, training=False)
        return postprocess.generate_detections(config, cls_outputs,
                                               box_outputs,
                                               labels['image_scales'],
                                               labels['source_ids'], flip)

    # in format (height, width, flip)
    augmentations = []
    if FLAGS.enable_tta:
        for size_offset in (0, 128, 256):
            for flip in (False, True):
                augmentations.append((base_height + size_offset,
                                      base_width + size_offset, flip))
    else:
        augmentations.append((base_height, base_width, False))

    evaluator = None
    detections_per_source = dict()
    groundtruth_per_source = dict()
    for height, width, flip in augmentations:
        config.image_size = (height, width)
        # dataset
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            use_fake_data=False,
            max_instances_per_image=config.max_instances_per_image)(config)

        # compute stats for all batches.
        total_steps = (FLAGS.eval_samples + FLAGS.batch_size -
                       1) // FLAGS.batch_size
        progress = tf.keras.utils.Progbar(total_steps)
        for i, (images, labels) in enumerate(ds):
            progress.update(i, values=None)
            if i >= total_steps:
                break

            if flip:
                images = tf.image.flip_left_right(images)
            detections = f(images, labels, flip)

            for img_id, d in zip(labels['source_ids'], detections):
                if img_id.numpy() in detections_per_source:
                    detections_per_source[img_id.numpy()] = tf.concat(
                        [d, detections_per_source[img_id.numpy()]], 0)
                else:
                    detections_per_source[img_id.numpy()] = d

            # collect the groundtruth per image id so every update pairs the
            # detections with their matching annotations.
            for img_id, gt in zip(labels['source_ids'],
                                  labels['groundtruth_data']):
                groundtruth_per_source[img_id.numpy()] = gt

    # calculate the AP scores for all images once, after the last pass;
    # build the evaluator a single time (with the label_map) instead of
    # recreating it on every batch.
    evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file,
        label_map=label_util.get_label_map(config.label_map))
    for img_id, d in detections_per_source.items():
        if FLAGS.enable_tta:
            d = wbf.ensemble_detections(config, d, len(augmentations))
        evaluator.update_state(
            tf.stack([groundtruth_per_source[img_id]]).numpy(),
            postprocess.transform_detections(tf.stack([d])).numpy())

    # compute the final eval results.
    if evaluator:
        metrics = evaluator.result()
        metric_dict = {}
        for i, name in enumerate(evaluator.metric_names):
            metric_dict[name] = metrics[i]

        label_map = label_util.get_label_map(config.label_map)
        if label_map:
            for i, cid in enumerate(sorted(label_map.keys())):
                name = 'AP_/%s' % label_map[cid]
                metric_dict[name] = metrics[i + len(evaluator.metric_names)]
        print(metric_dict)
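Across these examples, evaluator.result() returns the standard COCO metrics first, followed by per-class APs when (and only when) the evaluator was built with a label_map; the per-class entries therefore sit at offset len(metric_names). A toy sketch with stand-in values:

metric_names = ['AP', 'AP50', 'AP75']        # stand-ins for the real 12
metrics = [0.40, 0.60, 0.43, 0.55, 0.31]     # 3 overall + 2 per-class APs
label_map = {1: 'person', 2: 'bicycle'}

metric_dict = {name: metrics[i] for i, name in enumerate(metric_names)}
for i, cid in enumerate(sorted(label_map)):
    metric_dict['AP_/%s' % label_map[cid]] = metrics[i + len(metric_names)]
print(metric_dict)
# {'AP': 0.4, 'AP50': 0.6, 'AP75': 0.43, 'AP_/person': 0.55,
#  'AP_/bicycle': 0.31}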