Example #1
  def set_model(self, model: tf.keras.Model):
    self.model = model
    config = model.config
    self.config = config
    label_map = label_util.get_label_map(config.label_map)
    log_dir = os.path.join(config.model_dir, 'coco')
    self.file_writer = tf.summary.create_file_writer(log_dir)
    self.evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file, label_map=label_map)
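
For context: Keras invokes set_model() on every attached callback before training begins, which is the point where the snippet above builds its summary writer and COCO evaluator. Below is a minimal, self-contained sketch of that hook firing; the EvalCallback class and the toy model are illustrative placeholders, not part of the EfficientDet code.

import tensorflow as tf

class EvalCallback(tf.keras.callbacks.Callback):
  """Placeholder callback; Keras calls set_model() before training starts."""

  def set_model(self, model: tf.keras.Model):
    # Example #1 creates its summary writer and EvaluationMetric at this point.
    self.model = model
    print('set_model received:', type(model).__name__)

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')
model.fit(tf.zeros([4, 3]), tf.zeros([4, 1]), epochs=1, verbose=0,
          callbacks=[EvalCallback()])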
Example #2
    def test_mAP(self):

        eval_metric = coco_metric.EvaluationMetric(label_map=self.class_labels)
        coco_metrics = eval_metric.estimator_metric_fn(self.detections,
                                                       self.groundtruth_data)
        self.assertEqual(len(coco_metrics.keys()), 15)
        self.assertAllClose(coco_metrics['AP'][0], 2.0 / 3.0)
        self.assertAllClose(coco_metrics['AP_/car'][0], 1.0)
        self.assertAllClose(coco_metrics['AP_/truck'][0], 1.0)
        self.assertAllClose(coco_metrics['AP_/bicycle'][0], 0.0)
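
For reference: the 15 keys asserted above are consistent with the 12 standard COCO detection metrics plus one AP_/<name> entry per class in the label map, and the overall AP of 2/3 matches the mean of the three per-class APs, (1.0 + 1.0 + 0.0) / 3. A label map consistent with these assertions would look like the sketch below; the actual fixture lives in the test's setUp and may differ.

# Plausible fixture (assumption, not the actual setUp values): three classes,
# so estimator_metric_fn yields 12 COCO metrics + 3 per-class APs = 15 keys.
class_labels = {1: 'car', 2: 'truck', 3: 'bicycle'}
eval_metric = coco_metric.EvaluationMetric(label_map=class_labels)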
Example #3
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        if not FLAGS.only_network:
            nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(
                images)
            nms_classes_bs += postprocess.CLASS_OFFSET

            # The NMS boxes appear to come back normalized to [0, 1] (hence the
            # scaling below): multiply by the model input (height, width), then
            # by the per-image scale to map back to original-image pixel coordinates.
            height, width = utils.parse_image_size(config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes_bs *= normalize_factor
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes_bs = nms_boxes_bs * tf.cast(scales,
                                                      nms_boxes_bs.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes_bs, nms_classes_bs, nms_scores_bs,
                labels['source_ids'])
        else:
            cls_outputs, box_outputs = lite_runner.run(images)
            detections = postprocess.generate_detections(
                config,
                cls_outputs,
                box_outputs,
                labels['image_scales'],
                labels['source_ids'],
                pre_class_nms=FLAGS.pre_class_nms)

        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
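
The tail of this example maps the result vector onto names: the first len(evaluator.metric_names) values are the standard metrics, and the remaining values are per-class APs in sorted class-id order. A standalone illustration of that layout with made-up numbers (metric_names is truncated here for brevity):

metric_names = ['AP', 'AP50', 'AP75']        # stand-in for evaluator.metric_names
label_map = {1: 'car', 2: 'truck'}
metrics = [0.42, 0.61, 0.45, 0.50, 0.33]     # named metrics first, then per-class APs

metric_dict = dict(zip(metric_names, metrics[:len(metric_names)]))
for ap, cid in zip(metrics[len(metric_names):], sorted(label_map.keys())):
    metric_dict['AP_/%s' % label_map[cid]] = ap
print(metric_dict)
# {'AP': 0.42, 'AP50': 0.61, 'AP75': 0.45, 'AP_/car': 0.5, 'AP_/truck': 0.33}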
Example #4
    def metric_fn(**kwargs):
      """Returns a dictionary that has the evaluation metrics."""
      if params['nms_configs'].get('pyfunc', True):
        detections_bs = []
        nms_configs = params['nms_configs']
        for index in range(kwargs['boxes'].shape[0]):
          detections = tf.numpy_function(
              functools.partial(nms_np.per_class_nms, nms_configs=nms_configs),
              [
                  kwargs['boxes'][index],
                  kwargs['scores'][index],
                  kwargs['classes'][index],
                  tf.slice(kwargs['image_ids'], [index], [1]),
                  tf.slice(kwargs['image_scales'], [index], [1]),
                  params['num_classes'],
                  nms_configs['max_output_size'],
              ], tf.float32)
          detections_bs.append(detections)
        detections_bs = postprocess.transform_detections(
            tf.stack(detections_bs))
      else:
        # These two branches should be equivalent, but currently they are not.
        # TODO(tanmingxing): enable the non-pyfunc path after the bug is fixed.
        nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(
            params, kwargs['boxes'], kwargs['scores'], kwargs['classes'],
            kwargs['image_scales'])
        img_ids = tf.cast(
            tf.expand_dims(kwargs['image_ids'], -1), nms_scores.dtype)
        detections_bs = [
            img_ids * tf.ones_like(nms_scores),
            nms_boxes[:, :, 1],
            nms_boxes[:, :, 0],
            nms_boxes[:, :, 3] - nms_boxes[:, :, 1],
            nms_boxes[:, :, 2] - nms_boxes[:, :, 0],
            nms_scores,
            nms_classes,
        ]
        detections_bs = tf.stack(detections_bs, axis=-1, name='detections')

      if params.get('testdev_dir', None):
        logging.info('Eval testdev_dir %s', params['testdev_dir'])
        eval_metric = coco_metric.EvaluationMetric(
            testdev_dir=params['testdev_dir'])
        coco_metrics = eval_metric.estimator_metric_fn(detections_bs,
                                                       tf.zeros([1]))
      else:
        logging.info('Eval val with groundtruths %s.', params['val_json_file'])
        eval_metric = coco_metric.EvaluationMetric(
            filename=params['val_json_file'], label_map=params['label_map'])
        coco_metrics = eval_metric.estimator_metric_fn(
            detections_bs, kwargs['groundtruth_data'])

      # Add metrics to output.
      cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
      box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
      output_metrics = {
          'cls_loss': cls_loss,
          'box_loss': box_loss,
      }
      output_metrics.update(coco_metrics)
      return output_metrics
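
In the non-pyfunc branch the boxes are repacked column by column: assuming the [ymin, xmin, ymax, xmax] ordering used elsewhere in the repo, the stack produces COCO-style [image_id, x, y, width, height, score, class] rows. A quick numeric check of just the box repacking (the single box value is made up):

import tensorflow as tf

# One fake box per image, [ymin, xmin, ymax, xmax] = [10, 20, 50, 100].
nms_boxes = tf.constant([[[10., 20., 50., 100.]]])
xywh = tf.stack([
    nms_boxes[:, :, 1],                       # x = xmin
    nms_boxes[:, :, 0],                       # y = ymin
    nms_boxes[:, :, 3] - nms_boxes[:, :, 1],  # width  = xmax - xmin
    nms_boxes[:, :, 2] - nms_boxes[:, :, 0],  # height = ymax - ymin
], axis=-1)
print(xywh.numpy())  # [[[20. 10. 80. 40.]]]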
Example #5
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    if config.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif config.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    with ds_strategy.scope():
        # Network
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build((None, *config.image_size, 3))
        util_keras.restore_ckpt(model,
                                tf.train.latest_checkpoint(FLAGS.model_dir),
                                config.moving_average_decay,
                                skip_mismatch=False)

        @tf.function
        def model_fn(images, labels):
            cls_outputs, box_outputs = model(images, training=False)
            detections = postprocess.generate_detections(
                config, cls_outputs, box_outputs, labels['image_scales'],
                labels['source_ids'])
            tf.numpy_function(evaluator.update_state, [
                labels['groundtruth_data'],
                postprocess.transform_detections(detections)
            ], [])

        # Evaluator for AP calculation.
        label_map = label_util.get_label_map(config.label_map)
        evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                                 label_map=label_map)

        # dataset
        batch_size = FLAGS.batch_size  # global batch size.
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            max_instances_per_image=config.max_instances_per_image)(
                config, batch_size=batch_size)
        if FLAGS.eval_samples:
            ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
        ds = ds_strategy.experimental_distribute_dataset(ds)

        # evaluate all images.
        eval_samples = FLAGS.eval_samples or 5000
        pbar = tf.keras.utils.Progbar(
            (eval_samples + batch_size - 1) // batch_size)
        for i, (images, labels) in enumerate(ds):
            ds_strategy.run(model_fn, (images, labels))
            pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
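
Example #5 wraps evaluator.update_state in tf.numpy_function so the Python-side COCO accumulator can run inside the tf.function executed by ds_strategy.run. A minimal standalone sketch of that wrapping pattern, with a plain list as a stand-in for the evaluator:

import numpy as np
import tensorflow as tf

collected = []  # stand-in for the evaluator's internal state

def update_state(groundtruth, detections):
    # Runs as ordinary Python, even when called from graph code.
    collected.append((np.asarray(groundtruth), np.asarray(detections)))

@tf.function
def step(groundtruth, detections):
    tf.numpy_function(update_state, [groundtruth, detections], [])

step(tf.zeros([1, 5, 7]), tf.zeros([1, 5, 7]))
print(len(collected))  # 1

The empty Tout list mirrors the call in Example #5: update_state is invoked purely for its side effect and returns no tensors.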