Example #1
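All of the snippets below call postprocess.generate_detections from the EfficientDet code in the google/automl repository. They are shown without their import blocks; here is a minimal sketch of the imports they assume (exact module paths vary across revisions of that repository, so treat the paths as assumptions rather than verified ones):

import tensorflow as tf
from absl import app, flags, logging

# EfficientDet modules from google/automl/efficientdet (paths vary by revision)
import anchors
import coco_metric
import dataloader
import efficientdet_keras
import hparams_config
import inference
import label_util
import postprocess
import util_keras
import utils
import wbf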
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file

    # dataset
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(config)

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, None, None, 3))
    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)

    # compute stats for all batches.
    for images, labels in ds:
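        # Keep the maximum number of pre-NMS candidates defined by the
        # anchors module.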
        config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS

        cls_outputs, box_outputs = model(images, training=False)
        detections = postprocess.generate_detections(config, cls_outputs,
                                                     box_outputs,
                                                     labels['image_scales'],
                                                     labels['source_ids'],
                                                     False)

        if FLAGS.enable_tta:
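            # Test-time augmentation: also run a horizontally flipped copy of
            # the batch, then merge the two detection sets per image with
            # weighted boxes fusion (wbf).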
            images_flipped = tf.image.flip_left_right(images)
            cls_outputs_flipped, box_outputs_flipped = model(images_flipped,
                                                             training=False)
            detections_flipped = postprocess.generate_detections(
                config, cls_outputs_flipped, box_outputs_flipped,
                labels['image_scales'], labels['source_ids'], True)

            for d, df in zip(detections, detections_flipped):
                combined_detections = wbf.ensemble_detections(
                    config, tf.concat([d, df], 0))
                combined_detections = tf.stack([combined_detections])
                evaluator.update_state(
                    labels['groundtruth_data'].numpy(),
                    postprocess.transform_detections(
                        combined_detections).numpy())
        else:
            evaluator.update_state(
                labels['groundtruth_data'].numpy(),
                postprocess.transform_detections(detections).numpy())

    # compute the final eval results.
    metric_values = evaluator.result()
    metric_dict = {}
    for i, metric_value in enumerate(metric_values):
        metric_dict[evaluator.metric_names[i]] = metric_value
    print(metric_dict)
Example #2
    def test_postprocess_per_class(self):
        """Test postprocess with per class nms."""
        tf.random.set_seed(1111)
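        # Fake per-level head outputs keyed by feature level; the last
        # dimension is num_anchors * num_classes for cls and num_anchors * 4
        # for box (here one anchor and two classes).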
        cls_outputs = {
            1: tf.random.normal([2, 4, 4, 2]),
            2: tf.random.normal([2, 2, 2, 2])
        }
        box_outputs = {
            1: tf.random.normal([2, 4, 4, 4]),
            2: tf.random.normal([2, 2, 2, 4])
        }
        cls_outputs_list = [cls_outputs[1], cls_outputs[2]]
        box_outputs_list = [box_outputs[1], box_outputs[2]]
        scales = [1.0, 2.0]
        ids = [0, 1]

        self.params['max_detection_points'] = 10
        outputs = postprocess.generate_detections(self.params,
                                                  cls_outputs_list,
                                                  box_outputs_list, scales,
                                                  ids)

        self.params['disable_pyfun'] = False
        score_thresh = 0.5
        max_output_size = self.params['nms_configs']['max_output_size']
        self.params['batch_size'] = len(scales)
        legacy_outputs = inference.det_post_process(self.params, cls_outputs,
                                                    box_outputs, scales,
                                                    score_thresh,
                                                    max_output_size)
        self.assertAllClose(outputs, legacy_outputs)
Example #3
File: eval.py  Project: ailabktw/automl
 def f(images, labels):
     cls_outputs, box_outputs = model(images, training=False)
     return postprocess.generate_detections(config, cls_outputs,
                                            box_outputs,
                                            labels['image_scales'],
                                            labels['source_ids'],
                                            flip)
Example #4
  def test_postprocess_per_class(self):
    """Test postprocess with per class nms."""
    tf.random.set_seed(1111)
    cls_outputs = {
        1: tf.random.normal([2, 4, 4, 2]),
        2: tf.random.normal([2, 2, 2, 2])
    }
    box_outputs = {
        1: tf.random.normal([2, 4, 4, 4]),
        2: tf.random.normal([2, 2, 2, 4])
    }
    cls_outputs_list = [cls_outputs[1], cls_outputs[2]]
    box_outputs_list = [box_outputs[1], box_outputs[2]]
    scales = [1.0, 2.0]
    ids = [0, 1]

    self.params['max_detection_points'] = 10
    outputs = postprocess.generate_detections(self.params, cls_outputs_list,
                                              box_outputs_list, scales, ids)
    self.assertAllClose(
        outputs.numpy(),
        [[[0., -1.177383, 1.793507, 9.518328, 2.624881, 0.901576, 2.],
          [0., 5.676410, 6.102146, 2.109282, 2.435021, 0.888125, 1.]],
         [[1., 5.885427, 13.529362, 5.524654, 0.624685, 0.884544, 1.],
          [1., 8.145872, -9.660868, 6.028101, 20.073238, 0.815883, 2.]]])
Example #5
 def _get_detections(self, images, labels):
   cls_outputs, box_outputs = self.model(images, training=False)
   detections = postprocess.generate_detections(self.config,
                                                cls_outputs,
                                                box_outputs,
                                                labels['image_scales'],
                                                labels['source_ids'])
   return postprocess.transform_detections(detections)
Example #6
 def model_fn(images, labels):
     cls_outputs, box_outputs = model(images, training=False)
     detections = postprocess.generate_detections(
         config, cls_outputs, box_outputs, labels['image_scales'],
         labels['source_ids'])
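     # tf.numpy_function wraps the Python-side COCO evaluator so the metric
     # update can run inside a compiled graph.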
     tf.numpy_function(evaluator.update_state, [
         labels['groundtruth_data'],
         postprocess.transform_detections(detections)
     ], [])
Example #7
 def _get_detections(self, images, labels):
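     # Cast any mixed-precision (float16) model outputs back to float32
     # before postprocessing.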
     cls_outputs, box_outputs = util_keras.fp16_to_fp32_nested(
         self.model(images, training=False))
     detections = postprocess.generate_detections(self.config, cls_outputs,
                                                  box_outputs,
                                                  labels['image_scales'],
                                                  labels['source_ids'])
     tf.numpy_function(self.evaluator.update_state, [
         labels['groundtruth_data'],
         postprocess.transform_detections(detections)
     ], [])
Example #8
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  # Evaluator for AP calculation.
  label_map = label_util.get_label_map(config.label_map)
  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)

  # dataset
  batch_size = 1
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      max_instances_per_image=config.max_instances_per_image)(
          config, batch_size=batch_size)
  eval_samples = FLAGS.eval_samples
  if eval_samples:
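    # Take ceil(eval_samples / batch_size) batches so every requested sample
    # is covered.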
    ds = ds.take((eval_samples + batch_size - 1) // batch_size)

  # Network
  lite_runner = LiteRunner(FLAGS.tflite_path)
  eval_samples = FLAGS.eval_samples or 5000
  pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
  for i, (images, labels) in enumerate(ds):
    cls_outputs, box_outputs = lite_runner.run(images)
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    detections = postprocess.transform_detections(detections)
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())
    pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]

  if label_map:
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
Example #9
    def test_postprocess_per_class_tf_nms(self):
        """Test postprocess with per class nms using the tensorflow nms."""
        tf.random.set_seed(1111)
        cls_outputs = {
            1: tf.random.normal([2, 4, 4, 2]),
            2: tf.random.normal([2, 2, 2, 2])
        }
        box_outputs = {
            1: tf.random.normal([2, 4, 4, 4]),
            2: tf.random.normal([2, 2, 2, 4])
        }
        cls_outputs_list = [cls_outputs[1], cls_outputs[2]]
        box_outputs_list = [box_outputs[1], box_outputs[2]]
        scales = [1.0, 2.0]
        ids = [0, 1]

        self.params['max_detection_points'] = 10
        self.params['nms_configs']['pyfunc'] = False
        outputs = postprocess.generate_detections(self.params,
                                                  cls_outputs_list,
                                                  box_outputs_list, scales,
                                                  ids)
        self.assertAllClose(
            outputs.numpy(),
            [[[0., -1.177383, 1.793507, 8.340945, 4.418388, 0.901576, 2.],
              [0., 5.676410, 6.102146, 7.785691, 8.537168, 0.888125, 1.]],
             [[1., 5.885427, 13.529362, 11.410081, 14.154047, 0.884544, 1.],
              [1., 8.145872, -9.660868, 14.173973, 10.41237, 0.815883, 2.]]])

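        # Re-run with flip=True: only the x-coordinates change, mirrored
        # across the image width; everything else matches the expectations
        # above.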
        outputs_flipped = postprocess.generate_detections(
            self.params, cls_outputs_list, box_outputs_list, scales, ids, True)
        self.assertAllClose(
            outputs_flipped.numpy(),
            [[[0., -0.340945, 1.793507, 9.177383, 4.418388, 0.901576, 2.],
              [0., 0.214309, 6.102146, 2.32359, 8.537168, 0.888125, 1.]],
             [[1., 4.589919, 13.529362, 10.114573, 14.154047, 0.884544, 1.],
              [1., 1.826027, -9.660868, 7.854128, 10.41237, 0.815883, 2.]]])
Example #10
File: eval.py  Project: DaveCoding/automl
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.batch_size = FLAGS.batch_size
  config.val_json_file = FLAGS.val_json_file

  # dataset
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      use_fake_data=False,
      max_instances_per_image=config.max_instances_per_image)(
          config)

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, 512, 512, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file)

  # compute stats for all batches.
  for images, labels in ds:
    cls_outputs, box_outputs = model(images, training=False)
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())

  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)
Example #11
def main(_):
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    config.batch_size = 8
    config.val_json_file = 'tmp/coco/annotations/instances_val2017.json'

    # dataset
    input_files = 'tmp/coco/val-00000-of-00032.tfrecord'
    is_training = False
    ds = dataloader.InputReader(
        input_files,
        is_training=is_training,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(config)

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, 512, 512, 3))
    model.load_weights('tmp/efficientdet-d0/model')

    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
    # compute stats for all batches.
    for images, labels in ds:
        cls_outputs, box_outputs = model(images, training=False)
        config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
        detections = postprocess.generate_detections(config, cls_outputs,
                                                     box_outputs,
                                                     labels['image_scales'],
                                                     labels['source_ids'])
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())

    # compute the final eval results.
    metric_values = evaluator.result()
    metric_dict = {}
    for i, metric_value in enumerate(metric_values):
        metric_dict[evaluator.metric_names[i]] = metric_value
    print(metric_dict)
Example #12
  # dataset (InputReader call completed to match the identical call in
  # Example #10; this excerpt begins mid-function)
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      use_fake_data=False,
      max_instances_per_image=config.max_instances_per_image)(
          config)

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, 512, 512, 3))
  model.load_weights(FLAGS.checkpoint)

  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file)

  # compute stats for all batches.
  for images, labels in ds:
    cls_outputs, box_outputs = model(images, training=False)
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())

  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)


if __name__ == '__main__':
  flags.mark_flag_as_required('val_file_pattern')
  flags.mark_flag_as_required('val_json_file')
  app.run(main)
Example #13
def _model_fn(features, labels, mode, params, model, variable_filter_fn=None):
    """Model definition entry.

  Args:
    features: the input image tensor with shape [batch_size, height, width, 3].
      The height and width are fixed and equal.
    labels: the input labels in a dictionary. The labels include class targets
      and box targets which are dense label maps. The labels are generated from
      get_input_fn function in data/dataloader.py
    mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT.
    params: the dictionary defines hyperparameters of model. The default
      settings are in default_hparams function in this file.
    model: the model outputs class logits and box regression outputs.
    variable_filter_fn: the filter function that takes trainable_variables and
      returns the variable list after applying the filter rule.

  Returns:
    tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.

  Raises:
    RuntimeError: if both ckpt and backbone_ckpt are set.
  """
    utils.image('input_image', features)
    training_hooks = []

    def _model_outputs(inputs):
        # Convert params (dict) to Config for easier access.
        return model(inputs, config=hparams_config.Config(params))

    precision = utils.get_precision(params['strategy'],
                                    params['mixed_precision'])
    cls_outputs, box_outputs = utils.build_model_with_precision(
        precision, _model_outputs, features, params['is_training_bn'])

    levels = cls_outputs.keys()
    for level in levels:
        cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
        box_outputs[level] = tf.cast(box_outputs[level], tf.float32)

    # First check if it is in PREDICT mode.
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'image': features,
        }
        for level in levels:
            predictions['cls_outputs_%d' % level] = cls_outputs[level]
            predictions['box_outputs_%d' % level] = box_outputs[level]
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Set up training loss and learning rate.
    update_learning_rate_schedule_parameters(params)
    global_step = tf.train.get_or_create_global_step()
    learning_rate = learning_rate_schedule(params, global_step)

    # cls_loss and box_loss are for logging. only total_loss is optimized.
    det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(
        cls_outputs, box_outputs, labels, params)
    reg_l2loss = reg_l2_loss(params['weight_decay'])
    total_loss = det_loss + reg_l2loss

    if mode == tf.estimator.ModeKeys.TRAIN:
        utils.scalar('lrn_rate', learning_rate)
        utils.scalar('trainloss/cls_loss', cls_loss)
        utils.scalar('trainloss/box_loss', box_loss)
        utils.scalar('trainloss/det_loss', det_loss)
        utils.scalar('trainloss/reg_l2_loss', reg_l2loss)
        utils.scalar('trainloss/loss', total_loss)
        if params['iou_loss_type']:
            utils.scalar('trainloss/box_iou_loss', box_iou_loss)

    moving_average_decay = params['moving_average_decay']
    if moving_average_decay:
        ema = tf.train.ExponentialMovingAverage(decay=moving_average_decay,
                                                num_updates=global_step)
        ema_vars = utils.get_ema_vars()
    if params['strategy'] == 'horovod':
        import horovod.tensorflow as hvd  # pylint: disable=g-import-not-at-top
        learning_rate = learning_rate * hvd.size()
    if mode == tf.estimator.ModeKeys.TRAIN:
        if params['optimizer'].lower() == 'sgd':
            optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                   momentum=params['momentum'])
        elif params['optimizer'].lower() == 'adam':
            optimizer = tf.train.AdamOptimizer(learning_rate)
        else:
            raise ValueError('optimizers should be adam or sgd')

        if params['strategy'] == 'tpu':
            optimizer = tf.tpu.CrossShardOptimizer(optimizer)
        elif params['strategy'] == 'horovod':
            optimizer = hvd.DistributedOptimizer(optimizer)
            training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        var_list = tf.trainable_variables()
        if variable_filter_fn:
            var_list = variable_filter_fn(var_list)

        if params.get('clip_gradients_norm', 0) > 0:
            logging.info('clip gradients norm by %f',
                         params['clip_gradients_norm'])
            grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
            with tf.name_scope('clip'):
                grads = [gv[0] for gv in grads_and_vars]
                tvars = [gv[1] for gv in grads_and_vars]
                clipped_grads, gnorm = tf.clip_by_global_norm(
                    grads, params['clip_gradients_norm'])
                utils.scalar('gnorm', gnorm)
                grads_and_vars = list(zip(clipped_grads, tvars))

            with tf.control_dependencies(update_ops):
                train_op = optimizer.apply_gradients(grads_and_vars,
                                                     global_step)
        else:
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(total_loss,
                                              global_step,
                                              var_list=var_list)

        if moving_average_decay:
            with tf.control_dependencies([train_op]):
                train_op = ema.apply(ema_vars)

    else:
        train_op = None

    eval_metrics = None
    if mode == tf.estimator.ModeKeys.EVAL:

        def metric_fn(**kwargs):
            """Returns a dictionary that has the evaluation metrics."""
            if params.get('testdev_dir', None):
                logging.info('Eval testdev_dir %s', params['testdev_dir'])
                eval_metric = coco_metric.EvaluationMetric(
                    testdev_dir=params['testdev_dir'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    kwargs['detections_bs'], tf.zeros([1]))
            else:
                logging.info('Eval val with groundtruths %s.',
                             params['val_json_file'])
                eval_metric = coco_metric.EvaluationMetric(
                    filename=params['val_json_file'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    kwargs['detections_bs'], kwargs['groundtruth_data'])

            # Add metrics to output.
            cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
            box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
            output_metrics = {
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            }
            output_metrics.update(coco_metrics)
            return output_metrics

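        # The TPUEstimator metric_fn consumes per-example tensors, so the
        # scalar losses are tiled out to [batch_size, 1] first.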
        cls_loss_repeat = tf.reshape(
            tf.tile(tf.expand_dims(cls_loss, 0), [params['batch_size']]),
            [params['batch_size'], 1])
        box_loss_repeat = tf.reshape(
            tf.tile(tf.expand_dims(box_loss, 0), [params['batch_size']]),
            [params['batch_size'], 1])

        params['nms_configs']['max_nms_inputs'] = anchors.MAX_DETECTION_POINTS
        detections_bs = postprocess.generate_detections(
            params, cls_outputs, box_outputs, labels['image_scales'],
            labels['source_ids'])

        metric_fn_inputs = {
            'cls_loss_repeat': cls_loss_repeat,
            'box_loss_repeat': box_loss_repeat,
            'source_ids': labels['source_ids'],
            'groundtruth_data': labels['groundtruth_data'],
            'image_scales': labels['image_scales'],
            'detections_bs': detections_bs,
        }
        eval_metrics = (metric_fn, metric_fn_inputs)

    checkpoint = params.get('ckpt') or params.get('backbone_ckpt')

    if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:
        # Initialize the model from an EfficientDet or backbone checkpoint.
        if params.get('ckpt') and params.get('backbone_ckpt'):
            raise RuntimeError(
                '--backbone_ckpt and --checkpoint are mutually exclusive')

        if params.get('backbone_ckpt'):
            var_scope = params['backbone_name'] + '/'
            if params['ckpt_var_scope'] is None:
                # Use backbone name as default checkpoint scope.
                ckpt_scope = params['backbone_name'] + '/'
            else:
                ckpt_scope = params['ckpt_var_scope'] + '/'
        else:
            # Load every var in the given checkpoint
            var_scope = ckpt_scope = '/'

        def scaffold_fn():
            """Loads pretrained model through scaffold function."""
            logging.info('restore variables from %s', checkpoint)

            var_map = utils.get_ckpt_var_map(ckpt_path=checkpoint,
                                             ckpt_scope=ckpt_scope,
                                             var_scope=var_scope,
                                             var_exclude_expr=params.get(
                                                 'var_exclude_expr', None))

            tf.train.init_from_checkpoint(checkpoint, var_map)

            return tf.train.Scaffold()
    elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:

        def scaffold_fn():
            """Load moving average variables for eval."""
            logging.info('Load EMA vars with ema_decay=%f',
                         moving_average_decay)
            restore_vars_dict = ema.variables_to_restore(ema_vars)
            saver = tf.train.Saver(restore_vars_dict)
            return tf.train.Scaffold(saver=saver)
    else:
        scaffold_fn = None

    if params['strategy'] != 'tpu':
        # Profile every 1K steps.
        profile_hook = tf.train.ProfilerHook(save_steps=1000,
                                             output_dir=params['model_dir'])
        training_hooks.append(profile_hook)

        # Report memory allocation if OOM
        class OomReportingHook(tf.estimator.SessionRunHook):
            def before_run(self, run_context):
                return tf.estimator.SessionRunArgs(
                    fetches=[],
                    options=tf.RunOptions(
                        report_tensor_allocations_upon_oom=True))

        training_hooks.append(OomReportingHook())

    return tf.estimator.tpu.TPUEstimatorSpec(mode=mode,
                                             loss=total_loss,
                                             train_op=train_op,
                                             eval_metrics=eval_metrics,
                                             host_call=utils.get_tpu_host_call(
                                                 global_step, params),
                                             scaffold_fn=scaffold_fn,
                                             training_hooks=training_hooks)
Example #14
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        if not FLAGS.only_network:
            nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(
                images)
            nms_classes_bs += postprocess.CLASS_OFFSET

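            # TFLite NMS outputs are in normalized coordinates: scale them to
            # input pixels, then undo the preprocessing resize via
            # image_scales.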
            height, width = utils.parse_image_size(config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes_bs *= normalize_factor
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes_bs = nms_boxes_bs * tf.cast(scales,
                                                      nms_boxes_bs.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes_bs, nms_classes_bs, nms_scores_bs,
                labels['source_ids'])
        else:
            cls_outputs, box_outputs = lite_runner.run(images)
            detections = postprocess.generate_detections(
                config,
                cls_outputs,
                box_outputs,
                labels['image_scales'],
                labels['source_ids'],
                pre_class_nms=FLAGS.pre_class_nms)

        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
Example #15
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.batch_size = FLAGS.batch_size
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  base_height, base_width = utils.parse_image_size(config['image_size'])

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, base_height, base_width, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

  # in format (height, width, flip)
  augmentations = []
  if FLAGS.enable_tta:
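    # Multi-scale + horizontal-flip TTA: three input sizes, each with and
    # without a left-right flip (six augmented passes per image).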
    for size_offset in (0, 128, 256):
      for flip in (False, True):
        augmentations.append(
            (base_height + size_offset, base_width + size_offset, flip))
  else:
    augmentations.append((base_height, base_width, False))

  detections_per_source = dict()
  groundtruth_per_source = dict()  # per-image groundtruth, keyed by source id
  for height, width, flip in augmentations:
    config.image_size = (height, width)
    # dataset
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(
            config)

    # compute stats for all batches.
    for images, labels in ds:
      if flip:
        images = tf.image.flip_left_right(images)
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(config, cls_outputs,
                                                   box_outputs,
                                                   labels['image_scales'],
                                                   labels['source_ids'], flip)

      for img_id, d, gt in zip(labels['source_ids'], detections,
                               labels['groundtruth_data']):
        key = img_id.numpy()
        groundtruth_per_source[key] = gt
        if key in detections_per_source:
          detections_per_source[key] = tf.concat(
              [d, detections_per_source[key]], 0)
        else:
          detections_per_source[key] = d


  evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
  for key, d in detections_per_source.items():
    if FLAGS.enable_tta:
      d = wbf.ensemble_detections(config, d, len(augmentations))
    # Evaluate each image against its own groundtruth.
    evaluator.update_state(
        tf.stack([groundtruth_per_source[key]]).numpy(),
        postprocess.transform_detections(tf.stack([d])).numpy())

  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)