Code example #1
File: det_model_fn.py Project: tcwtc/efficientdet
        def metric_fn(**kwargs):
            """Returns a dictionary that has the evaluation metrics."""
            if params.get('testdev_dir', None):
                logging.info('Eval testdev_dir %s', params['testdev_dir'])
                eval_metric = coco_metric.EvaluationMetric(
                    testdev_dir=params['testdev_dir'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    kwargs['detections_bs'], tf.zeros([1]))
            else:
                logging.info('Eval val with groundtruths %s.',
                             params['val_json_file'])
                eval_metric = coco_metric.EvaluationMetric(
                    filename=params['val_json_file'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    kwargs['detections_bs'], kwargs['groundtruth_data'])

            # Add metrics to output.
            cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
            box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
            output_metrics = {
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            }
            output_metrics.update(coco_metrics)
            return output_metrics
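
Note on how these estimator-style metric_fn closures are consumed: the function is not called directly, but handed to the TPUEstimator together with the tensors it needs. Below is a minimal sketch of that wiring, assuming the TF 1.x tf.estimator.tpu API; detections_bs, cls_loss_repeat, box_loss_repeat and total_loss are illustrative placeholders computed elsewhere in model_fn, not values taken from the file above.

# Sketch only: wiring metric_fn into the eval EstimatorSpec. The Estimator
# copies the tensors in metric_fn_inputs to the CPU host and then calls
# metric_fn(**metric_fn_inputs) there.
metric_fn_inputs = {
    'detections_bs': detections_bs,
    'groundtruth_data': labels['groundtruth_data'],
    'cls_loss_repeat': cls_loss_repeat,
    'box_loss_repeat': box_loss_repeat,
}
spec = tf.estimator.tpu.TPUEstimatorSpec(
    mode=tf.estimator.ModeKeys.EVAL,
    loss=total_loss,
    eval_metrics=(metric_fn, metric_fn_inputs))
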
Code example #2
def coco_metric_fn(batch_size,
                   anchor_labeler,
                   filename=None,
                   testdev_dir=None,
                   **kwargs):
    """Evaluation metric fn. Performed on CPU, do not reference TPU ops."""
    # add metrics to output
    detections_bs = []
    for index in range(batch_size):
        cls_outputs_per_sample = kwargs['cls_outputs_all'][index]
        box_outputs_per_sample = kwargs['box_outputs_all'][index]
        indices_per_sample = kwargs['indices_all'][index]
        classes_per_sample = kwargs['classes_all'][index]
        detections = anchor_labeler.generate_detections(
            cls_outputs_per_sample,
            box_outputs_per_sample,
            indices_per_sample,
            classes_per_sample,
            tf.slice(kwargs['source_ids'], [index], [1]),
            tf.slice(kwargs['image_scales'], [index], [1]),
            disable_pyfun=kwargs.get('disable_pyfun', None),
        )
        detections_bs.append(detections)

    if testdev_dir:
        eval_metric = coco_metric.EvaluationMetric(testdev_dir=testdev_dir)
        coco_metrics = eval_metric.estimator_metric_fn(detections_bs,
                                                       tf.zeros([1]))
    else:
        eval_metric = coco_metric.EvaluationMetric(filename=filename)
        coco_metrics = eval_metric.estimator_metric_fn(
            detections_bs, kwargs['groundtruth_data'])
    return coco_metrics
Code example #3
    def metric_fn(**kwargs):
      """Returns a dictionary that has the evaluation metrics."""
      if params['nms_configs'].get('pyfunc', True):
        detections_bs = []
        for index in range(kwargs['boxes'].shape[0]):
          nms_configs = params['nms_configs']
          detections = tf.numpy_function(
              functools.partial(nms_np.per_class_nms, nms_configs=nms_configs),
              [
                  kwargs['boxes'][index],
                  kwargs['scores'][index],
                  kwargs['classes'][index],
                  tf.slice(kwargs['image_ids'], [index], [1]),
                  tf.slice(kwargs['image_scales'], [index], [1]),
                  params['num_classes'],
                  nms_configs['max_output_size'],
              ], tf.float32)
          detections_bs.append(detections)
      else:
        # These two branches should be equivalent, but currently they are not.
        # TODO(tanmingxing): enable the non_pyfun path after bug fix.
        nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(
            params, kwargs['boxes'], kwargs['scores'], kwargs['classes'],
            kwargs['image_scales'])
        img_ids = tf.cast(
            tf.expand_dims(kwargs['image_ids'], -1), nms_scores.dtype)
        detections_bs = [
            img_ids * tf.ones_like(nms_scores),
            nms_boxes[:, :, 1],
            nms_boxes[:, :, 0],
            nms_boxes[:, :, 3] - nms_boxes[:, :, 1],
            nms_boxes[:, :, 2] - nms_boxes[:, :, 0],
            nms_scores,
            nms_classes,
        ]
        detections_bs = tf.stack(detections_bs, axis=-1, name='detections')

      if params.get('testdev_dir', None):
        logging.info('Eval testdev_dir %s', params['testdev_dir'])
        eval_metric = coco_metric.EvaluationMetric(
            testdev_dir=params['testdev_dir'])
        coco_metrics = eval_metric.estimator_metric_fn(detections_bs,
                                                       tf.zeros([1]))
      else:
        logging.info('Eval val with groundtruths %s.', params['val_json_file'])
        eval_metric = coco_metric.EvaluationMetric(
            filename=params['val_json_file'])
        coco_metrics = eval_metric.estimator_metric_fn(
            detections_bs, kwargs['groundtruth_data'])

      # Add metrics to output.
      cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
      box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
      output_metrics = {
          'cls_loss': cls_loss,
          'box_loss': box_loss,
      }
      output_metrics.update(coco_metrics)
      return output_metrics
Code example #4
def compute_coco_eval_metric(predictor,
                             num_batches=-1,
                             include_mask=True,
                             annotation_json_file=None):
    """Compute COCO eval metric given a prediction generator.

  Args:
    predictor: a generator that iteratively pops a dictionary of predictions
      with the format compatible with COCO eval tool.
    num_batches: the number of batches to be aggregated in eval. This is how
      many times the predictor gets pulled.
    include_mask: a boolean that indicates whether we include the mask eval.
    annotation_json_file: the annotation json file of the eval dataset.

  Returns:
    eval_results: the aggregated COCO metric eval results.
  """
    if not annotation_json_file:
        annotation_json_file = None
    use_groundtruth_from_json = (annotation_json_file is not None)

    predictions = dict()
    batch_idx = 0
    while num_batches < 0 or batch_idx < num_batches:
        try:
            prediction = six.next(predictor)
            tf.logging.info('Running inference on batch %d/%d...' %
                            (batch_idx + 1, num_batches))
        except StopIteration:
            tf.logging.info('Get StopIteration at %d batch.' % (batch_idx + 1))
            break

        prediction = process_prediction_for_eval(prediction)
        for k, v in six.iteritems(prediction):
            if k not in predictions:
                predictions[k] = [v]
            else:
                predictions[k].append(v)

        batch_idx = batch_idx + 1

    for k, v in six.iteritems(predictions):
        predictions[k] = np.concatenate(predictions[k], axis=0)

    if use_groundtruth_from_json:
        eval_metric = coco_metric.EvaluationMetric(annotation_json_file,
                                                   include_mask=include_mask)
        eval_results = eval_metric.predict_metric_fn(predictions)
    else:
        images, annotations = coco_utils.extract_coco_groundtruth(
            predictions, include_mask)
        dataset = coco_utils.create_coco_format_dataset(images, annotations)
        eval_metric = coco_metric.EvaluationMetric(filename=None,
                                                   include_mask=include_mask)
        eval_results = eval_metric.predict_metric_fn(predictions,
                                                     groundtruth_data=dataset)
    tf.logging.info('Eval results: %s' % eval_results)
    return eval_results
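
For context, a hedged sketch of how such a prediction generator is typically obtained and fed to compute_coco_eval_metric; eval_estimator, dataloader.InputReader and the FLAGS values are illustrative placeholders mirroring the estimator-based examples further below.

# Sketch only: build a batch-level predictor and aggregate COCO metrics.
predictor = eval_estimator.predict(
    input_fn=dataloader.InputReader(
        FLAGS.validation_file_pattern, mode=tf.estimator.ModeKeys.PREDICT),
    yield_single_examples=False)  # each next() yields one whole batch dict
eval_results = compute_coco_eval_metric(
    predictor,
    num_batches=FLAGS.eval_samples // FLAGS.eval_batch_size,
    include_mask=True,
    annotation_json_file=FLAGS.val_json_file)
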
Code example #5
 def metric_fn(**kwargs):
   """Evaluation metric fn. Performed on CPU, do not reference TPU ops."""
   eval_anchors = anchors.Anchors(params['min_level'],
                                  params['max_level'],
                                  params['num_scales'],
                                  params['aspect_ratios'],
                                  params['anchor_scale'],
                                  params['image_size'])
   anchor_labeler = anchors.AnchorLabeler(eval_anchors,
                                          params['num_classes'])
   cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
   box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
   # add metrics to output
   cls_outputs = {}
   box_outputs = {}
   for level in range(params['min_level'], params['max_level'] + 1):
     cls_outputs[level] = kwargs['cls_outputs_%d' % level]
     box_outputs[level] = kwargs['box_outputs_%d' % level]
   detections = anchor_labeler.generate_detections(
       cls_outputs, box_outputs, kwargs['source_ids'])
   eval_metric = coco_metric.EvaluationMetric(params['val_json_file'])
   coco_metrics = eval_metric.estimator_metric_fn(detections,
                                                  kwargs['image_scales'])
   # Add metrics to output.
   output_metrics = {
       'cls_loss': cls_loss,
       'box_loss': box_loss,
   }
   output_metrics.update(coco_metrics)
   return output_metrics
Code example #6
File: eval_coco.py Project: stjordanis/TensorRT
def main(args):
    automl_path = os.path.realpath(args.automl_path)
    sys.path.insert(1, os.path.join(automl_path, "efficientdet"))
    try:
        import coco_metric
    except ImportError:
        print("Could not import the 'coco_metric' module from AutoML. Searching in: {}".format(automl_path))
        print("Please clone the repository https://github.com/google/automl and provide its path with --automl_path.")
        sys.exit(1)

    trt_infer = TensorRTInfer(args.engine)
    batcher = ImageBatcher(args.input, *trt_infer.input_spec())
    evaluator = coco_metric.EvaluationMetric(filename=args.annotations)
    for batch, images, scales in batcher.get_batch():
        print("Processing Image {} / {}".format(batcher.image_index, batcher.num_images), end="\r")
        detections = trt_infer.infer(batch, scales, args.nms_threshold)
        coco_det = np.zeros((len(images), max([len(d) for d in detections]), 7))
        coco_det[:, :, -1] = -1
        for i in range(len(images)):
            for n in range(len(detections[i])):
                source_id = int(os.path.splitext(os.path.basename(images[i]))[0])
                det = detections[i][n]
                coco_det[i][n] = [
                    source_id,
                    det['xmin'],
                    det['ymin'],
                    det['xmax'] - det['xmin'],
                    det['ymax'] - det['ymin'],
                    det['score'],
                    det['class'] + 1,  # The COCO evaluator expects class 0 to be background, so offset by 1
                ]
        evaluator.update_state(None, coco_det)
    print()
    evaluator.result(100)
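
The seven columns assembled above follow the detection layout that EvaluationMetric consumes in the other examples as well: [image_id, xmin, ymin, width, height, score, class]. A minimal sketch with made-up values:

import numpy as np

# Sketch only: one dummy detection in the 7-column layout, shaped
# [batch_size, num_detections, 7]. All numbers are illustrative.
dummy_detections = np.array([[
    [42.0,                    # image / source id
     10.0, 20.0, 50.0, 80.0,  # xmin, ymin, width, height in pixels
     0.9,                     # confidence score
     1.0],                    # COCO category id
]], dtype=np.float32)
# evaluator.update_state(None, dummy_detections)  # as in the loop above
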
Code example #7
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file

    # dataset
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(config)

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, None, None, 3))
    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)

    # compute stats for all batches.
    for images, labels in ds:
        config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS

        cls_outputs, box_outputs = model(images, training=False)
        detections = postprocess.generate_detections(config, cls_outputs,
                                                     box_outputs,
                                                     labels['image_scales'],
                                                     labels['source_ids'],
                                                     False)

        if FLAGS.enable_tta:
            images_flipped = tf.image.flip_left_right(images)
            cls_outputs_flipped, box_outputs_flipped = model(images_flipped,
                                                             training=False)
            detections_flipped = postprocess.generate_detections(
                config, cls_outputs_flipped, box_outputs_flipped,
                labels['image_scales'], labels['source_ids'], True)

            for d, df in zip(detections, detections_flipped):
                combined_detections = wbf.ensemble_detections(
                    config, tf.concat([d, df], 0))
                combined_detections = tf.stack([combined_detections])
                evaluator.update_state(
                    labels['groundtruth_data'].numpy(),
                    postprocess.transform_detections(
                        combined_detections).numpy())
        else:
            evaluator.update_state(
                labels['groundtruth_data'].numpy(),
                postprocess.transform_detections(detections).numpy())

    # compute the final eval results.
    metric_values = evaluator.result()
    metric_dict = {}
    for i, metric_value in enumerate(metric_values):
        metric_dict[evaluator.metric_names[i]] = metric_value
    print(metric_dict)
Code example #8
File: train_lib.py Project: teamteam321/automl
 def set_model(self, model: tf.keras.Model):
   self.model = model
   config = model.config
   self.config = config
   label_map = label_util.get_label_map(config.label_map)
   log_dir = os.path.join(config.model_dir, 'coco')
   self.file_writer = tf.summary.create_file_writer(log_dir)
   self.evaluator = coco_metric.EvaluationMetric(
       filename=config.val_json_file, label_map=label_map)
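
A hedged sketch of how a callback like this might use the stored evaluator and file writer at the end of an epoch; this is an assumption for illustration, not the actual train_lib implementation, and it presumes update_state() has already been called on every eval batch.

 def on_epoch_end(self, epoch, logs=None):
   # Sketch only (assumed usage): publish the evaluator's COCO metrics to
   # TensorBoard through the file writer created in set_model() above.
   metrics = self.evaluator.result()
   with self.file_writer.as_default():
     for name, value in zip(self.evaluator.metric_names, metrics):
       tf.summary.scalar('coco/' + name, value, step=epoch)
     self.file_writer.flush()
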
Code example #9
    def test_mAP(self):

        eval_metric = coco_metric.EvaluationMetric(label_map=self.class_labels)
        coco_metrics = eval_metric.estimator_metric_fn(self.detections,
                                                       self.groundtruth_data)
        self.assertEqual(len(coco_metrics.keys()), 15)
        self.assertAllClose(coco_metrics['AP'][0], 2.0 / 3.0)
        self.assertAllClose(coco_metrics['AP_/car'][0], 1.0)
        self.assertAllClose(coco_metrics['AP_/truck'][0], 1.0)
        self.assertAllClose(coco_metrics['AP_/bicycle'][0], 0.0)
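
For reference, the 15 keys asserted above are consistent with the 12 standard COCO summary metrics plus one per-class 'AP_/<label>' entry for each of the three classes in label_map; each value is a tf.metrics-style (value, update_op) pair, which is why the assertions index element [0].
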
Code example #10
File: det_model_fn.py Project: etam103/automl
        def metric_fn(**kwargs):
            """Returns a dictionary that has the evaluation metrics."""
            nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(
                params, kwargs['boxes'], kwargs['scores'], kwargs['classes'],
                kwargs['image_scales'])
            img_ids = tf.cast(tf.expand_dims(kwargs['source_ids'], -1),
                              nms_scores.dtype)
            detections = [
                img_ids * tf.ones_like(nms_scores),
                nms_boxes[:, :, 1],
                nms_boxes[:, :, 0],
                nms_boxes[:, :, 3] - nms_boxes[:, :, 1],
                nms_boxes[:, :, 2] - nms_boxes[:, :, 0],
                nms_scores,
                nms_classes,
            ]
            detections = tf.stack(detections, axis=-1, name='detections')
            kwargs['detections_bs'] = detections

            if params.get('testdev_dir', None):
                logging.info('Eval testdev_dir %s', params['testdev_dir'])
                eval_metric = coco_metric.EvaluationMetric(
                    testdev_dir=params['testdev_dir'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    detections, tf.zeros([1]))
            else:
                logging.info('Eval val with groundtruths %s.',
                             params['val_json_file'])
                eval_metric = coco_metric.EvaluationMetric(
                    filename=params['val_json_file'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    detections, kwargs['groundtruth_data'])

            # Add metrics to output.
            cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
            box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
            output_metrics = {
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            }
            output_metrics.update(coco_metrics)
            return output_metrics
Code example #11
def evaluation(eval_estimator, num_epochs, val_json_file):
  """Runs one evluation."""
  mlperf_log.maskrcnn_print(key=mlperf_log.EVAL_START,
                            value=num_epochs)
  mlperf_log.maskrcnn_print(key=mlperf_log.BATCH_SIZE_TEST,
                            value=FLAGS.eval_batch_size)
  predictor = eval_estimator.predict(
      input_fn=dataloader.InputReader(
          FLAGS.validation_file_pattern,
          mode=tf.estimator.ModeKeys.PREDICT),
      yield_single_examples=False)
  # Every predictor.next() gets a batch of prediction (a dictionary).
  predictions = dict()
  for _ in range(FLAGS.eval_samples // FLAGS.eval_batch_size):
    prediction = six.next(predictor)
    image_info = prediction['image_info']
    raw_detections = prediction['detections']
    processed_detections = raw_detections
    for b in range(raw_detections.shape[0]):
      scale = image_info[b][2]
      for box_id in range(raw_detections.shape[1]):
        # Map [y1, x1, y2, x2] -> [x1, y1, w, h] and multiply detections
        # by image scale.
        new_box = raw_detections[b, box_id, :]
        y1, x1, y2, x2 = new_box[1:5]
        new_box[1:5] = scale * np.array([x1, y1, x2 - x1, y2 - y1])
        processed_detections[b, box_id, :] = new_box
    prediction['detections'] = processed_detections

    for k, v in six.iteritems(prediction):
      if k not in predictions:
        predictions[k] = v
      else:
        predictions[k] = np.append(predictions[k], v, axis=0)

  eval_metric = coco_metric.EvaluationMetric(val_json_file)
  eval_results = eval_metric.predict_metric_fn(predictions)
  tf.logging.info('Eval results: %s' % eval_results)
  mlperf_log.maskrcnn_print(key=mlperf_log.EVAL_STOP,
                            value=num_epochs)
  mlperf_log.maskrcnn_print(key=mlperf_log.EVAL_SIZE,
                            value=FLAGS.eval_samples)
  mlperf_log.maskrcnn_print(
      key=mlperf_log.EVAL_ACCURACY,
      value={
          'epoch': num_epochs,
          'box_AP': str(eval_results['AP']),
          'mask_AP': str(eval_results['mask_AP']),
      })

  return eval_results
Code example #12
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  # Evaluator for AP calculation.
  label_map = label_util.get_label_map(config.label_map)
  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)

  # dataset
  batch_size = 1
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      max_instances_per_image=config.max_instances_per_image)(
          config, batch_size=batch_size)
  eval_samples = FLAGS.eval_samples
  if eval_samples:
    ds = ds.take((eval_samples + batch_size - 1) // batch_size)

  # Network
  lite_runner = LiteRunner(FLAGS.tflite_path)
  eval_samples = FLAGS.eval_samples or 5000
  pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
  for i, (images, labels) in enumerate(ds):
    cls_outputs, box_outputs = lite_runner.run(images)
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    detections = postprocess.transform_detections(detections)
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())
    pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]

  if label_map:
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
Code example #13
File: evaluation.py Project: sukingw/tpu
def compute_coco_eval_metric(predictor,
                             num_batches=-1,
                             include_mask=True,
                             annotation_json_file=None):
    """Compute COCO eval metric given a prediction generator.

  Args:
    predictor: a generator that iteratively pops a dictionary of predictions
      with the format compatible with COCO eval tool.
    num_batches: the number of batches to be aggregated in eval. This is how
      many times the predictor gets pulled.
    include_mask: a boolean that indicates whether we include the mask eval.
    annotation_json_file: the annotation json file of the eval dataset.

  Returns:
    eval_results: the aggregated COCO metric eval results.
  """
    # TODO(pengchong): remove assertion once we support eval without json.
    assert annotation_json_file is not None

    predictions = dict()
    batch_idx = 0
    while num_batches < 0 or batch_idx < num_batches:
        try:
            prediction = six.next(predictor)
            tf.logging.info('Running inference on batch %d/%d...' %
                            (batch_idx + 1, num_batches))
        except StopIteration:
            tf.logging.info('Get StopIteration at %d batch.' % (batch_idx + 1))
            break

        prediction = process_prediction_for_eval(prediction)
        for k, v in six.iteritems(prediction):
            if k not in predictions:
                predictions[k] = v
            else:
                predictions[k] = np.append(predictions[k], v, axis=0)

        batch_idx = batch_idx + 1

    eval_metric = coco_metric.EvaluationMetric(annotation_json_file,
                                               include_mask=include_mask)
    eval_results = eval_metric.predict_metric_fn(predictions)
    tf.logging.info('Eval results: %s' % eval_results)
    return eval_results
Code example #14
File: retinanet_model.py Project: ryanpstauffer/tpu
        def metric_fn(**kwargs):
            """Evaluation metric fn. Performed on CPU, do not reference TPU ops."""
            eval_anchors = anchors.Anchors(params['min_level'],
                                           params['max_level'],
                                           params['num_scales'],
                                           params['aspect_ratios'],
                                           params['anchor_scale'],
                                           params['image_size'])
            anchor_labeler = anchors.AnchorLabeler(eval_anchors,
                                                   params['num_classes'])
            cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
            box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
            # add metrics to output
            cls_outputs = {}
            box_outputs = {}
            detections_bs = []
            for index in range(batch_size):
                for level in range(params['min_level'],
                                   params['max_level'] + 1):
                    _, w, h, c = kwargs['cls_outputs_%d' %
                                        level].get_shape().as_list()
                    cls_outputs[level] = tf.slice(
                        kwargs['cls_outputs_%d' % level], [index, 0, 0, 0],
                        [1, w, h, c])
                    _, w, h, c = kwargs['box_outputs_%d' %
                                        level].get_shape().as_list()
                    box_outputs[level] = tf.slice(
                        kwargs['box_outputs_%d' % level], [index, 0, 0, 0],
                        [1, w, h, c])
                detections = anchor_labeler.generate_detections(
                    cls_outputs, box_outputs,
                    tf.slice(kwargs['source_ids'], [index], [1]),
                    tf.slice(kwargs['image_scales'], [index], [1]))
                detections_bs.append(detections)
            eval_metric = coco_metric.EvaluationMetric(params['val_json_file'])
            coco_metrics = eval_metric.estimator_metric_fn(
                detections_bs, kwargs['groundtruth_data'])

            # Add metrics to output.
            output_metrics = {
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            }
            output_metrics.update(coco_metrics)
            return output_metrics
Code example #15
File: evaluation.py Project: zzm422/tpu
def compute_coco_eval_metric(predictor,
                             num_batches=-1,
                             annotation_json_file=None):
  """Compute COCO eval metric given a prediction generator.

  Args:
    predictor: a generator that iteratively pops a dictionary of predictions
      with the format compatible with COCO eval tool.
    num_batches: the number of batches to be aggregated in eval. This is how
      many times the predictor gets pulled.
    annotation_json_file: the annotation json file of the eval dataset.

  Returns:
    eval_results: the aggregated COCO metric eval results.
  """
  assert annotation_json_file is not None
  # For retinanet.coco_metric
  eval_metric = coco_metric.EvaluationMetric(annotation_json_file)
  predictions = dict()
  batch_idx = 0
  while num_batches < 0 or batch_idx < num_batches:
    try:
      prediction = six.next(predictor)
      tf.logging.info('Running inference on batch %d/%d...' %
                      (batch_idx + 1, num_batches))
    except StopIteration:
      tf.logging.info('Get StopIteration at %d batch.' % (batch_idx + 1))
      break

    prediction = process_prediction_for_eval(prediction)
    for k, v in six.iteritems(prediction):
      if k not in predictions:
        predictions[k] = [v]
      else:
        predictions[k].append(v)

    batch_idx = batch_idx + 1

  for k, v in six.iteritems(predictions):
    predictions[k] = np.concatenate(predictions[k], axis=0)

  eval_results = eval_metric.predict_metric_fn(predictions)
  tf.logging.info('Eval results: %s' % eval_results)
  return eval_results
Code example #16
File: eval.py Project: DaveCoding/automl
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.batch_size = FLAGS.batch_size
  config.val_json_file = FLAGS.val_json_file

  # dataset
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      use_fake_data=False,
      max_instances_per_image=config.max_instances_per_image)(
          config)

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, 512, 512, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file)

  # compute stats for all batches.
  for images, labels in ds:
    cls_outputs, box_outputs = model(images, training=False)
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())

  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)
Code example #17
def evaluation(eval_estimator, config):
    """Runs one evluation."""
    predictor = eval_estimator.predict(input_fn=dataloader.InputReader(
        config.validation_file_pattern,
        mode=tf.estimator.ModeKeys.PREDICT,
        num_examples=config.eval_samples,
        use_instance_mask=config.include_mask),
                                       yield_single_examples=False)
    # Every predictor.next() gets a batch of prediction (a dictionary).
    predictions = dict()
    for _ in range(config.eval_samples // config.eval_batch_size):
        prediction = six.next(predictor)
        image_info = prediction['image_info']
        raw_detections = prediction['detections']
        processed_detections = raw_detections
        for b in range(raw_detections.shape[0]):
            scale = image_info[b][2]
            for box_id in range(raw_detections.shape[1]):
                # Map [y1, x1, y2, x2] -> [x1, y1, w, h] and multiply detections
                # by image scale.
                new_box = raw_detections[b, box_id, :]
                y1, x1, y2, x2 = new_box[1:5]
                new_box[1:5] = scale * np.array([x1, y1, x2 - x1, y2 - y1])
                processed_detections[b, box_id, :] = new_box
        prediction['detections'] = processed_detections

        for k, v in six.iteritems(prediction):
            if k not in predictions:
                predictions[k] = v
            else:
                predictions[k] = np.append(predictions[k], v, axis=0)

    eval_metric = coco_metric.EvaluationMetric(
        config.val_json_file, include_mask=config.include_mask)
    eval_results = eval_metric.predict_metric_fn(predictions)
    tf.logging.info('Eval results: %s' % eval_results)

    return eval_results
Code example #18
def main(_):
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    config.batch_size = 8
    config.val_json_file = 'tmp/coco/annotations/instances_val2017.json'

    # dataset
    input_files = 'tmp/coco/val-00000-of-00032.tfrecord'
    is_training = False
    ds = dataloader.InputReader(
        input_files,
        is_training=is_training,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(config)

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, 512, 512, 3))
    model.load_weights('tmp/efficientdet-d0/model')

    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
    # compute stats for all batches.
    for images, labels in ds:
        cls_outputs, box_outputs = model(images, training=False)
        config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
        detections = postprocess.generate_detections(config, cls_outputs,
                                                     box_outputs,
                                                     labels['image_scales'],
                                                     labels['source_ids'])
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())

    # compute the final eval results.
    metric_values = evaluator.result()
    metric_dict = {}
    for i, metric_value in enumerate(metric_values):
        metric_dict[evaluator.metric_names[i]] = metric_value
    print(metric_dict)
Code example #19
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        if not FLAGS.only_network:
            nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(
                images)
            nms_classes_bs += postprocess.CLASS_OFFSET

            height, width = utils.parse_image_size(config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes_bs *= normalize_factor
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes_bs = nms_boxes_bs * tf.cast(scales,
                                                      nms_boxes_bs.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes_bs, nms_classes_bs, nms_scores_bs,
                labels['source_ids'])
        else:
            cls_outputs, box_outputs = lite_runner.run(images)
            detections = postprocess.generate_detections(
                config,
                cls_outputs,
                box_outputs,
                labels['image_scales'],
                labels['source_ids'],
                per_class_nms=FLAGS.per_class_nms)

        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
Code example #20
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    base_height, base_width = utils.parse_image_size(config['image_size'])

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, base_height, base_width, 3))
    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

    @tf.function
    def f(imgs, labels, flip):
        cls_outputs, box_outputs = model(imgs, training=False)
        return postprocess.generate_detections(config, cls_outputs,
                                               box_outputs,
                                               labels['image_scales'],
                                               labels['source_ids'], flip)

    # in format (height, width, flip)
    augmentations = []
    if FLAGS.enable_tta:
        for size_offset in (0, 128, 256):
            for flip in (False, True):
                augmentations.append((base_height + size_offset,
                                      base_width + size_offset, flip))
    else:
        augmentations.append((base_height, base_width, False))

    evaluator = None
    detections_per_source = dict()
    for height, width, flip in augmentations:
        config.image_size = (height, width)
        # dataset
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            use_fake_data=False,
            max_instances_per_image=config.max_instances_per_image)(config)

        # compute stats for all batches.
        total_steps = FLAGS.eval_samples // FLAGS.batch_size
        progress = tf.keras.utils.Progbar(total_steps)
        for i, (images, labels) in enumerate(ds):
            progress.update(i, values=None)
            if i > total_steps:
                break

            if flip:
                images = tf.image.flip_left_right(images)
            detections = f(images, labels, flip)

            for img_id, d in zip(labels['source_ids'], detections):
                if img_id.numpy() in detections_per_source:
                    detections_per_source[img_id.numpy()] = tf.concat(
                        [d, detections_per_source[img_id.numpy()]], 0)
                else:
                    detections_per_source[img_id.numpy()] = d

            evaluator = coco_metric.EvaluationMetric(
                filename=config.val_json_file)
            for d in detections_per_source.values():
                if FLAGS.enable_tta:
                    d = wbf.ensemble_detections(config, d, len(augmentations))
                evaluator.update_state(
                    labels['groundtruth_data'].numpy(),
                    postprocess.transform_detections(tf.stack([d])).numpy())

    # compute the final eval results.
    if evaluator:
        metrics = evaluator.result()
        metric_dict = {}
        for i, name in enumerate(evaluator.metric_names):
            metric_dict[name] = metrics[i]

        label_map = label_util.get_label_map(config.label_map)
        if label_map:
            for i, cid in enumerate(sorted(label_map.keys())):
                name = 'AP_/%s' % label_map[cid]
                metric_dict[name] = metrics[i - len(evaluator.metric_names)]
        print(metric_dict)
Code example #21
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  if config.strategy == 'tpu':
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
    tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
    ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
    logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
  elif config.strategy == 'gpus':
    ds_strategy = tf.distribute.MirroredStrategy()
    logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
  else:
    if tf.config.list_physical_devices('GPU'):
      ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
    else:
      ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

  with ds_strategy.scope():
    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((None, *config.image_size, 3))
    util_keras.restore_ckpt(model,
                            tf.train.latest_checkpoint(FLAGS.model_dir),
                            config.moving_average_decay,
                            skip_mismatch=False)
    @tf.function
    def model_fn(images, labels):
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(config,
                                                   cls_outputs,
                                                   box_outputs,
                                                   labels['image_scales'],
                                                   labels['source_ids'])
      tf.numpy_function(evaluator.update_state,
                        [labels['groundtruth_data'],
                         postprocess.transform_detections(detections)], [])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file, label_map=label_map)

    # dataset
    batch_size = FLAGS.batch_size   # global batch size.
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    if FLAGS.eval_samples:
      ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
    ds = ds_strategy.experimental_distribute_dataset(ds)

    # evaluate all images.
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
      ds_strategy.run(model_fn, (images, labels))
      pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]

  if label_map:
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
Code example #22
File: eval.py Project: ailabktw/automl
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    base_height, base_width = utils.parse_image_size(config['image_size'])

    if FLAGS.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif FLAGS.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    # in format (height, width, flip)
    augmentations = []
    if FLAGS.enable_tta:
        for size_offset in (0, 128, 256):
            for flip in (False, True):
                augmentations.append((base_height + size_offset,
                                      base_width + size_offset, flip))
    else:
        augmentations.append((base_height, base_width, False))

    all_detections = []
    all_labels = []
    with ds_strategy.scope():
        # Network
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build((config.batch_size, base_height, base_width, 3))
        model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

        first_loop = True
        for height, width, flip in augmentations:
            config.image_size = (height, width)
            # dataset
            ds = dataloader.InputReader(
                FLAGS.val_file_pattern,
                is_training=False,
                use_fake_data=False,
                max_instances_per_image=config.max_instances_per_image)(config)

            # create the function once per augmentation, since it closes over the
            # value of config, which gets updated with the new image size
            @tf.function
            def f(images, labels):
                cls_outputs, box_outputs = model(images, training=False)
                return postprocess.generate_detections(config, cls_outputs,
                                                       box_outputs,
                                                       labels['image_scales'],
                                                       labels['source_ids'],
                                                       flip)

            # inference
            for images, labels in ds:
                if flip:
                    images = tf.image.flip_left_right(images)
                detections = f(images, labels)

                all_detections.append(detections)
                if first_loop:
                    all_labels.append(labels)

            first_loop = False

    # collect the giant list of detections into a map from image id to
    # detections
    detections_per_source = dict()
    for batch in all_detections:
        for d in batch:
            img_id = d[0][0]
            if img_id.numpy() in detections_per_source:
                detections_per_source[img_id.numpy()] = tf.concat(
                    [d, detections_per_source[img_id.numpy()]], 0)
            else:
                detections_per_source[img_id.numpy()] = d

    # collect the groundtruth per image id
    groundtruth_per_source = dict()
    for batch in all_labels:
        for img_id, groundtruth in zip(batch['source_ids'],
                                       batch['groundtruth_data']):
            groundtruth_per_source[img_id.numpy()] = groundtruth

    # calculate the AP scores for all the images
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
    for img_id, d in detections_per_source.items():
        if FLAGS.enable_tta:
            d = wbf.ensemble_detections(config, d, len(augmentations))
        evaluator.update_state(
            tf.stack([groundtruth_per_source[img_id]]).numpy(),
            postprocess.transform_detections(tf.stack([d])).numpy())

    # compute the final eval results.
    if evaluator:
        metrics = evaluator.result()
        metric_dict = {}
        for i, name in enumerate(evaluator.metric_names):
            metric_dict[name] = metrics[i]

        label_map = label_util.get_label_map(config.label_map)
        if label_map:
            for i, cid in enumerate(sorted(label_map.keys())):
                name = 'AP_/%s' % label_map[cid]
                metric_dict[name] = metrics[i - len(evaluator.metric_names)]
        print(metric_dict)
Code example #23
  config.val_json_file = FLAGS.val_json_file

  # dataset
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      use_fake_data=False,
      max_instances_per_image=config.max_instances_per_image)(
          config)

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, 512, 512, 3))
  model.load_weights(FLAGS.checkpoint)

  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file)

  # compute stats for all batches.
  for images, labels in ds:
    cls_outputs, box_outputs = model(images, training=False)
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())

  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)
Code example #24
def main(argv):
    del argv  # Unused.

    # TODO(b/132208296): remove this workaround that uses control flow v2.
    control_flow_util.ENABLE_CONTROL_FLOW_V2 = True

    tpu = FLAGS.tpu or FLAGS.master
    tpu_cluster_resolver = runner_utils.create_tpu_cluster_resolver(
        FLAGS.use_tpu, tpu, FLAGS.tpu_zone, FLAGS.gcp_project)
    if tpu_cluster_resolver:
        tpu_grpc_url = tpu_cluster_resolver.get_master()
        tf.Session.reset(tpu_grpc_url)

    # Check data path
    run_train = FLAGS.mode in ('train', 'train_and_eval')
    if run_train and FLAGS.training_file_pattern is None:
        raise RuntimeError(
            'You must specify --training_file_pattern for training.')
    run_eval = FLAGS.mode in ('eval', 'train_and_eval') or (
        FLAGS.mode == 'train' and FLAGS.eval_after_training)
    if run_eval:
        if FLAGS.validation_file_pattern is None:
            raise RuntimeError('You must specify --validation_file_pattern '
                               'for evaluation.')
        if FLAGS.val_json_file is None:
            raise RuntimeError(
                'You must specify --val_json_file for evaluation.')

    # Parse hparams
    hparams = mask_rcnn_params.default_hparams()
    hparams.parse(FLAGS.hparams)

    # The following is for spatial partitioning. `features` has one tensor while
    # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
    # partition is performed on `features` and all partitionable tensors of
    # `labels`, see the partition logic below.
    # Note: In the below code, TPUEstimator uses both `shard` and `replica` (with
    # the same meaning).
    # Note that spatial partition is part of the model-parallelism optimization.
    # See core_assignment_utils.py for more details about model parallelism.
    if FLAGS.input_partition_dims:
        labels_partition_dims = {
            'gt_boxes': None,
            'gt_classes': None,
            'cropped_gt_masks': None,
        }
        for level in range(hparams.get('min_level'),
                           hparams.get('max_level') + 1):
            labels_partition_dims['box_targets_%d' % level] = None
            labels_partition_dims['score_targets_%d' % level] = None
        num_cores_per_replica = int(np.prod(FLAGS.input_partition_dims))
        image_partition_dims = [
            FLAGS.input_partition_dims[i] for i in [1, 0, 2]
        ] if hparams.get('transpose_input') else FLAGS.input_partition_dims
        features_partition_dims = {
            'images': image_partition_dims,
            'source_ids': None,
            'image_info': None,
        }
        input_partition_dims = [features_partition_dims, labels_partition_dims]
        num_shards = FLAGS.num_cores // num_cores_per_replica
    else:
        num_cores_per_replica = None
        input_partition_dims = None
        num_shards = FLAGS.num_cores

    params = dict(hparams.values(),
                  num_shards=num_shards,
                  num_cores_per_replica=num_cores_per_replica,
                  use_tpu=FLAGS.use_tpu,
                  resnet_checkpoint=FLAGS.resnet_checkpoint,
                  val_json_file=FLAGS.val_json_file,
                  model_dir=FLAGS.model_dir)

    tpu_config = tf.contrib.tpu.TPUConfig(
        params['iterations_per_loop'],
        num_shards=num_shards,
        num_cores_per_replica=params['num_cores_per_replica'],
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.
        PER_HOST_V2,
        tpu_job_name=FLAGS.tpu_job_name,
    )

    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=FLAGS.model_dir,
        log_step_count_steps=params['iterations_per_loop'],
        tpu_config=tpu_config,
        save_checkpoints_steps=params['iterations_per_loop'],
    )

    train_replicas_per_worker = (
        params['cores_per_worker'] // params['num_cores_per_replica']
    ) if params['num_cores_per_replica'] else params['cores_per_worker']
    train_params = dict(
        params,
        replicas_per_worker=train_replicas_per_worker,
    )
    eval_params = dict(
        params,
        input_rand_hflip=False,
        resnet_checkpoint=None,
        is_training_bn=False,
        transpose_input=False,
    )

    # MLPerf logging.
    mlp_log.mlperf_print(key='init_start', value=None)
    mlp_log.mlperf_print(key='global_batch_size',
                         value=params['train_batch_size'])
    runner = None
    if run_train and run_eval:
        if params['train_use_tpu_estimator'] or params[
                'eval_use_tpu_estimator']:
            raise RuntimeError(
                'train_and_eval runner does not support TPUEstimator.')
        dist_eval_params = dict(
            eval_params,
            replicas_per_worker=train_replicas_per_worker,
        )
        runner = mask_rcnn_runner.TrainEvalRunner(
            model_fn=mask_rcnn_model.MaskRcnnModelFn(),
            input_fn=dataloader.InputReader(FLAGS.training_file_pattern,
                                            mode=tf.estimator.ModeKeys.TRAIN,
                                            use_fake_data=FLAGS.use_fake_data),
            eval_input_fn=dataloader.InputReader(
                FLAGS.validation_file_pattern,
                mode=tf.estimator.ModeKeys.PREDICT,
                distributed_eval=True),
            eval_metric=coco_metric.EvaluationMetric(FLAGS.val_json_file,
                                                     use_cpp_extension=True),
            train_params=train_params,
            eval_params=dist_eval_params,
            run_config=run_config)
    elif run_train:
        # Check low-level train runner compatibility.
        if not params['train_use_tpu_estimator']:
            if FLAGS.mode == 'train_and_eval':
                raise RuntimeError(
                    'Low level train runner does not support mode '
                    'train_and_eval yet.')
        train_params = dict(
            params,
            replicas_per_worker=train_replicas_per_worker,
        )
        runner = mask_rcnn_runner.TrainRunner(
            model_fn=mask_rcnn_model.MaskRcnnModelFn(),
            input_fn=dataloader.InputReader(FLAGS.training_file_pattern,
                                            mode=tf.estimator.ModeKeys.TRAIN,
                                            use_fake_data=FLAGS.use_fake_data),
            params=train_params,
            run_config=run_config,
            use_tpu_estimator=train_params['train_use_tpu_estimator'])
    else:
        sidecar_eval_params = dict(
            eval_params,
            # sidecar eval only uses one worker and does not use spatial partition.
            replicas_per_worker=FLAGS.num_cores,
        )
        runner = mask_rcnn_runner.EvalRunner(
            mask_rcnn_model.MaskRcnnModelFn(),
            dataloader.InputReader(FLAGS.validation_file_pattern,
                                   mode=tf.estimator.ModeKeys.PREDICT),
            coco_metric.EvaluationMetric(FLAGS.val_json_file,
                                         use_cpp_extension=True),
            sidecar_eval_params,
            run_config,
            use_tpu_estimator=sidecar_eval_params['eval_use_tpu_estimator'])

    if FLAGS.mode == 'train':
        runner.train()
    elif FLAGS.mode == 'eval':

        def terminate_eval():
            tf.logging.info(
                'Terminating eval after %d seconds of no checkpoints' %
                FLAGS.eval_timeout)
            return True

        run_success = False
        # Run evaluation when there's a new checkpoint
        for ckpt in tf.contrib.training.checkpoints_iterator(
                params['model_dir'],
                min_interval_secs=FLAGS.min_eval_interval,
                timeout=FLAGS.eval_timeout,
                timeout_fn=terminate_eval):

            tf.logging.info('Starting to evaluate.')
            try:

                eval_results = runner.evaluate(ckpt)
                current_step, _ = runner.get_step_and_epoch_number(ckpt)

                if (eval_results['AP'] >= mask_rcnn_params.BOX_EVAL_TARGET
                        and eval_results['mask_AP'] >=
                        mask_rcnn_params.MASK_EVAL_TARGET):
                    mlp_log.mlperf_print(key='run_stop',
                                         metadata={'status': 'success'})
                    run_success = True
                    break

                if int(current_step) >= params['total_steps']:
                    tf.logging.info(
                        'Evaluation finished after training step %d' %
                        current_step)
                    break

            except tf.errors.NotFoundError:
                # Since the coordinator is on a different job than the TPU worker,
                # sometimes the TPU worker does not finish initializing until long after
                # the CPU job tells it to start evaluating. In this case, the checkpoint
                # file could have been deleted already.
                tf.logging.info(
                    'Checkpoint %s no longer exists, skipping checkpoint' %
                    ckpt)
        if not run_success:
            mlp_log.mlperf_print(key='run_stop',
                                 metadata={'status': 'aborted'})

    elif FLAGS.mode == 'train_and_eval':
        runner.train_and_eval()
    else:
        tf.logging.info('Mode not found.')
Code example #25
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.batch_size = FLAGS.batch_size
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  base_height, base_width = utils.parse_image_size(config['image_size'])

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, base_height, base_width, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

  # in format (height, width, flip)
  augmentations = [] 
  if FLAGS.enable_tta:
    for size_offset in (0, 128, 256):
      for flip in (False, True):
        augmentations.append((base_height + size_offset, base_width + size_offset, flip))
  else:
    augmentations.append((base_height, base_width, False))

  detections_per_source = dict()
  for height, width, flip in augmentations:
    config.image_size = (height, width)
    # dataset
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(
            config)

    # compute stats for all batches.
    for images, labels in ds:
      if flip:
        images = tf.image.flip_left_right(images)
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(config, cls_outputs,
                                                  box_outputs,
                                                  labels['image_scales'],
                                                  labels['source_ids'], flip)

      for id, d in zip(labels['source_ids'], detections):
        if id.numpy() in detections_per_source:
          detections_per_source[id.numpy()] = tf.concat([d, detections_per_source[id.numpy()]], 0)
        else:
          detections_per_source[id.numpy()] = d


  evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
  for d in detections_per_source.values():
    if FLAGS.enable_tta:
      d = wbf.ensemble_detections(config, d, len(augmentations))
    evaluator.update_state(
        labels['groundtruth_data'].numpy(),
        postprocess.transform_detections(tf.stack([d])).numpy())

  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)