Example #1
 def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
     """Tests computing mAP with is_crowd GT boxes skipped."""
     category_list = [{
         'id': 0,
         'name': 'person'
     }, {
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }]
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1, 2]),
             standard_fields.InputDataFields.groundtruth_is_crowd:
             np.array([0, 1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     metrics = coco_evaluator.evaluate()
     self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
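The groundtruth_is_crowd field above mirrors the per-annotation iscrowd flag in COCO JSON: boxes flagged 1 are excluded from matching, which is why the extra crowd box does not lower precision here. A minimal sketch of deriving that array from COCO-style annotation dicts (the annotations list below is a hypothetical input, not part of the test):

import numpy as np

# Hypothetical COCO-style annotations for a single image.
annotations = [
    {'bbox': [100., 100., 200., 200.], 'category_id': 1, 'iscrowd': 0},
    {'bbox': [99., 99., 200., 200.], 'category_id': 2, 'iscrowd': 1},
]
groundtruth_is_crowd = np.array([ann['iscrowd'] for ann in annotations])
# -> array([0, 1]), the same flags fed to the evaluator above.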
Example #2
 def testRejectionOnDuplicateDetections(self):
     """Tests that detections cannot be added more than once for an image."""
     categories = [{
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }, {
         'id': 3,
         'name': 'elephant'
     }]
     #  Add groundtruth
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[99., 100., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     detections_lists_len = len(coco_evaluator._detection_boxes_list)
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',  # Note that this image id was previously added.
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     self.assertEqual(detections_lists_len,
                      len(coco_evaluator._detection_boxes_list))
Example #3
    def testRejectionOnDuplicateGroundtruth(self):
        """Tests that groundtruth cannot be added more than once for an image."""
        categories = [{
            'id': 1,
            'name': 'cat'
        }, {
            'id': 2,
            'name': 'dog'
        }, {
            'id': 3,
            'name': 'elephant'
        }]
        #  Add groundtruth
        coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
        image_key1 = 'img1'
        groundtruth_boxes1 = np.array(
            [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
        groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
        coco_evaluator.add_single_ground_truth_image_info(
            image_key1, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
            })
        groundtruth_lists_len = len(coco_evaluator._groundtruth_list)

        # Add groundtruth with the same image id.
        coco_evaluator.add_single_ground_truth_image_info(
            image_key1, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
            })
        self.assertEqual(groundtruth_lists_len,
                         len(coco_evaluator._groundtruth_list))
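Note that duplicates are dropped rather than raised: the evaluator tracks the image ids it has already seen and ignores a repeated submission, which is why this test and Example #2 assert on list lengths instead of expecting an exception. A condensed sketch of that contract, with gt_dict as shorthand for the groundtruth dict built above:

before = len(coco_evaluator._groundtruth_list)
coco_evaluator.add_single_ground_truth_image_info(image_key1, gt_dict)  # ignored
assert len(coco_evaluator._groundtruth_list) == before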
Example #4
 def testExceptionRaisedWithMissingGroundtruth(self):
     """Tests that exception is raised for detection with missing groundtruth."""
     categories = [{
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }, {
         'id': 3,
         'name': 'elephant'
     }]
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
     with self.assertRaises(ValueError):
         coco_evaluator.add_single_detected_image_info(
             image_id='image1',
             detections_dict={
                 standard_fields.DetectionResultFields.detection_boxes:
                 np.array([[100., 100., 200., 200.]]),
                 standard_fields.DetectionResultFields.detection_scores:
                 np.array([.8]),
                 standard_fields.DetectionResultFields.detection_classes:
                 np.array([1])
             })
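In client code the same contract can be handled explicitly; a minimal sketch of catching the error this test expects, with the evaluator and detections dict built as in the examples above:

try:
    coco_evaluator.add_single_detected_image_info(
        image_id='image1', detections_dict=detections_dict)
except ValueError:
    # No groundtruth was registered under this image id, so the
    # detections are rejected instead of silently skewing the metrics.
    pass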
Example #5
  def model_fn(features, labels, mode, params=None):
    """Constructs the object detection model.

    Args:
      features: Dictionary of feature tensors, returned from `input_fn`.
      labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
        otherwise None.
      mode: Mode key from tf.estimator.ModeKeys.
      params: Parameter dictionary passed from the estimator.

    Returns:
      An `EstimatorSpec` that encapsulates the model and its serving
        configurations.
    """
    params = params or {}
    total_loss, train_op, detections, export_outputs = None, None, None, None
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    detection_model = detection_model_fn(is_training=is_training,
                                         add_summaries=(not use_tpu))
    scaffold_fn = None

    if mode == tf.estimator.ModeKeys.TRAIN:
      labels = unstack_batch(
          labels,
          unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
    elif mode == tf.estimator.ModeKeys.EVAL:
      labels = unstack_batch(labels, unpad_groundtruth_tensors=False)

    if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
      gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
      gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
      gt_masks_list = None
      if fields.InputDataFields.groundtruth_instance_masks in labels:
        gt_masks_list = labels[
            fields.InputDataFields.groundtruth_instance_masks]
      gt_keypoints_list = None
      if fields.InputDataFields.groundtruth_keypoints in labels:
        gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
      detection_model.provide_groundtruth(
          groundtruth_boxes_list=gt_boxes_list,
          groundtruth_classes_list=gt_classes_list,
          groundtruth_masks_list=gt_masks_list,
          groundtruth_keypoints_list=gt_keypoints_list)

    preprocessed_images = features[fields.InputDataFields.image]
    prediction_dict = detection_model.predict(
        preprocessed_images, features[fields.InputDataFields.true_image_shape])
    detections = detection_model.postprocess(
        prediction_dict, features[fields.InputDataFields.true_image_shape])

    if mode == tf.estimator.ModeKeys.TRAIN:
      if train_config.fine_tune_checkpoint and hparams.load_pretrained:
        asg_map = detection_model.restore_map(
            from_detection_checkpoint=train_config.from_detection_checkpoint,
            load_all_detection_checkpoint_vars=(
                train_config.load_all_detection_checkpoint_vars))
        available_var_map = (
            variables_helper.get_variables_available_in_checkpoint(
                asg_map, train_config.fine_tune_checkpoint,
                include_global_step=False))
        if use_tpu:
          def tpu_scaffold():
            tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
                                          available_var_map)
            return tf.train.Scaffold()
          scaffold_fn = tpu_scaffold
        else:
          tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
                                        available_var_map)

    if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
      losses_dict = detection_model.loss(
          prediction_dict, features[fields.InputDataFields.true_image_shape])
      losses = [loss_tensor for loss_tensor in losses_dict.values()]
      total_loss = tf.add_n(losses, name='total_loss')

    if mode == tf.estimator.ModeKeys.TRAIN:
      global_step = tf.train.get_or_create_global_step()
      training_optimizer, optimizer_summary_vars = optimizer_builder.build(
          train_config.optimizer)

      if use_tpu:
        training_optimizer = tpu_optimizer.CrossShardOptimizer(
            training_optimizer)

      # Optionally freeze some layers by setting their gradients to be zero.
      trainable_variables = None
      if train_config.freeze_variables:
        trainable_variables = tf.contrib.framework.filter_variables(
            tf.trainable_variables(),
            exclude_patterns=train_config.freeze_variables)

      clip_gradients_value = None
      if train_config.gradient_clipping_by_norm > 0:
        clip_gradients_value = train_config.gradient_clipping_by_norm

      if not use_tpu:
        for var in optimizer_summary_vars:
          tf.summary.scalar(var.op.name, var)
      summaries = [] if use_tpu else None
      train_op = tf.contrib.layers.optimize_loss(
          loss=total_loss,
          global_step=global_step,
          learning_rate=None,
          clip_gradients=clip_gradients_value,
          optimizer=training_optimizer,
          variables=trainable_variables,
          summaries=summaries,
          name='')  # Preventing scope prefix on all variables.

    if mode == tf.estimator.ModeKeys.PREDICT:
      export_outputs = {
          tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
              tf.estimator.export.PredictOutput(detections)
      }

    eval_metric_ops = None
    if mode == tf.estimator.ModeKeys.EVAL:
      # Detection summaries during eval.
      class_agnostic = (fields.DetectionResultFields.detection_classes
                        not in detections)
      groundtruth = _get_groundtruth_data(detection_model, class_agnostic)
      eval_dict = eval_util.result_dict_for_single_example(
          tf.expand_dims(features[fields.InputDataFields.original_image][0], 0),
          features[inputs.HASH_KEY][0],
          detections,
          groundtruth,
          class_agnostic=class_agnostic,
          scale_to_absolute=False)

      if class_agnostic:
        category_index = label_map_util.create_class_agnostic_category_index()
      else:
        category_index = label_map_util.create_category_index_from_labelmap(
            eval_input_config.label_map_path)
      detection_and_groundtruth = vis_utils.draw_side_by_side_evaluation_image(
          eval_dict, category_index, max_boxes_to_draw=20, min_score_thresh=0.2)
      if not use_tpu:
        tf.summary.image('Detections_Left_Groundtruth_Right',
                         detection_and_groundtruth)

      # Eval metrics on a single image.
      detection_fields = fields.DetectionResultFields()
      input_data_fields = fields.InputDataFields()
      coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
          category_index.values())
      eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(
          image_id=eval_dict[input_data_fields.key],
          groundtruth_boxes=eval_dict[input_data_fields.groundtruth_boxes],
          groundtruth_classes=eval_dict[input_data_fields.groundtruth_classes],
          detection_boxes=eval_dict[detection_fields.detection_boxes],
          detection_scores=eval_dict[detection_fields.detection_scores],
          detection_classes=eval_dict[detection_fields.detection_classes])

    if use_tpu:
      return tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          scaffold_fn=scaffold_fn,
          predictions=detections,
          loss=total_loss,
          train_op=train_op,
          eval_metrics=eval_metric_ops,
          export_outputs=export_outputs)
    else:
      return tf.estimator.EstimatorSpec(
          mode=mode,
          predictions=detections,
          loss=total_loss,
          train_op=train_op,
          eval_metric_ops=eval_metric_ops,
          export_outputs=export_outputs)
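This model_fn closes over detection_model_fn, train_config, eval_input_config, hparams and use_tpu from an enclosing builder function. A rough sketch of how such a closure is typically handed to an estimator; the builder name and argument list below are illustrative assumptions, not part of the excerpt:

def create_model_fn(detection_model_fn, train_config, eval_input_config,
                    hparams, use_tpu=False):
  def model_fn(features, labels, mode, params=None):
    ...  # body as in Example #5, capturing the arguments above
  return model_fn

estimator = tf.estimator.Estimator(
    model_fn=create_model_fn(detection_model_fn, train_config,
                             eval_input_config, hparams),
    model_dir='/tmp/detection_model')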
Example #6
    def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
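        """Tests the estimator eval metric ops end to end on matching data."""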
        category_list = [{
            'id': 0,
            'name': 'person'
        }, {
            'id': 1,
            'name': 'cat'
        }, {
            'id': 2,
            'name': 'dog'
        }]
        coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
        image_id = tf.placeholder(tf.string, shape=())
        groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
        groundtruth_classes = tf.placeholder(tf.float32, shape=(None,))
        detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
        detection_scores = tf.placeholder(tf.float32, shape=(None,))
        detection_classes = tf.placeholder(tf.float32, shape=(None,))

        eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(
            image_id, groundtruth_boxes, groundtruth_classes, detection_boxes,
            detection_scores, detection_classes)

        _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']

        with self.test_session() as sess:
            sess.run(update_op,
                     feed_dict={
                         image_id: 'image1',
                         groundtruth_boxes: np.array([[100., 100., 200.,
                                                       200.]]),
                         groundtruth_classes: np.array([1]),
                         detection_boxes: np.array([[100., 100., 200., 200.]]),
                         detection_scores: np.array([.8]),
                         detection_classes: np.array([1])
                     })
            sess.run(update_op,
                     feed_dict={
                         image_id: 'image2',
                         groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
                         groundtruth_classes: np.array([3]),
                         detection_boxes: np.array([[50., 50., 100., 100.]]),
                         detection_scores: np.array([.7]),
                         detection_classes: np.array([3])
                     })
            sess.run(update_op,
                     feed_dict={
                         image_id: 'image3',
                         groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
                         groundtruth_classes: np.array([2]),
                         detection_boxes: np.array([[25., 25., 50., 50.]]),
                         detection_scores: np.array([.9]),
                         detection_classes: np.array([2])
                     })
        metrics = {}
        for key, (value_op, _) in eval_metric_ops.items():
            metrics[key] = value_op
        metrics = sess.run(metrics)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'],
                               1.0)
        self.assertAlmostEqual(
            metrics['DetectionBoxes_Precision/mAP (medium)'], -1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'],
                               1.0)
        self.assertAlmostEqual(
            metrics['DetectionBoxes_Recall/AR@100 (medium)'], -1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'],
                               1.0)
        self.assertFalse(coco_evaluator._groundtruth_list)
        self.assertFalse(coco_evaluator._detection_boxes_list)
        self.assertFalse(coco_evaluator._image_ids)
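The test follows the standard TF1 streaming-metric protocol: each entry in eval_metric_ops is a (value_op, update_op) pair, update_op is run once per image to feed the evaluator, and reading the value ops afterwards runs the COCO evaluation and empties the accumulated state (hence the assertFalse checks). A condensed sketch of the same loop outside a test, where feed_dicts is a placeholder iterable of per-image feeds:

value_op, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with tf.Session() as sess:
    for feed in feed_dicts:          # one feed_dict per evaluated image
        sess.run(update_op, feed_dict=feed)
    map_value = sess.run(value_op)   # finalizes the COCO evaluation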
Example #7
 def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
     """Tests that mAP is calculated correctly on GT and Detections."""
     category_list = [{
         'id': 0,
         'name': 'person'
     }, {
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }]
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image2',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[50., 50., 100., 100.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image2',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[50., 50., 100., 100.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image3',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[25., 25., 50., 50.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image3',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[25., 25., 50., 50.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     metrics = coco_evaluator.evaluate()
     self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
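If one evaluator instance is reused across evaluation rounds, its accumulated state should be reset in between; the evaluator exposes clear() for this (a minimal sketch continuing from the code above):

metrics = coco_evaluator.evaluate()
coco_evaluator.clear()  # drops stored groundtruth, detections and image ids
# coco_evaluator can now accumulate a fresh round of images.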