Code Example #1
 def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
     """Tests computing mAP with is_crowd GT boxes skipped."""
     category_list = [{
         'id': 0,
         'name': 'person'
     }, {
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }]
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1, 2]),
             standard_fields.InputDataFields.groundtruth_is_crowd:
             np.array([0, 1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     metrics = coco_evaluator.evaluate()
     self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
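The test snippets in these examples are method bodies from the TensorFlow Object Detection API's COCO evaluation tests. Run standalone they would additionally need the module imports and a tf.test.TestCase wrapper, along the lines of the minimal sketch below (the class name is hypothetical; the import paths assume the Object Detection API package layout):

import numpy as np
import tensorflow as tf

from object_detection.core import standard_fields
from object_detection.metrics import coco_evaluation


class CocoDetectionEvaluationTest(tf.test.TestCase):
    # The test methods shown in these examples would be defined inside a
    # tf.test.TestCase subclass such as this one.
    pass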
Code Example #2
 def testRejectionOnDuplicateDetections(self):
     """Tests that detections cannot be added more than once for an image."""
     categories = [{
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }, {
         'id': 3,
         'name': 'elephant'
     }]
     #  Add groundtruth
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[99., 100., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     detections_lists_len = len(coco_evaluator._detection_boxes_list)
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',  # Note that this image id was previously added.
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     self.assertEqual(detections_lists_len,
                      len(coco_evaluator._detection_boxes_list))
Code Example #3
    def testRejectionOnDuplicateGroundtruth(self):
        """Tests that groundtruth cannot be added more than once for an image."""
        categories = [{
            'id': 1,
            'name': 'cat'
        }, {
            'id': 2,
            'name': 'dog'
        }, {
            'id': 3,
            'name': 'elephant'
        }]
        #  Add groundtruth
        coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
        image_key1 = 'img1'
        groundtruth_boxes1 = np.array(
            [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
        groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
        coco_evaluator.add_single_ground_truth_image_info(
            image_key1, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
            })
        groundtruth_lists_len = len(coco_evaluator._groundtruth_list)

        # Add groundtruth with the same image id.
        coco_evaluator.add_single_ground_truth_image_info(
            image_key1, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
            })
        self.assertEqual(groundtruth_lists_len,
                         len(coco_evaluator._groundtruth_list))
Code Example #4
 def testExceptionRaisedWithMissingGroundtruth(self):
     """Tests that exception is raised for detection with missing groundtruth."""
     categories = [{
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }, {
         'id': 3,
         'name': 'elephant'
     }]
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
     with self.assertRaises(ValueError):
         coco_evaluator.add_single_detected_image_info(
             image_id='image1',
             detections_dict={
                 standard_fields.DetectionResultFields.detection_boxes:
                 np.array([[100., 100., 200., 200.]]),
                 standard_fields.DetectionResultFields.detection_scores:
                 np.array([.8]),
                 standard_fields.DetectionResultFields.detection_classes:
                 np.array([1])
             })
Code Example #5
File: eval_util.py  Project: uniquetrij/SecureIt
def get_eval_metric_ops_for_evaluators(evaluation_metrics,
                                       categories,
                                       eval_dict,
                                       include_metrics_per_category=False):
    """Returns a dictionary of eval metric ops to use with `tf.EstimatorSpec`.

  Args:
    evaluation_metrics: List of evaluation metric names. Current options are
      'coco_detection_metrics' and 'coco_mask_metrics'.
    categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
    eval_dict: An evaluation dictionary, returned from
      result_dict_for_single_example().
    include_metrics_per_category: If True, additionally include per-category
      metrics.

  Returns:
    A dictionary of metric names to tuple of value_op and update_op that can be
    used as eval metric ops in tf.EstimatorSpec.

  Raises:
    ValueError: If any of the metrics in `evaluation_metric` is not
    'coco_detection_metrics' or 'coco_mask_metrics'.
  """
    evaluation_metrics = list(set(evaluation_metrics))

    input_data_fields = fields.InputDataFields
    detection_fields = fields.DetectionResultFields
    eval_metric_ops = {}
    for metric in evaluation_metrics:
        if metric == 'coco_detection_metrics':
            coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
                categories,
                include_metrics_per_category=include_metrics_per_category)
            eval_metric_ops.update(
                coco_evaluator.get_estimator_eval_metric_ops(
                    image_id=eval_dict[input_data_fields.key],
                    groundtruth_boxes=eval_dict[
                        input_data_fields.groundtruth_boxes],
                    groundtruth_classes=eval_dict[
                        input_data_fields.groundtruth_classes],
                    detection_boxes=eval_dict[
                        detection_fields.detection_boxes],
                    detection_scores=eval_dict[
                        detection_fields.detection_scores],
                    detection_classes=eval_dict[
                        detection_fields.detection_classes],
                    groundtruth_is_crowd=eval_dict.get(
                        input_data_fields.groundtruth_is_crowd)))
        elif metric == 'coco_mask_metrics':
            coco_mask_evaluator = coco_evaluation.CocoMaskEvaluator(
                categories,
                include_metrics_per_category=include_metrics_per_category)
            eval_metric_ops.update(
                coco_mask_evaluator.get_estimator_eval_metric_ops(
                    image_id=eval_dict[input_data_fields.key],
                    groundtruth_boxes=eval_dict[
                        input_data_fields.groundtruth_boxes],
                    groundtruth_classes=eval_dict[
                        input_data_fields.groundtruth_classes],
                    groundtruth_instance_masks=eval_dict[
                        input_data_fields.groundtruth_instance_masks],
                    detection_scores=eval_dict[
                        detection_fields.detection_scores],
                    detection_classes=eval_dict[
                        detection_fields.detection_classes],
                    detection_masks=eval_dict[
                        detection_fields.detection_masks],
                    groundtruth_is_crowd=eval_dict.get(
                        input_data_fields.groundtruth_is_crowd),
                ))
        else:
            raise ValueError(
                'The only evaluation metrics supported are '
                '"coco_detection_metrics" and "coco_mask_metrics". '
                'Found {} in the evaluation metrics'.format(metric))

    return eval_metric_ops
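As the docstring notes, the returned dictionary is meant to be passed as eval_metric_ops to tf.estimator.EstimatorSpec. A minimal sketch of that wiring is shown below; model_fn, eval_dict and total_loss are hypothetical placeholders for whatever the surrounding model code provides:

def model_fn(features, labels, mode, params):
    # ... model construction, loss computation and eval_dict assembly
    # (e.g. via result_dict_for_single_example()) happen elsewhere ...
    eval_metric_ops = get_eval_metric_ops_for_evaluators(
        evaluation_metrics=['coco_detection_metrics'],
        categories=params['categories'],
        eval_dict=eval_dict)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=total_loss,
        eval_metric_ops=eval_metric_ops)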
Code Example #6
    def testGetOneMAPWithMatchingGroundtruthAndDetectionsPaddedBatches(self):
        category_list = [{
            'id': 0,
            'name': 'person'
        }, {
            'id': 1,
            'name': 'cat'
        }, {
            'id': 2,
            'name': 'dog'
        }]
        coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
        batch_size = 3
        image_id = tf.placeholder(tf.string, shape=(batch_size))
        groundtruth_boxes = tf.placeholder(tf.float32,
                                           shape=(batch_size, None, 4))
        groundtruth_classes = tf.placeholder(tf.float32,
                                             shape=(batch_size, None))
        num_gt_boxes_per_image = tf.placeholder(tf.int32, shape=(None))
        detection_boxes = tf.placeholder(tf.float32,
                                         shape=(batch_size, None, 4))
        detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
        detection_classes = tf.placeholder(tf.float32,
                                           shape=(batch_size, None))
        num_det_boxes_per_image = tf.placeholder(tf.int32, shape=(None))

        eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(
            image_id,
            groundtruth_boxes,
            groundtruth_classes,
            detection_boxes,
            detection_scores,
            detection_classes,
            num_gt_boxes_per_image=num_gt_boxes_per_image,
            num_det_boxes_per_image=num_det_boxes_per_image)

        _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']

        with self.test_session() as sess:
            sess.run(update_op,
                     feed_dict={
                         image_id: ['image1', 'image2', 'image3'],
                         groundtruth_boxes:
                         np.array([[[100., 100., 200., 200.], [-1, -1, -1,
                                                               -1]],
                                   [[50., 50., 100., 100.], [-1, -1, -1, -1]],
                                   [[25., 25., 50., 50.], [10., 10., 15.,
                                                           15.]]]),
                         groundtruth_classes:
                         np.array([[1, -1], [3, -1], [2, 2]]),
                         num_gt_boxes_per_image:
                         np.array([1, 1, 2]),
                         detection_boxes:
                         np.array([[[100., 100., 200., 200.], [0., 0., 0.,
                                                               0.]],
                                   [[50., 50., 100., 100.], [0., 0., 0., 0.]],
                                   [[25., 25., 50., 50.], [10., 10., 15.,
                                                           15.]]]),
                         detection_scores:
                         np.array([[.8, 0.], [.7, 0.], [.95, .9]]),
                         detection_classes:
                         np.array([[1, -1], [3, -1], [2, 2]]),
                         num_det_boxes_per_image:
                         np.array([1, 1, 2]),
                     })
        metrics = {}
        for key, (value_op, _) in eval_metric_ops.items():
            metrics[key] = value_op
        metrics = sess.run(metrics)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'],
                               1.0)
        self.assertAlmostEqual(
            metrics['DetectionBoxes_Precision/mAP (medium)'], -1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.75)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'],
                               1.0)
        self.assertAlmostEqual(
            metrics['DetectionBoxes_Recall/AR@100 (medium)'], -1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'],
                               1.0)
        self.assertFalse(coco_evaluator._groundtruth_list)
        self.assertFalse(coco_evaluator._detection_boxes_list)
        self.assertFalse(coco_evaluator._image_ids)
Code Example #7
    def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
        category_list = [{
            'id': 0,
            'name': 'person'
        }, {
            'id': 1,
            'name': 'cat'
        }, {
            'id': 2,
            'name': 'dog'
        }]
        coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
        image_id = tf.placeholder(tf.string, shape=())
        groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
        groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
        detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
        detection_scores = tf.placeholder(tf.float32, shape=(None))
        detection_classes = tf.placeholder(tf.float32, shape=(None))

        eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(
            image_id, groundtruth_boxes, groundtruth_classes, detection_boxes,
            detection_scores, detection_classes)

        _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']

        with self.test_session() as sess:
            sess.run(update_op,
                     feed_dict={
                         image_id: 'image1',
                         groundtruth_boxes: np.array([[100., 100., 200.,
                                                       200.]]),
                         groundtruth_classes: np.array([1]),
                         detection_boxes: np.array([[100., 100., 200., 200.]]),
                         detection_scores: np.array([.8]),
                         detection_classes: np.array([1])
                     })
            sess.run(update_op,
                     feed_dict={
                         image_id: 'image2',
                         groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
                         groundtruth_classes: np.array([3]),
                         detection_boxes: np.array([[50., 50., 100., 100.]]),
                         detection_scores: np.array([.7]),
                         detection_classes: np.array([3])
                     })
            sess.run(update_op,
                     feed_dict={
                         image_id: 'image3',
                         groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
                         groundtruth_classes: np.array([2]),
                         detection_boxes: np.array([[25., 25., 50., 50.]]),
                         detection_scores: np.array([.9]),
                         detection_classes: np.array([2])
                     })
        metrics = {}
        for key, (value_op, _) in eval_metric_ops.items():
            metrics[key] = value_op
        metrics = sess.run(metrics)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'],
                               1.0)
        self.assertAlmostEqual(
            metrics['DetectionBoxes_Precision/mAP (medium)'], -1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'],
                               1.0)
        self.assertAlmostEqual(
            metrics['DetectionBoxes_Recall/AR@100 (medium)'], -1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'],
                               1.0)
        self.assertFalse(coco_evaluator._groundtruth_list)
        self.assertFalse(coco_evaluator._detection_boxes_list)
        self.assertFalse(coco_evaluator._image_ids)
Code Example #8
 def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
     """Tests that mAP is calculated correctly on GT and Detections."""
     category_list = [{
         'id': 0,
         'name': 'person'
     }, {
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }]
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(category_list)
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image2',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[50., 50., 100., 100.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image2',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[50., 50., 100., 100.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image3',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[25., 25., 50., 50.]]),
             standard_fields.InputDataFields.groundtruth_classes:
             np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image3',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
             np.array([[25., 25., 50., 50.]]),
             standard_fields.DetectionResultFields.detection_scores:
             np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
             np.array([1])
         })
     metrics = coco_evaluator.evaluate()
     self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
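When the evaluator is driven directly like this, evaluate() reads the accumulated groundtruth and detections but does not discard them; to score a fresh set of images with the same instance, reset it explicitly. A minimal sketch, assuming the clear() method exposed by the evaluator (which empties its internal groundtruth, detection and image-id buffers):

metrics = coco_evaluator.evaluate()
coco_evaluator.clear()  # forget everything accumulated so far
# coco_evaluator.add_single_ground_truth_image_info(...) and
# coco_evaluator.add_single_detected_image_info(...) can now be called
# again for the next round of images.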