Example #1
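This and the remaining examples are test methods exercising coco_evaluation.CocoDetectionEvaluator from the TensorFlow Object Detection API (TF 1.x graph mode). A minimal setup sketch that all of them assume is shown below; the import paths follow the upstream package layout, and the categories returned by _get_categories_list() are an assumption modeled on the upstream test file.

 import numpy as np
 import tensorflow as tf  # TF 1.x API: tf.placeholder and test_session are used below

 from object_detection.core import standard_fields
 from object_detection.metrics import coco_evaluation


 def _get_categories_list():
     # Assumed helper: three categories modeled on the upstream test file.
     return [{'id': 1, 'name': 'person'},
             {'id': 2, 'name': 'dog'},
             {'id': 3, 'name': 'cat'}]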
 def testRejectionOnDuplicateDetections(self):
     """Tests that detections cannot be added more than once for an image."""
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
         _get_categories_list())
     #  Add groundtruth
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
                 np.array([[99., 100., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes: np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
                 np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
                 np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
                 np.array([1])
         })
     detections_lists_len = len(coco_evaluator._detection_boxes_list)
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',  # Note that this image id was previously added.
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
                 np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
                 np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
                 np.array([1])
         })
     self.assertEqual(detections_lists_len,
                      len(coco_evaluator._detection_boxes_list))
Example #2
 def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
     """Tests computing mAP with is_crowd GT boxes skipped."""
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
         _get_categories_list())
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
                 np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes:
                 np.array([1, 2]),
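              # The second box is marked is_crowd and is excluded from matching.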
             standard_fields.InputDataFields.groundtruth_is_crowd:
                 np.array([0, 1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
                 np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
                 np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
                 np.array([1])
         })
     metrics = coco_evaluator.evaluate()
     self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #3
    def testRejectionOnDuplicateGroundtruth(self):
        """Tests that groundtruth cannot be added more than once for an image."""
        coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
            _get_categories_list())
        #  Add groundtruth
        image_key1 = 'img1'
        groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
                                      dtype=float)
        groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
        coco_evaluator.add_single_ground_truth_image_info(image_key1, {
            standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
            standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
        })
        groundtruth_lists_len = len(coco_evaluator._groundtruth_list)

        # Add groundtruth with the same image id.
        coco_evaluator.add_single_ground_truth_image_info(image_key1, {
            standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
            standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
        })
        self.assertEqual(groundtruth_lists_len,
                         len(coco_evaluator._groundtruth_list))
Example #4
 def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
     """Tests that mAP is calculated correctly on GT and Detections."""
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
         _get_categories_list())
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image1',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
                 np.array([[100., 100., 200., 200.]]),
             standard_fields.InputDataFields.groundtruth_classes: np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image1',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
                 np.array([[100., 100., 200., 200.]]),
             standard_fields.DetectionResultFields.detection_scores:
                 np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
                 np.array([1])
         })
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image2',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
                 np.array([[50., 50., 100., 100.]]),
             standard_fields.InputDataFields.groundtruth_classes: np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image2',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
                 np.array([[50., 50., 100., 100.]]),
             standard_fields.DetectionResultFields.detection_scores:
                 np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
                 np.array([1])
         })
     coco_evaluator.add_single_ground_truth_image_info(
         image_id='image3',
         groundtruth_dict={
             standard_fields.InputDataFields.groundtruth_boxes:
                 np.array([[25., 25., 50., 50.]]),
             standard_fields.InputDataFields.groundtruth_classes: np.array([1])
         })
     coco_evaluator.add_single_detected_image_info(
         image_id='image3',
         detections_dict={
             standard_fields.DetectionResultFields.detection_boxes:
                 np.array([[25., 25., 50., 50.]]),
             standard_fields.DetectionResultFields.detection_scores:
                 np.array([.8]),
             standard_fields.DetectionResultFields.detection_classes:
                 np.array([1])
         })
     metrics = coco_evaluator.evaluate()
     self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
Example #5
 def testExceptionRaisedWithMissingGroundtruth(self):
     """Tests that exception is raised for detection with missing groundtruth."""
     coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
         _get_categories_list())
     with self.assertRaises(ValueError):
         coco_evaluator.add_single_detected_image_info(
             image_id='image1',
             detections_dict={
                 standard_fields.DetectionResultFields.detection_boxes:
                     np.array([[100., 100., 200., 200.]]),
                 standard_fields.DetectionResultFields.detection_scores:
                     np.array([.8]),
                 standard_fields.DetectionResultFields.detection_classes:
                     np.array([1])
             })
Example #6
    def testGetOneMAPWithMatchingGroundtruthAndDetectionsPaddedBatches(self):
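        """Tests mAP on padded batches; num_*_boxes_per_image marks valid rows."""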
        coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
            _get_categories_list())
        batch_size = 3
        image_id = tf.placeholder(tf.string, shape=batch_size)
        groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
        groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
        num_gt_boxes_per_image = tf.placeholder(tf.int32, shape=None)
        detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
        detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
        detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
        num_det_boxes_per_image = tf.placeholder(tf.int32, shape=None)

        input_data_fields = standard_fields.InputDataFields
        detection_fields = standard_fields.DetectionResultFields
        eval_dict = {
            input_data_fields.key: image_id,
            input_data_fields.groundtruth_boxes: groundtruth_boxes,
            input_data_fields.groundtruth_classes: groundtruth_classes,
            detection_fields.detection_boxes: detection_boxes,
            detection_fields.detection_scores: detection_scores,
            detection_fields.detection_classes: detection_classes,
            'num_groundtruth_boxes_per_image': num_gt_boxes_per_image,
            'num_det_boxes_per_image': num_det_boxes_per_image
        }

        eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)

        _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']

        with self.test_session() as sess:
            sess.run(
                update_op,
                feed_dict={
                    image_id: ['image1', 'image2', 'image3'],
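                    # Rows beyond num_gt_boxes_per_image are padding
                    # ([-1, -1, -1, -1] boxes with class -1) and are ignored.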
                    groundtruth_boxes:
                        np.array([[[100., 100., 200., 200.], [-1, -1, -1, -1]],
                                  [[50., 50., 100., 100.], [-1, -1, -1, -1]],
                                  [[25., 25., 50., 50.], [10., 10., 15., 15.]]]),
                    groundtruth_classes:
                        np.array([[1, -1], [3, -1], [2, 2]]),
                    num_gt_boxes_per_image:
                        np.array([1, 1, 2]),
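                    # Rows beyond num_det_boxes_per_image are zero padding.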
                    detection_boxes:
                        np.array([[[100., 100., 200., 200.],
                                   [0., 0., 0., 0.],
                                   [0., 0., 0., 0.]],
                                  [[50., 50., 100., 100.],
                                   [0., 0., 0., 0.],
                                   [0., 0., 0., 0.]],
                                  [[25., 25., 50., 50.],
                                   [10., 10., 15., 15.],
                                   [10., 10., 15., 15.]]]),
                    detection_scores:
                        np.array([[.8, 0., 0.], [.7, 0., 0.], [.95, .9, 0.9]]),
                    detection_classes:
                        np.array([[1, -1, -1], [3, -1, -1], [2, 2, 2]]),
                    num_det_boxes_per_image:
                        np.array([1, 1, 3]),
                })

        # Only the valid (unpadded) boxes were added: 1 + 1 + 2 groundtruth
        # boxes and 1 + 1 + 3 detection boxes.
        self.assertEqual(len(coco_evaluator._groundtruth_list), 4)
        self.assertEqual(len(coco_evaluator._detection_boxes_list), 5)

        metrics = {}
        for key, (value_op, _) in eval_metric_ops.items():
            metrics[key] = value_op
        metrics = sess.run(metrics)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
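        # Evaluating the value ops runs evaluate() and then clears the
        # evaluator's accumulated state, hence the assertions below.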
        self.assertFalse(coco_evaluator._groundtruth_list)
        self.assertFalse(coco_evaluator._detection_boxes_list)
        self.assertFalse(coco_evaluator._image_ids)
Example #7
    def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self):
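        """Tests computing mAP on batched groundtruth and detection tensors."""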
        coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
            _get_categories_list())
        batch_size = 3
        image_id = tf.placeholder(tf.string, shape=batch_size)
        groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
        groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
        detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
        detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
        detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))

        input_data_fields = standard_fields.InputDataFields
        detection_fields = standard_fields.DetectionResultFields
        eval_dict = {
            input_data_fields.key: image_id,
            input_data_fields.groundtruth_boxes: groundtruth_boxes,
            input_data_fields.groundtruth_classes: groundtruth_classes,
            detection_fields.detection_boxes: detection_boxes,
            detection_fields.detection_scores: detection_scores,
            detection_fields.detection_classes: detection_classes
        }

        eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)

        _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']

        with self.test_session() as sess:
            sess.run(update_op,
                     feed_dict={
                         image_id: ['image1', 'image2', 'image3'],
                         groundtruth_boxes: np.array([[[100., 100., 200., 200.]],
                                                      [[50., 50., 100., 100.]],
                                                      [[25., 25., 50., 50.]]]),
                         groundtruth_classes: np.array([[1], [3], [2]]),
                         detection_boxes: np.array([[[100., 100., 200., 200.]],
                                                    [[50., 50., 100., 100.]],
                                                    [[25., 25., 50., 50.]]]),
                         detection_scores: np.array([[.8], [.7], [.9]]),
                         detection_classes: np.array([[1], [3], [2]])
                     })
        metrics = {}
        for key, (value_op, _) in eval_metric_ops.items():
            metrics[key] = value_op
        metrics = sess.run(metrics)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
                               1.0)
        self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
        self.assertFalse(coco_evaluator._groundtruth_list)
        self.assertFalse(coco_evaluator._detection_boxes_list)
        self.assertFalse(coco_evaluator._image_ids)
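
For reference, the direct (non-estimator) flow used in Examples #1 through #5 reduces to three calls: add groundtruth for an image id, add detections for the same id, then evaluate. A minimal sketch with a made-up image id and boxes:

 evaluator = coco_evaluation.CocoDetectionEvaluator(_get_categories_list())
 evaluator.add_single_ground_truth_image_info(
     image_id='demo_image',  # hypothetical id
     groundtruth_dict={
         standard_fields.InputDataFields.groundtruth_boxes:
             np.array([[10., 10., 50., 50.]]),
         standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
     })
 evaluator.add_single_detected_image_info(
     image_id='demo_image',
     detections_dict={
         standard_fields.DetectionResultFields.detection_boxes:
             np.array([[10., 10., 50., 50.]]),
         standard_fields.DetectionResultFields.detection_scores: np.array([.9]),
         standard_fields.DetectionResultFields.detection_classes: np.array([1]),
     })
 metrics = evaluator.evaluate()  # keys like 'DetectionBoxes_Precision/mAP'
 evaluator.clear()  # reset accumulated state before reusing the evaluator

In the estimator path shown in Examples #6 and #7, evaluation and clearing happen inside the metric value ops, which is why those tests assert empty internal state afterwards.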