  def testMissingDetectionResults(self):
    """Tests that a ValueError is raised when groundtruth is missing."""
    category_list = [{'id': 0, 'name': 'dog'}]
    video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames(
        category_list)
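    # Categories use the COCO format ({'id': ..., 'name': ...}). Each entry of
    # groundtruth_dict below describes one frame of the video, with boxes in
    # the TF Object Detection [ymin, xmin, ymax, xmax] convention.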
    video_evaluator.add_single_ground_truth_image_info(
        image_id='image1',
        groundtruth_dict=[{
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }])
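    # No groundtruth was ever added for 'image3', so adding detections for it
    # should fail loudly instead of silently scoring against nothing.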
    with self.assertRaisesRegex(
        ValueError, r'Missing groundtruth for image-frame id:.*'):
      video_evaluator.add_single_detected_image_info(
          image_id='image3',
          detections_dict=[{
              standard_fields.DetectionResultFields.detection_boxes:
                  np.array([[100., 100., 200., 200.]]),
              standard_fields.DetectionResultFields.detection_scores:
                  np.array([.8]),
              standard_fields.DetectionResultFields.detection_classes:
                  np.array([1])
          }])
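
  # A minimal companion sketch, not part of the original suite: the test name
  # is hypothetical, but it only uses the calls exercised above to show the
  # complementary happy path, where the detection's image_id does match
  # previously added groundtruth and no error is raised.
  def testMatchingDetectionResultsDoNotRaise(self):
    category_list = [{'id': 0, 'name': 'dog'}]
    video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames(
        category_list)
    video_evaluator.add_single_ground_truth_image_info(
        image_id='image1',
        groundtruth_dict=[{
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }])
    # Same image-frame id as the groundtruth above, so this must not raise.
    video_evaluator.add_single_detected_image_info(
        image_id='image1',
        detections_dict=[{
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        }])
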
  def testGroundtruthAndDetectionsDisagreeOnAllFrames(self):
    """Tests that mAP is calculated on several different frame results."""
    category_list = [{'id': 0, 'name': 'dog'}, {'id': 1, 'name': 'cat'}]
    video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames(
        category_list)
    video_evaluator.add_single_ground_truth_image_info(
        image_id='image1',
        groundtruth_dict=[{
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[50., 50., 200., 200.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }, {
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[50., 50., 100., 100.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }])
    video_evaluator.add_single_detected_image_info(
        image_id='image1',
        # The detection on the first frame does not match its groundtruth box;
        # only the last frame agrees.
        detections_dict=[{
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        }, {
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[50., 50., 100., 100.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        }])

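    # On the first frame, detection [100, 100, 200, 200] overlaps groundtruth
    # [50, 50, 200, 200] with IoU = 10000 / 22500 ~= 0.44, below the lowest
    # COCO matching threshold of 0.5, so that frame yields a false positive
    # plus a missed groundtruth and pulls mAP below 1.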
    metrics = video_evaluator.evaluate()
    self.assertNotEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
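
  # A hypothetical counterpart sketch (the name and data are additions, the
  # API calls are the ones used above): when detections agree with the
  # groundtruth on every frame of the video, mAP should be exactly 1.0.
  def testGroundtruthAndDetectionsAgreeOnAllFrames(self):
    category_list = [{'id': 0, 'name': 'dog'}, {'id': 1, 'name': 'cat'}]
    video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames(
        category_list)
    video_evaluator.add_single_ground_truth_image_info(
        image_id='image1',
        groundtruth_dict=[{
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[50., 50., 200., 200.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }, {
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[50., 50., 100., 100.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }])
    video_evaluator.add_single_detected_image_info(
        image_id='image1',
        # Identical boxes on both frames: every detection is a true positive
        # at all IoU thresholds.
        detections_dict=[{
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[50., 50., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        }, {
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[50., 50., 100., 100.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        }])

    metrics = video_evaluator.evaluate()
    self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
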
  def testGroundtruthAndDetections(self):
    """Tests that mAP is calculated correctly on GT and Detections."""
    category_list = [{'id': 0, 'name': 'dog'}, {'id': 1, 'name': 'cat'}]
    video_evaluator = coco_evaluation_all_frames.CocoEvaluationAllFrames(
        category_list)
    video_evaluator.add_single_ground_truth_image_info(
        image_id='image1',
        groundtruth_dict=[{
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }])
    video_evaluator.add_single_ground_truth_image_info(
        image_id='image2',
        groundtruth_dict=[{
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[50., 50., 100., 100.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }])
    video_evaluator.add_single_ground_truth_image_info(
        image_id='image3',
        groundtruth_dict=[{
            standard_fields.InputDataFields.groundtruth_boxes:
                np.array([[50., 100., 100., 120.]]),
            standard_fields.InputDataFields.groundtruth_classes:
                np.array([1])
        }])
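    # Each detection below reproduces its groundtruth box exactly with the
    # same class label, so every image scores perfectly and mAP should be 1.0.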
    video_evaluator.add_single_detected_image_info(
        image_id='image1',
        detections_dict=[{
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[100., 100., 200., 200.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        }])
    video_evaluator.add_single_detected_image_info(
        image_id='image2',
        detections_dict=[{
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[50., 50., 100., 100.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        }])
    video_evaluator.add_single_detected_image_info(
        image_id='image3',
        detections_dict=[{
            standard_fields.DetectionResultFields.detection_boxes:
                np.array([[50., 100., 100., 120.]]),
            standard_fields.DetectionResultFields.detection_scores:
                np.array([.8]),
            standard_fields.DetectionResultFields.detection_classes:
                np.array([1])
        }])
    metrics = video_evaluator.evaluate()
    self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
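
  # A hypothetical convenience helper, not in the original file, included only
  # to illustrate the shape of the single-frame detections_dict the tests
  # above build by hand; the name _make_single_frame_detections is made up.
  def _make_single_frame_detections(self, box, score=.8, label=1):
    """Returns a one-frame detections_dict for a single box (sketch only)."""
    return [{
        standard_fields.DetectionResultFields.detection_boxes:
            np.array([box]),
        standard_fields.DetectionResultFields.detection_scores:
            np.array([score]),
        standard_fields.DetectionResultFields.detection_classes:
            np.array([label])
    }]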