Example #1
    def _evaluate_single(self, detections_dict, groundtruth_list):
        groundtruth_dict = {
            'annotations': groundtruth_list,
            'images': [{
                'id': gt['image_id']
            } for gt in groundtruth_list],
            'categories': self.categories
        }

        if len(groundtruth_list) > 0:
            detections_list = coco_tools.ExportSingleImageDetectionBoxesToCoco(
                image_id=groundtruth_list[0]['image_id'],
                category_id_set=set([c['id'] for c in self.categories]),
                detection_boxes=detections_dict['detection_boxes'],
                detection_scores=detections_dict['detection_scores'],
                detection_classes=detections_dict['detection_classes'])
        else:
            detections_list = []

        # The COCO evaluation prints progress output we do not need, so hide it.
        with HiddenPrints():
            groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
            detections = groundtruth.LoadAnnotations(detections_list)
            evaluator = coco_tools.COCOEvalWrapper(groundtruth,
                                                   detections,
                                                   iou_type='bbox')
            summary_metrics, _ = evaluator.ComputeMetrics()

        return summary_metrics['Precision/mAP']
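Example #1 depends on a HiddenPrints context manager to silence that progress output. A minimal sketch of such a helper, assuming all it needs to do is redirect sys.stdout while the block runs (the class name comes from the example above; this implementation is illustrative, not part of coco_tools):

import os
import sys


class HiddenPrints:
    """Context manager that temporarily redirects stdout to os.devnull."""

    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Close the devnull handle and restore the original stream.
        sys.stdout.close()
        sys.stdout = self._original_stdout


# Anything printed inside the block is discarded.
with HiddenPrints():
    print('COCO evaluation chatter goes here.')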
Example #2
    def add_single_detected_image_info(self, image_id, detections_dict):
        """Add detection results of all frames to the eval pipeline.

    This method overrides the function defined in the base class.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A list of dictionaries, one per frame, each containing -
        DetectionResultFields.detection_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` detection boxes of the format
          [ymin, xmin, ymax, xmax] in absolute image coordinates.
        DetectionResultFields.detection_scores: float32 numpy array of shape
          [num_boxes] containing detection scores for the boxes.
        DetectionResultFields.detection_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed detection classes for the boxes.

    Raises:
      ValueError: If groundtruth for the image_id is not available.
    """
        for idx, det in enumerate(detections_dict):
            if not det:
                continue

            image_frame_id = '{}_{}'.format(image_id, idx)
            if image_frame_id not in self._image_ids:
                raise ValueError(
                    'Missing groundtruth for image-frame id: {}'.format(
                        image_frame_id))

            if self._image_ids[image_frame_id]:
                tf.logging.warning(
                    'Ignoring detection with image id %s since it was '
                    'previously added', image_frame_id)
                continue

            fields = standard_fields.DetectionResultFields
            self._detection_boxes_list.extend(
                coco_tools.ExportSingleImageDetectionBoxesToCoco(
                    image_id=image_frame_id,
                    category_id_set=self._category_id_set,
                    detection_boxes=det[fields.detection_boxes],
                    detection_scores=det[fields.detection_scores],
                    detection_classes=det[fields.detection_classes]))
            self._image_ids[image_frame_id] = True
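Example #2 expands one video-level id into per-frame COCO image ids by suffixing the frame index, and silently skips frames with no detections. A small illustration of that id scheme under the same assumptions (the sample data below is invented for demonstration):

image_id = 'video_0001'
detections_per_frame = [
    {'detection_boxes': [[0.0, 0.0, 10.0, 10.0]]},  # frame 0: has detections
    {},                                             # frame 1: empty, skipped
    {'detection_boxes': [[5.0, 5.0, 20.0, 20.0]]},  # frame 2: has detections
]

frame_ids = []
for idx, det in enumerate(detections_per_frame):
    if not det:
        continue  # empty frames contribute nothing, as in the method above
    frame_ids.append('{}_{}'.format(image_id, idx))

print(frame_ids)  # ['video_0001_0', 'video_0001_2']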
Example #3
  def add_single_detected_image_info(self,
                                     image_id,
                                     detections_dict):
    """Adds detections for a single image to be used for evaluation.

    If a detection has already been added for this image id, a warning is
    logged, and the detection is skipped.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        DetectionResultFields.detection_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` detection boxes of the format
          [ymin, xmin, ymax, xmax] in absolute image coordinates.
        DetectionResultFields.detection_scores: float32 numpy array of shape
          [num_boxes] containing detection scores for the boxes.
        DetectionResultFields.detection_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed detection classes for the boxes.

    Raises:
      ValueError: If groundtruth for the image_id is not available.
    """
    if image_id not in self._image_ids:
      raise ValueError('Missing groundtruth for image id: {}'.format(image_id))

    if self._image_ids[image_id]:
      tf.logging.warning('Ignoring detection with image id %s since it was '
                         'previously added', image_id)
      return

    fields = standard_fields.DetectionResultFields
    self._detection_boxes_list.extend(
        coco_tools.ExportSingleImageDetectionBoxesToCoco(
            image_id=image_id,
            category_id_set=self._category_id_set,
            detection_boxes=detections_dict[fields.detection_boxes],
            detection_scores=detections_dict[fields.detection_scores],
            detection_classes=detections_dict[fields.detection_classes]))
    self._image_ids[image_id] = True
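A hedged sketch of how a caller might assemble the detections_dict that Example #3 expects. Plain string keys are used here in place of the standard_fields.DetectionResultFields constants (in the TF Object Detection API those constants resolve to the same strings), and the evaluator in the commented call is assumed to already hold groundtruth for the image:

import numpy as np

# Boxes are [ymin, xmin, ymax, xmax] in absolute image coordinates,
# scores are per-box confidences, classes are 1-indexed category ids.
detections_dict = {
    'detection_boxes': np.array([[10., 10., 50., 80.],
                                 [20., 30., 60., 90.]], dtype=np.float32),
    'detection_scores': np.array([0.9, 0.4], dtype=np.float32),
    'detection_classes': np.array([1, 3], dtype=np.int32),
}

# Hypothetical call, assuming `evaluator` is a COCO detection evaluator whose
# groundtruth for 'image_42' has already been added:
# evaluator.add_single_detected_image_info('image_42', detections_dict)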
Example #4
    def testSingleImageDetectionBoxesExportWithKeypoints(self):
        boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]],
                         dtype=np.float32)
        coco_boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, .5, .5]],
                              dtype=np.float32)
        keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]],
                              [[0, 0], [0.125, 0.125], [0.375, 0.375]],
                              [[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]],
                             dtype=np.float32)
        visibilities = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]],
                                dtype=np.int32)

        classes = np.array([1, 2, 3], dtype=np.int32)
        scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)

        # Exports detections together with keypoints and their visibilities.
        coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco(
            image_id='first_image',
            category_id_set=set([1, 2, 3]),
            detection_boxes=boxes,
            detection_scores=scores,
            detection_classes=classes,
            detection_keypoints=keypoints,
            detection_keypoint_visibilities=visibilities)
        for i, annotation in enumerate(coco_annotations):
            self.assertTrue(
                np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
            self.assertEqual(annotation['image_id'], 'first_image')
            self.assertEqual(annotation['category_id'], classes[i])
            self.assertAlmostEqual(annotation['score'], scores[i])
            self.assertEqual(annotation['num_keypoints'], 3)
            self.assertTrue(
                np.all(np.isclose(annotation['keypoints'][0::3],
                                  keypoints[i, :, 1])))
            self.assertTrue(
                np.all(np.isclose(annotation['keypoints'][1::3],
                                  keypoints[i, :, 0])))
            self.assertTrue(
                np.all(np.equal(annotation['keypoints'][2::3],
                                visibilities[i])))
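The keypoint assertions in Example #4 check COCO's flat [x1, y1, v1, x2, y2, v2, ...] layout against detection keypoints stored in [y, x] order. A short sketch of that interleaving, assuming the exporter orders values exactly as the 0::3, 1::3, and 2::3 slices above imply (the sample coordinates are invented so x and y differ):

import numpy as np

# One detection with three keypoints in [y, x] order, plus their visibilities.
keypoints = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]], dtype=np.float32)
visibilities = np.array([2, 2, 2], dtype=np.int32)

# Interleave per keypoint as COCO expects: x first, then y, then visibility.
flat = np.stack([keypoints[:, 1], keypoints[:, 0], visibilities], axis=1).ravel()

print(flat[0::3])  # x coordinates: [0.4 0.5 0.6] == keypoints[:, 1]
print(flat[1::3])  # y coordinates: [0.1 0.2 0.3] == keypoints[:, 0]
print(flat[2::3])  # visibilities:  [2. 2. 2.]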
Example #5
    def testSingleImageDetectionBoxesExport(self):
        boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]],
                         dtype=np.float32)
        classes = np.array([1, 2, 3], dtype=np.int32)
        scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
        coco_boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, .5, .5]],
                              dtype=np.float32)
        coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco(
            image_id='first_image',
            category_id_set=set([1, 2, 3]),
            detection_boxes=boxes,
            detection_classes=classes,
            detection_scores=scores)
        for i, annotation in enumerate(coco_annotations):
            self.assertEqual(annotation['image_id'], 'first_image')
            self.assertEqual(annotation['category_id'], classes[i])
            self.assertAlmostEqual(annotation['score'], scores[i])
            self.assertTrue(
                np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
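The coco_boxes fixtures in Examples #4 and #5 reflect the usual conversion from [ymin, xmin, ymax, xmax] corner boxes to COCO's [xmin, ymin, width, height] format. A brief sketch of that arithmetic (my own helper for illustration, not a coco_tools function):

import numpy as np


def corners_to_coco(boxes):
    """Convert [ymin, xmin, ymax, xmax] boxes to COCO [xmin, ymin, w, h]."""
    ymin, xmin, ymax, xmax = np.split(boxes, 4, axis=1)
    return np.concatenate([xmin, ymin, xmax - xmin, ymax - ymin], axis=1)


boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]], dtype=np.float32)
print(corners_to_coco(boxes))
# [[0.  0.  1.  1. ]
#  [0.  0.  0.5 0.5]
#  [0.5 0.5 0.5 0.5]]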