# Imports for the snippets below (module paths follow the TF1-era
# TensorFlow Object Detection API layout).
import numpy as np
import tensorflow as tf
from numpy import testing as np_testing

from object_detection import evaluator
from object_detection.core import standard_fields
from object_detection.metrics import tf_example_parser
from object_detection.utils import label_map_util

fields = standard_fields  # alias used by the test snippet below


def read_data_and_evaluate(input_config, eval_config):
  """Reads pre-computed object detections and groundtruth from tf_record.

  Args:
    input_config: input config proto of type
      object_detection.protos.InputReader.
    eval_config: evaluation config proto of type
      object_detection.protos.EvalConfig.

  Returns:
    A dictionary of evaluated detection metrics.

  Raises:
    ValueError: if input_reader type is not supported or metric type is unknown.
  """
  if input_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    input_paths = input_config.tf_record_input_reader.input_path

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    object_detection_evaluators = evaluator.get_evaluators(
        eval_config, categories)
    # Only a single evaluator is supported here; take the first one returned.
    object_detection_evaluator = object_detection_evaluators[0]

    skipped_images = 0
    processed_images = 0
    # _generate_filenames (defined elsewhere in the original module) expands
    # possibly-sharded input path patterns into concrete filenames.
    for input_path in _generate_filenames(input_paths):
      tf.logging.info('Processing file: {0}'.format(input_path))

      record_iterator = tf.python_io.tf_record_iterator(path=input_path)
      data_parser = tf_example_parser.TfExampleDetectionAndGTParser()

      for string_record in record_iterator:
        tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                               processed_images)
        processed_images += 1

        example = tf.train.Example()
        example.ParseFromString(string_record)
        decoded_dict = data_parser.parse(example)

        if decoded_dict:
          object_detection_evaluator.add_single_ground_truth_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
          object_detection_evaluator.add_single_detected_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
        else:
          skipped_images += 1
          tf.logging.info('Skipped images: {0}'.format(skipped_images))

    return object_detection_evaluator.evaluate()

  raise ValueError('Unsupported input_reader_config.')
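

# A minimal usage sketch (not part of the original module): build the two
# config protos from text and run the offline evaluation. The paths
# 'label_map.pbtxt' and 'detections.tfrecord' are hypothetical placeholders.
def run_offline_eval_example():
  from google.protobuf import text_format
  from object_detection.protos import eval_pb2
  from object_detection.protos import input_reader_pb2

  input_config = text_format.Parse(
      """
      label_map_path: 'label_map.pbtxt'
      tf_record_input_reader { input_path: 'detections.tfrecord' }
      """, input_reader_pb2.InputReader())
  eval_config = text_format.Parse(
      "metrics_set: 'pascal_voc_detection_metrics'", eval_pb2.EvalConfig())

  # Returns a dict mapping metric names to values for the chosen metrics_set.
  return read_data_and_evaluate(input_config, eval_config)
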
def process_detections(detections_record, categories):
    """Builds a confusion matrix from detections and groundtruth in a tf_record.

    Args:
        detections_record: path to a tf_record whose examples carry both
            detections and groundtruth (as parsed by
            TfExampleDetectionAndGTParser).
        categories: list of category dicts with contiguous 1-based class ids.

    Returns:
        A (len(categories) + 1) x (len(categories) + 1) numpy array with
        groundtruth classes as rows and detected classes as columns; the extra
        final column counts undetected groundtruth and the extra final row
        counts unmatched detections.
    """
    record_iterator = tf.python_io.tf_record_iterator(path=detections_record)
    data_parser = tf_example_parser.TfExampleDetectionAndGTParser()

    confusion_matrix = np.zeros(shape=(len(categories) + 1,
                                       len(categories) + 1))

    image_index = 0
    for string_record in record_iterator:
        example = tf.train.Example()
        example.ParseFromString(string_record)
        decoded_dict = data_parser.parse(example)

        image_index += 1

        if decoded_dict:
            groundtruth_boxes = decoded_dict[
                standard_fields.InputDataFields.groundtruth_boxes]
            groundtruth_classes = decoded_dict[
                standard_fields.InputDataFields.groundtruth_classes]

            detection_scores = decoded_dict[
                standard_fields.DetectionResultFields.detection_scores]
            detection_classes = decoded_dict[
                standard_fields.DetectionResultFields.detection_classes][
                    detection_scores >= CONFIDENCE_THRESHOLD]
            detection_boxes = decoded_dict[
                standard_fields.DetectionResultFields.detection_boxes][
                    detection_scores >= CONFIDENCE_THRESHOLD]

            matches = []

            if image_index % 100 == 0:
                print("Processed %d images" % (image_index))

            # Record every (groundtruth, detection) pair whose IoU clears the
            # threshold; duplicates are pruned below.
            for i in range(len(groundtruth_boxes)):
                for j in range(len(detection_boxes)):
                    iou = compute_iou(groundtruth_boxes[i], detection_boxes[j])

                    if iou > IOU_THRESHOLD:
                        matches.append([i, j, iou])

            matches = np.array(matches)
            if matches.shape[0] > 0:
                # Sort list of matches by descending IOU so we can remove duplicate detections
                # while keeping the highest IOU entry.
                matches = matches[matches[:, 2].argsort()[::-1][:len(matches)]]

                # Remove duplicate detections from the list.
                matches = matches[np.unique(matches[:, 1],
                                            return_index=True)[1]]

                # Sort the list again by descending IOU. Removing duplicates doesn't preserve
                # our previous sort.
                matches = matches[matches[:, 2].argsort()[::-1][:len(matches)]]

                # Remove duplicate ground truths from the list.
                matches = matches[np.unique(matches[:, 0],
                                            return_index=True)[1]]

            # A groundtruth matched exactly once lands in its class-vs-class
            # cell; otherwise it counts as undetected (last column).
            for i in range(len(groundtruth_boxes)):
                if matches.shape[0] > 0 and matches[matches[:, 0] ==
                                                    i].shape[0] == 1:
                    confusion_matrix[groundtruth_classes[i] - 1][
                        detection_classes[int(matches[matches[:, 0] == i,
                                                      1][0])] - 1] += 1
                else:
                    confusion_matrix[groundtruth_classes[i] -
                                     1][confusion_matrix.shape[1] - 1] += 1

            # Detections that matched no groundtruth count as spurious
            # detections (last row).
            for i in range(len(detection_boxes)):
                if matches.shape[0] > 0 and matches[matches[:, 1] ==
                                                    i].shape[0] == 0:
                    confusion_matrix[confusion_matrix.shape[0] -
                                     1][detection_classes[i] - 1] += 1
        else:
            print("Skipped image %d" % (image_index))

    print("Processed %d images" % (image_index))

    return confusion_matrix
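
# CONFIDENCE_THRESHOLD, IOU_THRESHOLD and compute_iou are referenced above but
# not defined in this snippet. A minimal sketch follows; the exact threshold
# values are assumptions, and boxes are [y_min, x_min, y_max, x_max] as in the
# parser output.
CONFIDENCE_THRESHOLD = 0.5  # assumed: drop detections scored below this
IOU_THRESHOLD = 0.5         # assumed: minimum overlap to count as a match


def compute_iou(box_a, box_b):
    """Returns the intersection-over-union of two boxes."""
    y_min = max(box_a[0], box_b[0])
    x_min = max(box_a[1], box_b[1])
    y_max = min(box_a[2], box_b[2])
    x_max = min(box_a[3], box_b[3])

    intersection = max(0.0, y_max - y_min) * max(0.0, x_max - x_min)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - intersection
    return intersection / union if union > 0 else 0.0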
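

# The test method below belongs to a tf.test.TestCase subclass. The class name
# and the three feature helpers are hedged reconstructions (not verbatim from
# the original test file); they wrap raw Python values into tf.train.Feature
# protos the way the test expects.
class TfExampleParserTest(tf.test.TestCase):

    def _BytesFeature(self, value):
        # Strings must be encoded to bytes for a BytesList feature.
        return tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[value.encode('utf-8')]))

    def _FloatFeature(self, value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=value))

    def _Int64Feature(self, value):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
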
    def testParseDetectionsAndGT(self):
        source_id = 'abc.jpg'
        # y_min, x_min, y_max, x_max
        object_bb = np.array([[0.0, 0.5, 0.3], [0.0, 0.1, 0.6],
                              [1.0, 0.6, 0.8], [1.0, 0.6, 0.7]]).transpose()
        detection_bb = np.array([[0.1, 0.2], [0.0, 0.8], [1.0, 0.6],
                                 [1.0, 0.85]]).transpose()

        object_class_label = [1, 1, 2]
        object_difficult = [1, 0, 0]
        object_group_of = [0, 0, 1]
        detection_class_label = [2, 1]
        detection_score = [0.5, 0.3]
        features = {
            fields.TfExampleFields.source_id:
            self._BytesFeature(source_id),
            fields.TfExampleFields.object_bbox_ymin:
            self._FloatFeature(object_bb[:, 0].tolist()),
            fields.TfExampleFields.object_bbox_xmin:
            self._FloatFeature(object_bb[:, 1].tolist()),
            fields.TfExampleFields.object_bbox_ymax:
            self._FloatFeature(object_bb[:, 2].tolist()),
            fields.TfExampleFields.object_bbox_xmax:
            self._FloatFeature(object_bb[:, 3].tolist()),
            fields.TfExampleFields.detection_bbox_ymin:
            self._FloatFeature(detection_bb[:, 0].tolist()),
            fields.TfExampleFields.detection_bbox_xmin:
            self._FloatFeature(detection_bb[:, 1].tolist()),
            fields.TfExampleFields.detection_bbox_ymax:
            self._FloatFeature(detection_bb[:, 2].tolist()),
            fields.TfExampleFields.detection_bbox_xmax:
            self._FloatFeature(detection_bb[:, 3].tolist()),
            fields.TfExampleFields.detection_class_label:
            self._Int64Feature(detection_class_label),
            fields.TfExampleFields.detection_score:
            self._FloatFeature(detection_score),
        }

        example = tf.train.Example(features=tf.train.Features(
            feature=features))
        parser = tf_example_parser.TfExampleDetectionAndGTParser()

        results_dict = parser.parse(example)
        self.assertIsNone(results_dict)

        features[fields.TfExampleFields.object_class_label] = (
            self._Int64Feature(object_class_label))
        features[fields.TfExampleFields.object_difficult] = (
            self._Int64Feature(object_difficult))

        example = tf.train.Example(features=tf.train.Features(
            feature=features))
        results_dict = parser.parse(example)

        self.assertIsNotNone(results_dict)
        self.assertEqual(source_id,
                         results_dict[fields.DetectionResultFields.key])
        np_testing.assert_almost_equal(
            object_bb, results_dict[fields.InputDataFields.groundtruth_boxes])
        np_testing.assert_almost_equal(
            detection_bb,
            results_dict[fields.DetectionResultFields.detection_boxes])
        np_testing.assert_almost_equal(
            detection_score,
            results_dict[fields.DetectionResultFields.detection_scores])
        np_testing.assert_almost_equal(
            detection_class_label,
            results_dict[fields.DetectionResultFields.detection_classes])
        np_testing.assert_almost_equal(
            object_difficult,
            results_dict[fields.InputDataFields.groundtruth_difficult])
        np_testing.assert_almost_equal(
            object_class_label,
            results_dict[fields.InputDataFields.groundtruth_classes])

        parser = tf_example_parser.TfExampleDetectionAndGTParser()

        features[fields.TfExampleFields.object_group_of] = (
            self._Int64Feature(object_group_of))

        example = tf.train.Example(features=tf.train.Features(
            feature=features))
        results_dict = parser.parse(example)
        self.assertIsNotNone(results_dict)
        np_testing.assert_almost_equal(
            object_group_of,
            results_dict[fields.InputDataFields.groundtruth_group_of])
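

# Standard TF test entry point so the test above can be run directly.
if __name__ == '__main__':
    tf.test.main()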