def main(unused_argv):
    flags.mark_flag_as_required('input_annotations_boxes')
    flags.mark_flag_as_required('input_annotations_labels')
    flags.mark_flag_as_required('input_predictions')
    flags.mark_flag_as_required('input_class_labelmap')
    flags.mark_flag_as_required('output_metrics')

    all_location_annotations = pd.read_csv(FLAGS.input_annotations_boxes)
    all_label_annotations = pd.read_csv(FLAGS.input_annotations_labels)
    all_label_annotations.rename(
        columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)

    is_instance_segmentation_eval = False
    if FLAGS.input_annotations_segm:
        is_instance_segmentation_eval = True
        all_segm_annotations = pd.read_csv(FLAGS.input_annotations_segm)
        # Note: this step is fragile, since it requires the floating-point
        # numbers in both CSVs to be exactly the same; it will be replaced by a
        # more robust solution: merge on LabelName and ImageID, then filter by
        # IoU.
        all_location_annotations = utils.merge_boxes_and_masks(
            all_location_annotations, all_segm_annotations)
    all_annotations = pd.concat(
        [all_location_annotations, all_label_annotations])

    class_label_map, categories = _load_labelmap(FLAGS.input_class_labelmap)
    challenge_evaluator = (
        object_detection_evaluation.OpenImagesChallengeEvaluator(
            categories, evaluate_masks=is_instance_segmentation_eval))

    all_predictions = pd.read_csv(FLAGS.input_predictions)
    images_processed = 0
    for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
        logging.info('Processing image %d', images_processed)
        groundtruth_dictionary = utils.build_groundtruth_dictionary(
            image_groundtruth, class_label_map)
        challenge_evaluator.add_single_ground_truth_image_info(
            image_id, groundtruth_dictionary)

        prediction_dictionary = utils.build_predictions_dictionary_kaggle(
            all_predictions.loc[all_predictions['ImageID'] == image_id],
            class_label_map)
        challenge_evaluator.add_single_detected_image_info(
            image_id, prediction_dictionary)
        images_processed += 1

    metrics = challenge_evaluator.evaluate()

    with open(FLAGS.output_metrics, 'w') as fid:
        io_utils.write_csv(fid, metrics)
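
The evaluator above expects categories as a list of id/name dicts and class_label_map as a mapping from Open Images label MIDs to the same integer ids. Below is a minimal sketch of what _load_labelmap is expected to return for a three-class label map; the MIDs are reused from the tests further down, and the exact dict layout is an assumption based on the TF Object Detection API convention.

# Hypothetical output of _load_labelmap for a three-class label map
# (names here reuse the label MIDs; a real label map would carry display names).
class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
categories = [
    {'id': 1, 'name': '/m/04bcr3'},
    {'id': 2, 'name': '/m/083vt'},
    {'id': 3, 'name': '/m/02gy9n'},
]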
Example 2
    def proc_one_image(gt_and_preds, categories, class_label_map,
                       evaluate_masks):
        groundtruth, predictions = gt_and_preds
        image_id = groundtruth['ImageID'].iloc[0]

        tfod_evaluator = tfod_evaluation.OpenImagesChallengeEvaluator(
            categories, evaluate_masks=evaluate_masks)

        gt_dict = tfod_utils.build_groundtruth_dictionary(
            groundtruth, class_label_map)
        tfod_evaluator.add_single_ground_truth_image_info(image_id, gt_dict)

        pred_dict = tfod_utils.build_predictions_dictionary(
            predictions, class_label_map)
        tfod_evaluator.add_single_detected_image_info(image_id, pred_dict)

        return tfod_evaluator.get_internal_state()
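
proc_one_image evaluates a single image in isolation and returns the evaluator's internal state rather than final metrics. A sketch of how the (groundtruth, predictions) pairs it expects could be assembled with pandas; the variable names are placeholders, and it is assumed every annotated image also appears in the predictions frame.

# Group both frames by image and pair them up, one call per image.
gt_groups = dict(tuple(all_annotations.groupby('ImageID')))
pred_groups = dict(tuple(all_predictions.groupby('ImageID')))
states = [
    proc_one_image((gt_groups[image_id], pred_groups[image_id]),
                   categories, class_label_map, evaluate_masks=False)
    for image_id in gt_groups
]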
Example 3
    def evaluate_image(self, image_id, groundtruth, predictions):
        """Evaluates a single image.

        Args:
            image_id (str): the Open Images ID
            groundtruth: pandas.DataFrame with columns:
                'ImageID', 'Source', 'LabelName', 'Confidence',
                'XMin', 'XMax', 'YMin', 'YMax',
                'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction',
                'IsInside', 'ConfidenceImageLabel'
            predictions: pandas.DataFrame with columns:
                'ImageID', 'LabelName', 'Score', 'XMin', 'XMax', 'YMin', 'YMax'

        Returns:
            a dictionary with structure:
                {
                    "true_positive_indexes": [<IDX1>, <IDX2>, ...],
                    "false_positive_indexes": [<IDX1>, <IDX2>, ...],
                    "mAP": <mAP>,
                    "AP_per_class": {
                        "<LabelName>": <AP>,
                        "<LabelName>": <AP>,
                        ...
                    }
                }
        """
        # add data to evaluator
        groundtruth_dict = utils.build_groundtruth_dictionary(
            groundtruth, self._class_label_map)
        prediction_dict = utils.build_predictions_dictionary(
            predictions, self._class_label_map)
        self._evaluator.add_single_ground_truth_image_info(
            image_id, groundtruth_dict)
        self._evaluator.add_single_detected_image_info(image_id,
                                                       prediction_dict)

        # Ensure the evaluator state is cleared even if evaluation raises.
        try:
            # Actually evaluate the image.
            result = self._evaluate_image(predictions)
        finally:
            self._evaluator.clear()

        return result
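
A sketch of a call to evaluate_image, using only the columns the docstring requires. The values are illustrative, and the evaluator instance itself (constructed elsewhere with a matching class label map) is assumed.

import pandas as pd

groundtruth = pd.DataFrame(
    [['fe58ec1b06db2bb7', 'freeform', '/m/04bcr3', 1,
      0.0, 0.3, 0.5, 0.6, 0, 0, 0, 0, 0, None]],
    columns=['ImageID', 'Source', 'LabelName', 'Confidence',
             'XMin', 'XMax', 'YMin', 'YMax',
             'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction',
             'IsInside', 'ConfidenceImageLabel'])
predictions = pd.DataFrame(
    [['fe58ec1b06db2bb7', '/m/04bcr3', 0.9, 0.05, 0.28, 0.52, 0.61]],
    columns=['ImageID', 'LabelName', 'Score', 'XMin', 'XMax', 'YMin', 'YMax'])

result = evaluator.evaluate_image('fe58ec1b06db2bb7', groundtruth, predictions)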
Example 4
    def testBuildGroundtruthDictionaryBoxes(self):
        np_data = pd.DataFrame([
            ['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 1, None],
            ['fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 0, None],
            ['fe58ec1b06db2bb7', '/m/04bcr3', None, None, None, None, None, 1],
            ['fe58ec1b06db2bb7', '/m/083vt', None, None, None, None, None, 0],
            ['fe58ec1b06db2bb7', '/m/02gy9n', None, None, None, None, None, 1]
        ],
                               columns=[
                                   'ImageID', 'LabelName', 'XMin', 'XMax',
                                   'YMin', 'YMax', 'IsGroupOf',
                                   'ConfidenceImageLabel'
                               ])
        class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
        groundtruth_dictionary = utils.build_groundtruth_dictionary(
            np_data, class_label_map)

        self.assertIn(standard_fields.InputDataFields.groundtruth_boxes,
                      groundtruth_dictionary)
        self.assertIn(standard_fields.InputDataFields.groundtruth_classes,
                      groundtruth_dictionary)
        self.assertIn(standard_fields.InputDataFields.groundtruth_group_of,
                      groundtruth_dictionary)
        self.assertIn(
            standard_fields.InputDataFields.groundtruth_image_classes,
            groundtruth_dictionary)

        self.assertAllEqual(
            np.array([1, 3]), groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_classes])
        self.assertAllEqual(
            np.array([1, 0]), groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_group_of])

        expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3],
                                        [0.3, 0.1, 0.4, 0.2]])

        self.assertNDArrayNear(
            expected_boxes_data, groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_boxes], 1e-5)
        self.assertAllEqual(
            np.array([1, 2, 3]), groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_image_classes])
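
The expected boxes in the test above make the coordinate reordering visible: the CSV stores XMin, XMax, YMin, YMax, while the groundtruth dictionary holds rows of [YMin, XMin, YMax, XMax]. The snippet below re-derives expected_boxes_data from the two box rows in the test; it is a sanity check, not library code.

import numpy as np

# CSV order is (XMin, XMax, YMin, YMax); the evaluator wants [ymin, xmin, ymax, xmax].
csv_boxes = np.array([[0.0, 0.3, 0.5, 0.6],
                      [0.1, 0.2, 0.3, 0.4]])
xmin, xmax, ymin, ymax = csv_boxes.T
reordered = np.stack([ymin, xmin, ymax, xmax], axis=1)
# reordered == [[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]], matching expected_boxes_data.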
Example 5
    def testBuildGroundtruthDictionaryMasks(self):
        mask1 = np.array(
            [[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
            dtype=np.uint8)
        mask2 = np.array(
            [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
            dtype=np.uint8)

        encoding1 = mask.encode(np.asfortranarray(mask1))
        encoding2 = mask.encode(np.asfortranarray(mask2))

        np_data = pd.DataFrame(
            [[
                'fe58ec1b06db2bb7', mask1.shape[1], mask1.shape[0],
                '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 0, None, encoding1['counts']
            ],
             [
                 'fe58ec1b06db2bb7', None, None, '/m/02gy9n', 0.1, 0.2, 0.3,
                 0.4, 1, None, None
             ],
             [
                 'fe58ec1b06db2bb7', mask2.shape[1], mask2.shape[0],
                 '/m/02gy9n', 0.5, 0.6, 0.8, 0.9, 0, None, encoding2['counts']
             ],
             [
                 'fe58ec1b06db2bb7', None, None, '/m/04bcr3', None, None, None,
                 None, None, 1, None
             ],
             [
                 'fe58ec1b06db2bb7', None, None, '/m/083vt', None, None, None,
                 None, None, 0, None
             ],
             [
                 'fe58ec1b06db2bb7', None, None, '/m/02gy9n', None, None, None,
                 None, None, 1, None
             ]],
            columns=[
                'ImageID', 'ImageWidth', 'ImageHeight', 'LabelName', 'XMin',
                'XMax', 'YMin', 'YMax', 'IsGroupOf', 'ConfidenceImageLabel',
                'Mask'
            ])
        class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
        groundtruth_dictionary = utils.build_groundtruth_dictionary(
            np_data, class_label_map)
        self.assertIn(standard_fields.InputDataFields.groundtruth_boxes,
                      groundtruth_dictionary)
        self.assertIn(standard_fields.InputDataFields.groundtruth_classes,
                      groundtruth_dictionary)
        self.assertIn(standard_fields.InputDataFields.groundtruth_group_of,
                      groundtruth_dictionary)
        self.assertIn(
            standard_fields.InputDataFields.groundtruth_image_classes,
            groundtruth_dictionary)
        self.assertIn(
            standard_fields.InputDataFields.groundtruth_instance_masks,
            groundtruth_dictionary)
        self.assertAllEqual(
            np.array([1, 3, 3]), groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_classes])
        self.assertAllEqual(
            np.array([0, 1, 0]), groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_group_of])

        expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3],
                                        [0.3, 0.1, 0.4, 0.2],
                                        [0.8, 0.5, 0.9, 0.6]])

        self.assertNDArrayNear(
            expected_boxes_data, groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_boxes], 1e-5)
        self.assertAllEqual(
            np.array([1, 2, 3]), groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_image_classes])

        expected_segm = np.concatenate([
            np.expand_dims(mask1, 0),
            np.zeros((1, 4, 4), dtype=np.uint8),
            np.expand_dims(mask2, 0)
        ],
                                       axis=0)
        self.assertAllEqual(
            expected_segm, groundtruth_dictionary[
                standard_fields.InputDataFields.groundtruth_instance_masks])
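
In the masks test, the Mask column carries only the RLE counts payload; the mask's height and width travel in the separate ImageHeight and ImageWidth columns, so a decoder has to rebuild the full RLE dict before decoding. Below is a small pycocotools round-trip sketch on mask1; the reconstruction step is an assumption about how the utility consumes these columns.

import numpy as np
from pycocotools import mask as coco_mask

mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1],
                  [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.uint8)
encoding1 = coco_mask.encode(np.asfortranarray(mask1))

# Rebuild the RLE dict from the pieces the CSV columns carry, then decode.
rle = {'size': [mask1.shape[0], mask1.shape[1]], 'counts': encoding1['counts']}
decoded = coco_mask.decode(rle)
assert np.array_equal(decoded, mask1)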