def main(unused_argv):
    flags.mark_flag_as_required('input_annotations_boxes')
    flags.mark_flag_as_required('input_annotations_labels')
    flags.mark_flag_as_required('input_predictions')
    flags.mark_flag_as_required('input_class_labelmap')
    flags.mark_flag_as_required('output_metrics')

    all_location_annotations = pd.read_csv(FLAGS.input_annotations_boxes)
    all_label_annotations = pd.read_csv(FLAGS.input_annotations_labels)
    all_label_annotations.rename(
        columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)

    is_instance_segmentation_eval = False
    if FLAGS.input_annotations_segm:
        is_instance_segmentation_eval = True
        all_segm_annotations = pd.read_csv(FLAGS.input_annotations_segm)
        # Note: this step is fragile, since it requires the floating-point
        # numbers in both CSVs to match exactly. It will be replaced by a more
        # stable solution: merge on LabelName and ImageID, then filter by IoU.
        all_location_annotations = utils.merge_boxes_and_masks(
            all_location_annotations, all_segm_annotations)
    all_annotations = pd.concat(
        [all_location_annotations, all_label_annotations])

    class_label_map, categories = _load_labelmap(FLAGS.input_class_labelmap)
    challenge_evaluator = (
        object_detection_evaluation.OpenImagesChallengeEvaluator(
            categories, evaluate_masks=is_instance_segmentation_eval))

    all_predictions = pd.read_csv(FLAGS.input_predictions)
    images_processed = 0
    for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
        logging.info('Processing image %d', images_processed)
        groundtruth_dictionary = utils.build_groundtruth_dictionary(
            image_groundtruth, class_label_map)
        challenge_evaluator.add_single_ground_truth_image_info(
            image_id, groundtruth_dictionary)

        prediction_dictionary = utils.build_predictions_dictionary_kaggle(
            all_predictions.loc[all_predictions['ImageID'] == image_id],
            class_label_map)
        challenge_evaluator.add_single_detected_image_info(
            image_id, prediction_dictionary)
        images_processed += 1

    metrics = challenge_evaluator.evaluate()

    with open(FLAGS.output_metrics, 'w') as fid:
        io_utils.write_csv(fid, metrics)
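
The comment in the snippet above points at a more stable merge: join the box and mask CSVs on LabelName and ImageID only, then keep pairs whose boxes agree by IoU instead of requiring bit-identical floats. A minimal sketch of that idea, assuming both frames carry XMin/XMax/YMin/YMax columns as in the Open Images CSVs (the helper name, the '_mask' suffix, and the 0.9 threshold are illustrative assumptions, not part of the original code):

    def merge_boxes_and_masks_by_iou(boxes_df, masks_df, iou_threshold=0.9):
        # Join on the stable keys only; overlapping box columns from the mask
        # frame get a '_mask' suffix.
        merged = pd.merge(boxes_df, masks_df, on=['ImageID', 'LabelName'],
                          suffixes=('', '_mask'))

        def box_iou(row):
            # Overlap between the annotation box and the mask's bounding box.
            ix = max(0.0, min(row['XMax'], row['XMax_mask'])
                     - max(row['XMin'], row['XMin_mask']))
            iy = max(0.0, min(row['YMax'], row['YMax_mask'])
                     - max(row['YMin'], row['YMin_mask']))
            inter = ix * iy
            area_box = (row['XMax'] - row['XMin']) * (row['YMax'] - row['YMin'])
            area_mask = ((row['XMax_mask'] - row['XMin_mask'])
                         * (row['YMax_mask'] - row['YMin_mask']))
            union = area_box + area_mask - inter
            return inter / union if union > 0 else 0.0

        # Keep only mask rows whose box agrees with the annotation box.
        return merged[merged.apply(box_iou, axis=1) >= iou_threshold]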
Example #2
    def proc_one_image(gt_and_preds, categories, class_label_map,
                       evaluate_masks):
        groundtruth, predictions = gt_and_preds
        image_id = groundtruth['ImageID'].iloc[0]

        tfod_evaluator = tfod_evaluation.OpenImagesChallengeEvaluator(
            categories, evaluate_masks=evaluate_masks)

        gt_dict = tfod_utils.build_groundtruth_dictionary(
            groundtruth, class_label_map)
        tfod_evaluator.add_single_ground_truth_image_info(image_id, gt_dict)

        pred_dict = tfod_utils.build_predictions_dictionary(
            predictions, class_label_map)
        tfod_evaluator.add_single_detected_image_info(image_id, pred_dict)

        return tfod_evaluator.get_internal_state()
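
get_internal_state() returns the evaluator's accumulated state together with the image ids it has seen, which is what makes this per-image pattern parallelizable; Example #6 below gathers these pairs from a process pool and folds them into one evaluator. A compressed sketch of that folding step, with names borrowed from the surrounding examples (per_image_states is an assumed list of proc_one_image outputs):

    # Each element of per_image_states is a (state, image_ids) pair.
    merged = tfod_evaluation.OpenImagesChallengeEvaluator(
        categories, evaluate_masks=True)
    for state, image_ids in per_image_states:
        merged.merge_internal_state(image_ids, state)
    metrics = merged.evaluate()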
Example #3
    def __init__(self, class_label_map_path, iou_threshold=0.5):
        """
        Args:
            class_label_map_path: path to the label map .pbtxt file
        """
        self._iou_threshold = iou_threshold

        self._class_label_map, self._categories = self.load_labelmap(
            class_label_map_path)
        self._reverse_label_map = {
            v: k
            for k, v in self._class_label_map.items()
        }

        self._evaluator = object_detection_evaluation.OpenImagesChallengeEvaluator(
            self._categories,
            evaluate_masks=False,
            matching_iou_threshold=self._iou_threshold,
        )
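
The load_labelmap helper is not shown here. A plausible sketch, modeled on the _load_labelmap function used in Example #1 (the proto import paths follow the TF Object Detection API; treat the body as an assumption, not the original code):

    from google.protobuf import text_format
    from object_detection.protos import string_int_label_map_pb2

    def load_labelmap(self, class_label_map_path):
        # Parse the .pbtxt into a StringIntLabelMap proto, then build both the
        # name -> id dict and the categories list the evaluator expects.
        label_map = string_int_label_map_pb2.StringIntLabelMap()
        with open(class_label_map_path, 'r') as fid:
            text_format.Merge(fid.read(), label_map)
        class_label_map = {item.name: item.id for item in label_map.item}
        categories = [{'id': item.id, 'name': item.name}
                      for item in label_map.item]
        return class_label_map, categories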
Example #4
    def test_returns_correct_instance_segm_metric_values(self):
        categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
        oivchallenge_evaluator = (
            object_detection_evaluation.OpenImagesChallengeEvaluator(
                categories, evaluate_masks=True))

        image_key = 'img1'
        groundtruth_boxes = np.array(
            [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
        groundtruth_class_labels = np.array([1, 2, 1], dtype=int)
        groundtruth_is_group_of_list = np.array([False, False, True],
                                                dtype=bool)
        groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)
        groundtruth_mask_0 = np.array(
            [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.uint8)
        zero_mask = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
                             dtype=np.uint8)
        groundtruth_masks = np.stack(
            [groundtruth_mask_0, zero_mask, zero_mask], axis=0)
        oivchallenge_evaluator.add_single_ground_truth_image_info(
            image_key, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels,
                standard_fields.InputDataFields.groundtruth_group_of:
                groundtruth_is_group_of_list,
                standard_fields.InputDataFields.groundtruth_image_classes:
                groundtruth_verified_labels,
                standard_fields.InputDataFields.groundtruth_instance_masks:
                groundtruth_masks
            })
        image_key = 'img3'
        groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
        groundtruth_class_labels = np.array([2], dtype=int)
        groundtruth_mask_0 = np.array(
            [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.uint8)
        groundtruth_masks = np.stack([groundtruth_mask_0], axis=0)
        oivchallenge_evaluator.add_single_ground_truth_image_info(
            image_key, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels,
                standard_fields.InputDataFields.groundtruth_instance_masks:
                groundtruth_masks
            })
        image_key = 'img1'
        detected_boxes = np.array([[0, 0, 2, 2], [2, 2, 3, 3]], dtype=float)
        detection_mask_0 = np.array([[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
                                    dtype=np.uint8)
        detected_masks = np.stack([detection_mask_0, zero_mask], axis=0)
        detected_class_labels = np.array([2, 1], dtype=int)
        detected_scores = np.array([0.7, 0.8], dtype=float)
        oivchallenge_evaluator.add_single_detected_image_info(
            image_key, {
                standard_fields.DetectionResultFields.detection_boxes:
                detected_boxes,
                standard_fields.DetectionResultFields.detection_scores:
                detected_scores,
                standard_fields.DetectionResultFields.detection_classes:
                detected_class_labels,
                standard_fields.DetectionResultFields.detection_masks:
                detected_masks
            })
        image_key = 'img3'
        detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
        detected_class_labels = np.array([2], dtype=int)
        detected_scores = np.array([0.5], dtype=float)
        detected_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
                                   dtype=np.uint8)
        detected_masks = np.stack([detected_mask_0], axis=0)
        oivchallenge_evaluator.add_single_detected_image_info(
            image_key, {
                standard_fields.DetectionResultFields.detection_boxes:
                detected_boxes,
                standard_fields.DetectionResultFields.detection_scores:
                detected_scores,
                standard_fields.DetectionResultFields.detection_classes:
                detected_class_labels,
                standard_fields.DetectionResultFields.detection_masks:
                detected_masks
            })
        metrics = oivchallenge_evaluator.evaluate()
        expected_metric_name = 'OpenImagesInstanceSegmentationChallenge'

        self.assertAlmostEqual(
            metrics[expected_metric_name +
                    '_PerformanceByCategory/AP@0.5IOU/dog'], 1.0)
        self.assertAlmostEqual(
            metrics[expected_metric_name +
                    '_PerformanceByCategory/AP@0.5IOU/cat'], 0)
        self.assertAlmostEqual(
            metrics[expected_metric_name + '_Precision/mAP@0.5IOU'], 0.5)

        oivchallenge_evaluator.clear()
        self.assertFalse(oivchallenge_evaluator._image_ids)
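
A quick check on the asserted numbers: with two categories, the headline mAP is the unweighted mean of the per-category APs, so (1.0 + 0) / 2 = 0.5, matching the final assertion.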
Example #5
    def test_returns_correct_detection_metric_values(self):
        categories = [{'id': 1, 'name': 'cat'},
                      {'id': 2, 'name': 'dog'},
                      {'id': 3, 'name': 'elephant'}]
        oivchallenge_evaluator = (
            object_detection_evaluation.OpenImagesChallengeEvaluator(
                categories, evaluate_masks=False, group_of_weight=0.5))

        image_key = 'img1'
        groundtruth_boxes = np.array(
            [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
        groundtruth_class_labels = np.array([1, 3, 1], dtype=int)
        groundtruth_is_group_of_list = np.array([False, False, True],
                                                dtype=bool)
        groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)
        oivchallenge_evaluator.add_single_ground_truth_image_info(
            image_key, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels,
                standard_fields.InputDataFields.groundtruth_group_of:
                groundtruth_is_group_of_list,
                standard_fields.InputDataFields.groundtruth_image_classes:
                groundtruth_verified_labels,
            })
        image_key = 'img2'
        groundtruth_boxes = np.array(
            [[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]],
            dtype=float)
        groundtruth_class_labels = np.array([1, 1, 3], dtype=int)
        groundtruth_is_group_of_list = np.array([False, False, True],
                                                dtype=bool)
        oivchallenge_evaluator.add_single_ground_truth_image_info(
            image_key, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels,
                standard_fields.InputDataFields.groundtruth_group_of:
                groundtruth_is_group_of_list
            })
        image_key = 'img3'
        groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
        groundtruth_class_labels = np.array([2], dtype=int)
        oivchallenge_evaluator.add_single_ground_truth_image_info(
            image_key, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels
            })
        image_key = 'img1'
        detected_boxes = np.array([[10, 10, 11, 11], [100, 100, 120, 120]],
                                  dtype=float)
        detected_class_labels = np.array([2, 2], dtype=int)
        detected_scores = np.array([0.7, 0.8], dtype=float)
        oivchallenge_evaluator.add_single_detected_image_info(
            image_key, {
                standard_fields.DetectionResultFields.detection_boxes:
                detected_boxes,
                standard_fields.DetectionResultFields.detection_scores:
                detected_scores,
                standard_fields.DetectionResultFields.detection_classes:
                detected_class_labels
            })
        image_key = 'img2'
        detected_boxes = np.array([[10, 10, 11, 11], [100, 100, 120, 120],
                                   [100, 100, 220, 220], [10, 10, 11, 11]],
                                  dtype=float)
        detected_class_labels = np.array([1, 1, 2, 3], dtype=int)
        detected_scores = np.array([0.7, 0.8, 0.5, 0.9], dtype=float)
        oivchallenge_evaluator.add_single_detected_image_info(
            image_key, {
                standard_fields.DetectionResultFields.detection_boxes:
                detected_boxes,
                standard_fields.DetectionResultFields.detection_scores:
                detected_scores,
                standard_fields.DetectionResultFields.detection_classes:
                detected_class_labels
            })
        image_key = 'img3'
        detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
        detected_class_labels = np.array([2], dtype=int)
        detected_scores = np.array([0.5], dtype=float)
        oivchallenge_evaluator.add_single_detected_image_info(
            image_key, {
                standard_fields.DetectionResultFields.detection_boxes:
                detected_boxes,
                standard_fields.DetectionResultFields.detection_scores:
                detected_scores,
                standard_fields.DetectionResultFields.detection_classes:
                detected_class_labels
            })
        metrics = oivchallenge_evaluator.evaluate()
        expected_metric_name = 'OpenImagesDetectionChallenge'

        self.assertAlmostEqual(
            metrics[expected_metric_name +
                    '_PerformanceByCategory/AP@0.5IOU/dog'], 0.3333333333)
        self.assertAlmostEqual(
            metrics[expected_metric_name +
                    '_PerformanceByCategory/AP@0.5IOU/elephant'],
            0.333333333333)
        self.assertAlmostEqual(
            metrics[expected_metric_name +
                    '_PerformanceByCategory/AP@0.5IOU/cat'], 0.142857142857)
        self.assertAlmostEqual(
            metrics[expected_metric_name + '_Precision/mAP@0.5IOU'],
            0.269841269)

        oivchallenge_evaluator.clear()
        self.assertFalse(oivchallenge_evaluator._image_ids)
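
The same sanity check works here: the overall mAP is the unweighted mean of the three per-category APs, (1/3 + 1/3 + 1/7) / 3 = 17/63 ≈ 0.2698413, matching the asserted value.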
Example #6
    def evaluate(self):
        tfod_eval_dicts = []
        if self._distributed:
            from datetime import datetime
            tic = datetime.now()
            for sid in self.tfod_eval_dicts.keys():
                comm.synchronize()
                shard = comm.gather(self.tfod_eval_dicts[sid], dst=0)
                tfod_eval_dicts.extend(list(itertools.chain(*shard)))
            if not comm.is_main_process():
                return {}
            print('>' * 30 +
                  f'gather cost: {(datetime.now() - tic).total_seconds()}s')
        else:
            for sid in self.tfod_eval_dicts.keys():
                tfod_eval_dicts.extend(self.tfod_eval_dicts[sid])

        if len(tfod_eval_dicts) == 0:
            print('_' * 60 + 'There are no predictions.')
            # Return an empty dict, consistent with the non-main-process case.
            return {}

        # Order doesn't matter.
        image_ids = list({x['ImageID'] for x in tfod_eval_dicts})
        all_predictions = pd.DataFrame.from_records(tfod_eval_dicts)

        all_location_annotations = self.gt_bboxes[
            self.gt_bboxes['ImageID'].isin(image_ids)]
        all_label_annotations = self.gt_image_labels[
            self.gt_image_labels['ImageID'].isin(image_ids)]

        is_instance_segmentation_eval = True
        # The string literal below is always truthy; it only labels the block.
        if 'instance-segmentation-task':
            all_segm_annotations = self.gt_masks[self.gt_masks['ImageID'].isin(
                image_ids)]
            # Note: this step is fragile, since it requires the floating-point
            # numbers in both CSVs to match exactly. It will be replaced by a
            # more stable solution: merge on LabelName and ImageID, then
            # filter by IoU.
            all_location_annotations = pd.merge(all_location_annotations,
                                                all_segm_annotations,
                                                how='outer',
                                                on=[
                                                    'LabelName',
                                                    'ImageID',
                                                    'XMin',
                                                    'XMax',
                                                    'YMin',
                                                    'YMax',
                                                ])

        all_annotations = pd.concat(
            [all_location_annotations, all_label_annotations])

        class_label_map, categories = self.label_map, self.categories

        with Pool() as pool:
            func = partial(self.proc_one_image,
                           categories=categories,
                           class_label_map=class_label_map,
                           evaluate_masks=is_instance_segmentation_eval)
            itr = zip([
                all_annotations[all_annotations['ImageID'] == x]
                for x in image_ids
            ], [
                all_predictions[all_predictions['ImageID'] == x]
                for x in image_ids
            ])
            state_and_ids_list = list(tqdm(pool.imap(func, itr)))
        # Again, the string literal is always truthy; it only labels the block.
        if 'merge-tfod-evaluator':
            merged = tfod_evaluation.OpenImagesChallengeEvaluator(
                categories, evaluate_masks=is_instance_segmentation_eval)
            for state, ids in state_and_ids_list:
                merged.merge_internal_state(ids, state)

        metrics = merged.evaluate()
        return OrderedDict(instance_segmentation=metrics)
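
The comm helpers above match detectron2's distributed utilities, and returning an OrderedDict keyed by task follows its DatasetEvaluator convention. A hedged usage sketch for the returned mapping (the evaluator instance name is assumed; the metric key follows the naming shown in the tests above):

    results = evaluator.evaluate()
    if results:  # empty on non-main processes and when there are no predictions
        seg_metrics = results['instance_segmentation']
        print(seg_metrics[
            'OpenImagesInstanceSegmentationChallenge_Precision/mAP@0.5IOU'])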