Example #1
def main(parsed_args):
    """Computes Open Images detection challenge metrics and writes them to CSV."""
    all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
    all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)

    # Keep the image-level 'Confidence' column distinct from box-level
    # confidences once the two annotation tables are concatenated.
    all_label_annotations.rename(
        columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
    all_annotations = pd.concat([all_box_annotations, all_label_annotations])

    class_label_map, categories = _load_labelmap(
        parsed_args.input_class_labelmap)
    challenge_evaluator = (object_detection_evaluation.
                           OpenImagesDetectionChallengeEvaluator(categories))

    # Register the ground truth for every image with the evaluator.
    for _, groundtruth in enumerate(all_annotations.groupby('ImageID')):
        image_id, image_groundtruth = groundtruth
        groundtruth_dictionary = utils.build_groundtruth_boxes_dictionary(
            image_groundtruth, class_label_map)
        challenge_evaluator.add_single_ground_truth_image_info(
            image_id, groundtruth_dictionary)

    # Register the detections for every image.
    all_predictions = pd.read_csv(parsed_args.input_predictions)
    for _, prediction_data in enumerate(all_predictions.groupby('ImageID')):
        image_id, image_predictions = prediction_data
        prediction_dictionary = utils.build_predictions_dictionary(
            image_predictions, class_label_map)
        challenge_evaluator.add_single_detected_image_info(
            image_id, prediction_dictionary)

    metrics = challenge_evaluator.evaluate()

    with open(parsed_args.output_metrics, 'w') as fid:
        io_utils.write_csv(fid, metrics)
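
The snippet above is only the body of main; pd, utils, io_utils, object_detection_evaluation and _load_labelmap are module-level imports and helpers of the surrounding script and are not shown here. Below is a minimal command-line wiring for it, written as a sketch: the argument names mirror the attributes read from parsed_args above, while the parser, its description and the help strings are assumptions rather than the original script's code.

import argparse

if __name__ == '__main__':
    # Hypothetical entry point for Example #1; only the argument names are
    # taken from the attribute accesses above, everything else is assumed.
    parser = argparse.ArgumentParser(
        description='Evaluate Open Images detection challenge predictions.')
    parser.add_argument('--input_annotations_boxes', required=True,
                        help='CSV file with ground-truth box annotations.')
    parser.add_argument('--input_annotations_labels', required=True,
                        help='CSV file with image-level label annotations.')
    parser.add_argument('--input_class_labelmap', required=True,
                        help='Label map file mapping class names to ids.')
    parser.add_argument('--input_predictions', required=True,
                        help='CSV file with detection predictions.')
    parser.add_argument('--output_metrics', required=True,
                        help='Output CSV file for the computed metrics.')
    main(parser.parse_args())
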
Example #2
def main(unused_argv):
    """Computes Open Images challenge metrics (boxes or instance masks) and writes them to CSV."""
    flags.mark_flag_as_required('input_annotations_boxes')
    flags.mark_flag_as_required('input_annotations_labels')
    flags.mark_flag_as_required('input_predictions')
    flags.mark_flag_as_required('input_class_labelmap')
    flags.mark_flag_as_required('output_metrics')

    all_location_annotations = pd.read_csv(FLAGS.input_annotations_boxes)
    all_label_annotations = pd.read_csv(FLAGS.input_annotations_labels)
    all_label_annotations.rename(
        columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)

    is_instance_segmentation_eval = False
    if FLAGS.input_annotations_segm:
        is_instance_segmentation_eval = True
        all_segm_annotations = pd.read_csv(FLAGS.input_annotations_segm)
        # Note: this step is brittle, as it requires the floating-point numbers
        # in both CSVs to be exactly the same. It will be replaced by a more
        # robust solution: merge on LabelName and ImageID, then filter by IoU.
        all_location_annotations = utils.merge_boxes_and_masks(
            all_location_annotations, all_segm_annotations)
    all_annotations = pd.concat(
        [all_location_annotations, all_label_annotations])

    class_label_map, categories = _load_labelmap(FLAGS.input_class_labelmap)
    challenge_evaluator = (
        object_detection_evaluation.OpenImagesChallengeEvaluator(
            categories, evaluate_masks=is_instance_segmentation_eval))

    all_predictions = pd.read_csv(FLAGS.input_predictions)
    images_processed = 0
    # Unlike Example #1, the predictions for each image are looked up inside
    # the ground-truth loop instead of being registered in a second pass.
    for _, groundtruth in enumerate(all_annotations.groupby('ImageID')):
        logging.info('Processing image %d', images_processed)
        image_id, image_groundtruth = groundtruth
        groundtruth_dictionary = utils.build_groundtruth_dictionary(
            image_groundtruth, class_label_map)
        challenge_evaluator.add_single_ground_truth_image_info(
            image_id, groundtruth_dictionary)

        prediction_dictionary = utils.build_predictions_dictionary_kaggle(
            all_predictions.loc[all_predictions['ImageID'] == image_id],
            class_label_map)
        challenge_evaluator.add_single_detected_image_info(
            image_id, prediction_dictionary)
        images_processed += 1

    metrics = challenge_evaluator.evaluate()

    with open(FLAGS.output_metrics, 'w') as fid:
        io_utils.write_csv(fid, metrics)
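
This variant reads its inputs from absl FLAGS and marks them as required at the top of main, so it is meant to be launched through absl's app runner. The sketch below shows one plausible set of flag definitions and an entry point: the flag names follow the FLAGS attributes read above (with input_annotations_segm left optional, matching the check in the code), while the help strings are assumptions.

from absl import app
from absl import flags

FLAGS = flags.FLAGS

# Hypothetical flag definitions for Example #2; only the flag names are taken
# from the FLAGS attributes read above.
flags.DEFINE_string('input_annotations_boxes', None,
                    'CSV file with ground-truth box annotations.')
flags.DEFINE_string('input_annotations_labels', None,
                    'CSV file with image-level label annotations.')
flags.DEFINE_string('input_annotations_segm', None,
                    'Optional CSV file with instance segmentation annotations.')
flags.DEFINE_string('input_class_labelmap', None,
                    'Label map file mapping class names to ids.')
flags.DEFINE_string('input_predictions', None,
                    'CSV file with predictions.')
flags.DEFINE_string('output_metrics', None,
                    'Output CSV file for the computed metrics.')

if __name__ == '__main__':
    app.run(main)
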
Example #3
def main(parsed_args):
    """Computes Open Images visual relationship detection (VRD) metrics and writes them to CSV."""
    all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
    all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
    all_annotations = pd.concat([all_box_annotations, all_label_annotations])

    class_label_map = _load_labelmap(parsed_args.input_class_labelmap)
    relationship_label_map = _load_labelmap(
        parsed_args.input_relationship_labelmap)

    # Both evaluators consume the same ground truth and predictions: one scores
    # relationship detection, the other phrase detection.
    relation_evaluator = vrd_evaluation.VRDRelationDetectionEvaluator()
    phrase_evaluator = vrd_evaluation.VRDPhraseDetectionEvaluator()

    for _, groundtruth in enumerate(all_annotations.groupby('ImageID')):
        image_id, image_groundtruth = groundtruth
        groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary(
            image_groundtruth, class_label_map, relationship_label_map)

        relation_evaluator.add_single_ground_truth_image_info(
            image_id, groundtruth_dictionary)
        phrase_evaluator.add_single_ground_truth_image_info(
            image_id, groundtruth_dictionary)

    all_predictions = pd.read_csv(parsed_args.input_predictions)
    for _, prediction_data in enumerate(all_predictions.groupby('ImageID')):
        image_id, image_predictions = prediction_data
        prediction_dictionary = utils.build_predictions_vrd_dictionary(
            image_predictions, class_label_map, relationship_label_map)

        relation_evaluator.add_single_detected_image_info(
            image_id, prediction_dictionary)
        phrase_evaluator.add_single_detected_image_info(
            image_id, prediction_dictionary)

    relation_metrics = relation_evaluator.evaluate(
        relationships=_swap_labelmap_dict(relationship_label_map))
    phrase_metrics = phrase_evaluator.evaluate(
        relationships=_swap_labelmap_dict(relationship_label_map))

    with open(parsed_args.output_metrics, 'w') as fid:
        io_utils.write_csv(fid, relation_metrics)
        io_utils.write_csv(fid, phrase_metrics)
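
Both evaluate() calls are handed the relationship label map with its keys and values swapped via _swap_labelmap_dict, a module-level helper that is not shown above. A plausible sketch of it, assuming the label maps produced by _load_labelmap map names to unique integer ids:

def _swap_labelmap_dict(labelmap_dict):
    """Inverts a name-to-id label map into an id-to-name map (sketch; assumes a one-to-one mapping)."""
    return dict((label_id, label_name)
                for label_name, label_id in labelmap_dict.items())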