Example #1
    def test_value_error_on_duplicate_images(self):
        categories = [{
            'id': 1,
            'name': 'cat'
        }, {
            'id': 2,
            'name': 'dog'
        }, {
            'id': 3,
            'name': 'elephant'
        }]
        # Add groundtruth
        pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
            categories)
        image_key1 = 'img1'
        groundtruth_boxes1 = np.array(
            [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
        groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
        pascal_evaluator.add_single_ground_truth_image_info(
            image_key1, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
            })
        with self.assertRaises(ValueError):
            pascal_evaluator.add_single_ground_truth_image_info(
                image_key1, {
                    standard_fields.InputDataFields.groundtruth_boxes:
                    groundtruth_boxes1,
                    standard_fields.InputDataFields.groundtruth_classes:
                    groundtruth_class_labels1
                })
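
These test methods are snippets from a larger test class, so they assume numpy and the TensorFlow Object Detection API are importable. A minimal sketch of the imports and scaffold they rely on, assuming the usual module layout of the models research/object_detection tree (the class name PascalEvaluationTest is made up for illustration):

import numpy as np
import tensorflow as tf

from object_detection.core import standard_fields
from object_detection.utils import object_detection_evaluation


class PascalEvaluationTest(tf.test.TestCase):
    """tf.test.TestCase supplies assertRaises and assertAlmostEqual."""
    # The test methods shown in these examples would live here.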
Example #2
    def test_returns_correct_metric_values(self):
        categories = [{
            'id': 1,
            'name': 'cat'
        }, {
            'id': 2,
            'name': 'dog'
        }, {
            'id': 3,
            'name': 'elephant'
        }]
        #  Add groundtruth
        pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
            categories)
        image_key1 = 'img1'
        groundtruth_boxes1 = np.array(
            [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
        groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
        pascal_evaluator.add_single_ground_truth_image_info(
            image_key1, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes1,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels1
            })
        image_key2 = 'img2'
        groundtruth_boxes2 = np.array(
            [[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]],
            dtype=float)
        groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
        groundtruth_is_difficult_list2 = np.array([False, True, False],
                                                  dtype=bool)
        pascal_evaluator.add_single_ground_truth_image_info(
            image_key2, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes2,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels2,
                standard_fields.InputDataFields.groundtruth_difficult:
                groundtruth_is_difficult_list2
            })
        image_key3 = 'img3'
        groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
        groundtruth_class_labels3 = np.array([2], dtype=int)
        pascal_evaluator.add_single_ground_truth_image_info(
            image_key3, {
                standard_fields.InputDataFields.groundtruth_boxes:
                groundtruth_boxes3,
                standard_fields.InputDataFields.groundtruth_classes:
                groundtruth_class_labels3
            })

        # Add detections
        image_key = 'img2'
        detected_boxes = np.array(
            [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
            dtype=float)
        detected_class_labels = np.array([1, 1, 3], dtype=int)
        detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
        pascal_evaluator.add_single_detected_image_info(
            image_key, {
                standard_fields.DetectionResultFields.detection_boxes:
                detected_boxes,
                standard_fields.DetectionResultFields.detection_scores:
                detected_scores,
                standard_fields.DetectionResultFields.detection_classes:
                detected_class_labels
            })

        metrics = pascal_evaluator.evaluate()
        self.assertAlmostEqual(
            metrics['PASCAL/PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
        self.assertAlmostEqual(
            metrics['PASCAL/PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
        self.assertAlmostEqual(
            metrics['PASCAL/PerformanceByCategory/AP@0.5IOU/cat'], 0.16666666)
        self.assertAlmostEqual(metrics['PASCAL/Precision/mAP@0.5IOU'],
                               0.05555555)
        pascal_evaluator.clear()
        self.assertFalse(pascal_evaluator._image_ids)
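
The same evaluator API also works outside a test harness. A minimal standalone sketch, assuming the module paths from the note after Example #1; the exact metric key names (e.g. 'PASCAL/Precision/mAP@0.5IOU') depend on the evaluator's metric prefix:

import numpy as np

from object_detection.core import standard_fields
from object_detection.utils import object_detection_evaluation

categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
evaluator = object_detection_evaluation.PascalDetectionEvaluator(categories)

# One ground-truth box of class 1 ('cat') on image 'img1'.
evaluator.add_single_ground_truth_image_info(
    'img1', {
        standard_fields.InputDataFields.groundtruth_boxes:
            np.array([[0, 0, 10, 10]], dtype=float),
        standard_fields.InputDataFields.groundtruth_classes:
            np.array([1], dtype=int),
    })

# One matching detection with its confidence score.
evaluator.add_single_detected_image_info(
    'img1', {
        standard_fields.DetectionResultFields.detection_boxes:
            np.array([[0, 0, 10, 10]], dtype=float),
        standard_fields.DetectionResultFields.detection_scores:
            np.array([0.9], dtype=float),
        standard_fields.DetectionResultFields.detection_classes:
            np.array([1], dtype=int),
    })

metrics = evaluator.evaluate()  # dict mapping metric names to float values
for name, value in metrics.items():
    print(name, value)

evaluator.clear()  # reset accumulated state so the evaluator can be reused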
Example #3
def main(_):
    pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)

    if FLAGS.model_dir:
        pipeline_proto.model_dir = FLAGS.model_dir
        tf.logging.info("Override model checkpoint dir: %s", FLAGS.model_dir)

    if FLAGS.shard_indicator:
        pipeline_proto.eval_reader.shard_indicator = FLAGS.shard_indicator
        tf.logging.info("Override shard_indicator: %s", FLAGS.shard_indicator)

    if FLAGS.input_pattern:
        while len(pipeline_proto.eval_reader.wsod_reader.input_pattern) > 0:
            pipeline_proto.eval_reader.wsod_reader.input_pattern.pop()
        pipeline_proto.eval_reader.wsod_reader.input_pattern.append(
            FLAGS.input_pattern)
        tf.logging.info("Override input_pattern: %s", FLAGS.input_pattern)

    tf.logging.info("Pipeline configure: %s", '=' * 128)
    tf.logging.info(pipeline_proto)

    # Load the vocabulary file.

    categories = []
    category_to_id = {}
    with open(FLAGS.vocabulary_file, 'r') as fp:
        for line_id, line in enumerate(fp.readlines()):
            categories.append({'id': 1 + line_id, 'name': line.strip('\n')})
            category_to_id[line.strip('\n')] = 1 + line_id
    tf.logging.info("\n%s", json.dumps(categories, indent=2))

    # Create the evaluator.

    number_of_evaluators = max(1, FLAGS.number_of_evaluators)

    if FLAGS.evaluator.lower() == 'pascal':
        evaluators = [
            object_detection_evaluation.PascalDetectionEvaluator(categories)
            for _ in range(number_of_evaluators)
        ]
    elif FLAGS.evaluator.lower() == 'coco':
        evaluators = [
            coco_evaluation.CocoDetectionEvaluator(categories)
            for _ in range(number_of_evaluators)
        ]
    else:
        raise ValueError('Invalid evaluator {}.'.format(FLAGS.evaluator))

    if not FLAGS.run_once:

        # Evaluation loop.

        latest_step = None
        while True:
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
            if checkpoint_path is not None:
                global_step = int(checkpoint_path.split('-')[-1])

                if global_step != latest_step and global_step >= FLAGS.min_eval_steps:

                    # Evaluate the checkpoint.

                    latest_step = global_step
                    tf.logging.info('Start to evaluate checkpoint %s.',
                                    checkpoint_path)

                    summary, metric = _run_evaluation(pipeline_proto,
                                                      checkpoint_path,
                                                      evaluators,
                                                      category_to_id,
                                                      categories)

                    step_best, metric_best = save_model_if_it_is_better(
                        global_step, metric, checkpoint_path,
                        FLAGS.saved_ckpts_dir)

                    # Write summary.
                    summary.value.add(tag='loss/best_metric',
                                      simple_value=metric_best)
                    summary_writer = tf.summary.FileWriter(FLAGS.eval_log_dir)
                    summary_writer.add_summary(summary,
                                               global_step=global_step)
                    summary_writer.close()
                    tf.logging.info("Summary is written.")

                    continue
            tf.logging.info("Wait for 10 seconds.")
            time.sleep(10)

    else:

        # Run once.
        if FLAGS.eval_best_model:
            checkpoint_path = get_best_model_checkpoint(FLAGS.saved_ckpts_dir)
        else:
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
        tf.logging.info('Start to evaluate checkpoint %s.', checkpoint_path)

        summary, metric = _run_evaluation(pipeline_proto,
                                          checkpoint_path,
                                          evaluators,
                                          category_to_id,
                                          categories,
                                          save_report_to_file=True)

    tf.logging.info('Done')
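
The vocabulary-file parsing inlined in main() above can also be factored into a small helper. A sketch under the assumption that the file holds one category name per line, as the loop above implies; the helper name load_vocabulary is hypothetical:

def load_vocabulary(vocabulary_file):
    """Returns (categories, category_to_id) built from a one-name-per-line file."""
    categories = []
    category_to_id = {}
    with open(vocabulary_file, 'r') as fp:
        for line_id, line in enumerate(fp):
            name = line.strip('\n')
            categories.append({'id': 1 + line_id, 'name': name})
            category_to_id[name] = 1 + line_id
    return categories, category_to_id


# Usage mirroring main():
#   categories, category_to_id = load_vocabulary(FLAGS.vocabulary_file)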