Example #1
 def test_evaluator_options_from_eval_config_with_super_categories(self):
   eval_config_text_proto = """
     metrics_set: "coco_detection_metrics"
     metrics_set: "coco_mask_metrics"
     include_metrics_per_category: true
     use_moving_averages: false
     batch_size: 1;
     super_categories {
       key: "supercat1"
       value: "a,b,c"
     }
     super_categories {
       key: "supercat2"
       value: "d,e,f"
     }
   """
   eval_config = eval_pb2.EvalConfig()
   text_format.Merge(eval_config_text_proto, eval_config)
   evaluator_options = eval_util.evaluator_options_from_eval_config(
       eval_config)
   self.assertIn('super_categories', evaluator_options['coco_mask_metrics'])
   super_categories = evaluator_options[
       'coco_mask_metrics']['super_categories']
   self.assertIn('supercat1', super_categories)
   self.assertIn('supercat2', super_categories)
   self.assertAllEqual(super_categories['supercat1'], ['a', 'b', 'c'])
   self.assertAllEqual(super_categories['supercat2'], ['d', 'e', 'f'])
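For orientation, the super_categories entries in the text proto are parsed into a plain dict of comma-split label lists under evaluator_options['coco_mask_metrics']; the value checked by the assertions above is equivalent to:

expected_super_categories = {
    'supercat1': ['a', 'b', 'c'],
    'supercat2': ['d', 'e', 'f'],
}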
Example #2
    def test_get_evaluator_with_keypoint_metrics(self):
        eval_config = eval_pb2.EvalConfig()
        person_keypoints_metric = eval_config.parameterized_metric.add()
        person_keypoints_metric.coco_keypoint_metrics.class_label = 'person'
        person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
            'left_eye'] = 0.1
        person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
            'right_eye'] = 0.2
        dog_keypoints_metric = eval_config.parameterized_metric.add()
        dog_keypoints_metric.coco_keypoint_metrics.class_label = 'dog'
        dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
            'tail_start'] = 0.3
        dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
            'mouth'] = 0.4
        categories = self._get_categories_list_with_keypoints()

        evaluator = eval_util.get_evaluators(eval_config,
                                             categories,
                                             evaluator_options=None)

        # Verify keypoint evaluator class variables.
        self.assertLen(evaluator, 3)
        self.assertFalse(evaluator[0]._include_metrics_per_category)
        self.assertEqual(evaluator[1]._category_name, 'person')
        self.assertEqual(evaluator[2]._category_name, 'dog')
        self.assertAllEqual(evaluator[1]._keypoint_ids, [0, 3])
        self.assertAllEqual(evaluator[2]._keypoint_ids, [1, 2])
        self.assertAllClose([0.1, 0.2], evaluator[1]._oks_sigmas)
        self.assertAllClose([0.3, 0.4], evaluator[2]._oks_sigmas)
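The fixture _get_categories_list_with_keypoints is not shown in this snippet; a hypothetical version, consistent with the keypoint ids ([0, 3] for 'person', [1, 2] for 'dog') and the sigma orderings asserted above, could look like:

def _get_categories_list_with_keypoints():
    # Hypothetical fixture (not part of the original snippet): each category
    # maps keypoint labels to integer keypoint ids.
    return [{
        'id': 1,
        'name': 'person',
        'keypoints': {'left_eye': 0, 'right_eye': 3}
    }, {
        'id': 2,
        'name': 'dog',
        'keypoints': {'tail_start': 1, 'mouth': 2}
    }]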
Example #3
    def test_get_eval_metric_ops_for_evaluators(self):
        eval_config = eval_pb2.EvalConfig()
        eval_config.metrics_set.extend([
            'coco_detection_metrics', 'coco_mask_metrics',
            'precision_at_recall_detection_metrics'
        ])
        eval_config.include_metrics_per_category = True
        eval_config.recall_lower_bound = 0.2
        eval_config.recall_upper_bound = 0.6

        evaluator_options = eval_util.evaluator_options_from_eval_config(
            eval_config)
        self.assertTrue(evaluator_options['coco_detection_metrics']
                        ['include_metrics_per_category'])
        self.assertFalse(evaluator_options['coco_detection_metrics']
                         ['skip_predictions_for_unlabeled_class'])
        self.assertTrue(evaluator_options['coco_mask_metrics']
                        ['include_metrics_per_category'])
        self.assertAlmostEqual(
            evaluator_options['precision_at_recall_detection_metrics']
            ['recall_lower_bound'], eval_config.recall_lower_bound)
        self.assertAlmostEqual(
            evaluator_options['precision_at_recall_detection_metrics']
            ['recall_upper_bound'], eval_config.recall_upper_bound)
        self.assertFalse(
            evaluator_options['precision_at_recall_detection_metrics']
            ['skip_predictions_for_unlabeled_class'])
Example #4
def get_configs_from_checkpoint_dir():
    """Reads evaluation configuration from checkpoint directory.

  Reads the evaluation config from the following files:
    model_config: Read from FLAGS.checkpoint_dir/model.config
    eval_config: Read from FLAGS.checkpoint_dir/eval.config
    input_config: Read from FLAGS.checkpoint_dir/input.config

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: an eval_pb2.EvalConfig
    input_config: an input_reader_pb2.InputReader
  """
    eval_config = eval_pb2.EvalConfig()
    eval_config_path = os.path.join(FLAGS.checkpoint_dir, 'eval.config')
    with tf.gfile.GFile(eval_config_path, 'r') as f:
        text_format.Merge(f.read(), eval_config)

    model_config = model_pb2.DetectionModel()
    model_config_path = os.path.join(FLAGS.checkpoint_dir, 'model.config')
    with tf.gfile.GFile(model_config_path, 'r') as f:
        text_format.Merge(f.read(), model_config)

    input_config = input_reader_pb2.InputReader()
    if FLAGS.eval_training_data:
        input_config_path = os.path.join(FLAGS.checkpoint_dir,
                                         'train_input.config')
    else:
        input_config_path = os.path.join(FLAGS.checkpoint_dir,
                                         'eval_input.config')
    with tf.gfile.GFile(input_config_path, 'r') as f:
        text_format.Merge(f.read(), input_config)

    return model_config, eval_config, input_config
Example #5
    def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
            self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
        eval_config = eval_pb2.EvalConfig()
        eval_config.metrics_set.extend(
            ['coco_detection_metrics', 'coco_mask_metrics'])
        categories = self._get_categories_list()
        eval_dict = self._make_evaluation_dict(
            batch_size=batch_size,
            max_gt_boxes=max_gt_boxes,
            scale_to_absolute=scale_to_absolute,
            resized_groundtruth_masks=True)
        metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
            eval_config, categories, eval_dict)
        _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
        _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']

        with self.test_session() as sess:
            metrics = {}
            for key, (value_op, _) in six.iteritems(metric_ops):
                metrics[key] = value_op
            sess.run(update_op_boxes)
            sess.run(update_op_masks)
            metrics = sess.run(metrics)
            self.assertAlmostEqual(1.0,
                                   metrics['DetectionBoxes_Precision/mAP'])
            self.assertAlmostEqual(1.0,
                                   metrics['DetectionMasks_Precision/mAP'])
Example #6
def get_configs_from_multiple_files():
  """Reads evaluation configuration from multiple config files.

  Reads the evaluation config from the following files:
    model_config: Read from --model_config_path
    eval_config: Read from --eval_config_path
    input_config: Read from --input_config_path

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: an eval_pb2.EvalConfig
    input_config: an input_reader_pb2.InputReader
  """
  eval_config = eval_pb2.EvalConfig()
  with tf.gfile.GFile(FLAGS.eval_config_path, 'r') as f:
    text_format.Merge(f.read(), eval_config)

  model_config = model_pb2.DetectionModel()
  with tf.gfile.GFile(FLAGS.model_config_path, 'r') as f:
    text_format.Merge(f.read(), model_config)

  input_config = input_reader_pb2.InputReader()
  with tf.gfile.GFile(FLAGS.input_config_path, 'r') as f:
    text_format.Merge(f.read(), input_config)

  return model_config, eval_config, input_config
Example #7
 def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self):
     eval_config = eval_pb2.EvalConfig()
     eval_config.metrics_set.extend(['unsupported_metric'])
     categories = self._get_categories_list()
     eval_dict = self._make_evaluation_dict()
     with self.assertRaises(ValueError):
         eval_util.get_eval_metric_ops_for_evaluators(
             eval_config, categories, eval_dict)
Example #8
def get_configs_from_multiple_files(model_config_path="",
                                    train_config_path="",
                                    train_input_config_path="",
                                    eval_config_path="",
                                    eval_input_config_path="",
                                    offline_eval_input_config_path=""):
    """Reads training configuration from multiple config files.

  Args:
    model_config_path: Path to model_pb2.DetectionModel.
    train_config_path: Path to train_pb2.TrainConfig.
    train_input_config_path: Path to input_reader_pb2.InputReader.
    eval_config_path: Path to eval_pb2.EvalConfig.
    eval_input_config_path: Path to input_reader_pb2.InputReader.
    offline_eval_input_config_path: Path to input_reader_pb2.InputReader.

  Returns:
    Dictionary of configuration objects. Keys are `model`, `train_config`,
      `train_input_config`, `eval_config`, `eval_input_config`,
      `offline_eval_input_config`. A key/value pair is included only for
      arguments passed a non-empty path string.
  """
    configs = {}
    if model_config_path:
        model_config = model_pb2.DetectionModel()
        with tf.gfile.GFile(model_config_path, "r") as f:
            text_format.Merge(f.read(), model_config)
            configs["model"] = model_config

    if train_config_path:
        train_config = train_pb2.TrainConfig()
        with tf.gfile.GFile(train_config_path, "r") as f:
            text_format.Merge(f.read(), train_config)
            configs["train_config"] = train_config

    if train_input_config_path:
        train_input_config = input_reader_pb2.InputReader()
        with tf.gfile.GFile(train_input_config_path, "r") as f:
            text_format.Merge(f.read(), train_input_config)
            configs["train_input_config"] = train_input_config

    if eval_config_path:
        eval_config = eval_pb2.EvalConfig()
        with tf.gfile.GFile(eval_config_path, "r") as f:
            text_format.Merge(f.read(), eval_config)
            configs["eval_config"] = eval_config

    if eval_input_config_path:
        eval_input_config = input_reader_pb2.InputReader()
        with tf.gfile.GFile(eval_input_config_path, "r") as f:
            text_format.Merge(f.read(), eval_input_config)
            configs["eval_input_config"] = eval_input_config

    if offline_eval_input_config_path:
        offline_eval_input_config = input_reader_pb2.InputReader()
        with tf.gfile.GFile(offline_eval_input_config_path, "r") as f:
            text_format.Merge(f.read(), offline_eval_input_config)
            configs["offline_eval_input_config"] = offline_eval_input_config

    return configs
Example #9
    def test_get_configs_from_multiple_files(self):
        """Tests that proto configs can be read from multiple files."""
        print(
            '\n=========================================================================='
        )
        print('test_get_configs_from_multiple_files')

        #temp_dir = self.get_temp_dir()
        temp_dir = '/home/zq/tmp/'

        # Write model config file.
        model_config_path = os.path.join(temp_dir, "model.config")
        model = model_pb2.DetectionModel()
        model.faster_rcnn.num_classes = 10
        _write_config(model, model_config_path)

        # Write train config file.
        train_config_path = os.path.join(temp_dir, "train.config")
        train_config = train_pb2.TrainConfig()
        train_config.batch_size = 32
        _write_config(train_config, train_config_path)

        # Write train input config file.
        train_input_config_path = os.path.join(temp_dir, "train_input.config")
        train_input_config = input_reader_pb2.InputReader()
        train_input_config.label_map_path = "path/to/label_map"
        _write_config(train_input_config, train_input_config_path)

        # Write eval config file.
        eval_config_path = os.path.join(temp_dir, "eval.config")
        eval_config = eval_pb2.EvalConfig()
        eval_config.num_examples = 20
        _write_config(eval_config, eval_config_path)

        # Write eval input config file.
        eval_input_config_path = os.path.join(temp_dir, "eval_input.config")
        eval_input_config = input_reader_pb2.InputReader()
        eval_input_config.label_map_path = "path/to/another/label_map"
        _write_config(eval_input_config, eval_input_config_path)

        configs = config_util.get_configs_from_multiple_files(
            model_config_path=model_config_path,
            train_config_path=train_config_path,
            train_input_config_path=train_input_config_path,
            eval_config_path=eval_config_path,
            eval_input_config_path=eval_input_config_path)

        self.assertProtoEquals(model, configs["model"])
        self.assertProtoEquals(train_config, configs["train_config"])
        self.assertProtoEquals(train_input_config,
                               configs["train_input_config"])
        self.assertProtoEquals(eval_config, configs["eval_config"])
        self.assertProtoEquals(eval_input_config, configs["eval_input_config"])
Example #10
    def test_get_evaluator_with_evaluator_options(self):
        eval_config = eval_pb2.EvalConfig()
        eval_config.metrics_set.extend(['coco_detection_metrics'])
        eval_config.include_metrics_per_category = True
        categories = self._get_categories_list()

        evaluator_options = eval_util.evaluator_options_from_eval_config(
            eval_config)
        evaluator = eval_util.get_evaluators(eval_config, categories,
                                             evaluator_options)

        self.assertTrue(evaluator[0]._include_metrics_per_category)
Example #11
    def test_get_eval_metric_ops_for_evaluators(self):
        eval_config = eval_pb2.EvalConfig()
        eval_config.metrics_set.extend(
            ['coco_detection_metrics', 'coco_mask_metrics'])
        eval_config.include_metrics_per_category = True

        evaluator_options = eval_util.evaluator_options_from_eval_config(
            eval_config)
        self.assertTrue(evaluator_options['coco_detection_metrics']
                        ['include_metrics_per_category'])
        self.assertTrue(evaluator_options['coco_mask_metrics']
                        ['include_metrics_per_category'])
Example #12
  def test_get_evaluator_with_unmatched_label(self):
    eval_config = eval_pb2.EvalConfig()
    person_keypoints_metric = eval_config.parameterized_metric.add()
    person_keypoints_metric.coco_keypoint_metrics.class_label = 'unmatched'
    person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
        'kpt'] = 0.1
    categories = self._get_categories_list_with_keypoints()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)
    self.assertLen(evaluator, 1)
    self.assertNotIsInstance(
        evaluator[0], coco_evaluation.CocoKeypointEvaluator)
Example #13
 def test_evaluator_options_from_eval_config_no_super_categories(self):
   eval_config_text_proto = """
     metrics_set: "coco_detection_metrics"
     metrics_set: "coco_mask_metrics"
     include_metrics_per_category: true
     use_moving_averages: false
     batch_size: 1;
   """
   eval_config = eval_pb2.EvalConfig()
   text_format.Merge(eval_config_text_proto, eval_config)
   evaluator_options = eval_util.evaluator_options_from_eval_config(
       eval_config)
   self.assertNotIn('super_categories', evaluator_options['coco_mask_metrics'])
Example #14
    def test_get_evaluator_with_no_evaluator_options(self):
        eval_config = eval_pb2.EvalConfig()
        eval_config.metrics_set.extend(['coco_detection_metrics'])
        eval_config.include_metrics_per_category = True
        categories = self._get_categories_list()

        evaluator = eval_util.get_evaluators(eval_config,
                                             categories,
                                             evaluator_options=None)

        # Even though we are setting eval_config.include_metrics_per_category = True
        # this option is never passed into the DetectionEvaluator constructor (via
        # `evaluator_options`).
        self.assertFalse(evaluator[0]._include_metrics_per_category)
Example #15
  def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'precision_at_recall_detection_metrics'])
    eval_config.include_metrics_per_category = True
    eval_config.recall_lower_bound = 0.2
    eval_config.recall_upper_bound = 0.6
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category = True
    # and bounds on recall, these options are never passed into the
    # DetectionEvaluator constructor (via `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category)
    self.assertAlmostEqual(evaluator[1]._recall_lower_bound, 0.0)
    self.assertAlmostEqual(evaluator[1]._recall_upper_bound, 1.0)
Example #16
    def test_get_eval_metric_ops_for_coco_detections(self):
        eval_config = eval_pb2.EvalConfig()
        eval_config.metrics_set.extend(['coco_detection_metrics'])
        categories = self._get_categories_list()
        eval_dict = self._make_evaluation_dict()
        metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
            eval_config, categories, eval_dict)
        _, update_op = metric_ops['DetectionBoxes_Precision/mAP']

        with self.test_session() as sess:
            metrics = {}
            for key, (value_op, _) in metric_ops.items():
                metrics[key] = value_op
            sess.run(update_op)
            metrics = sess.run(metrics)
            print(metrics)
            self.assertAlmostEqual(1.0,
                                   metrics['DetectionBoxes_Precision/mAP'])
            self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example #17
    def test_get_evaluator_with_evaluator_options(self):
        eval_config = eval_pb2.EvalConfig()
        eval_config.metrics_set.extend([
            'coco_detection_metrics', 'precision_at_recall_detection_metrics'
        ])
        eval_config.include_metrics_per_category = True
        eval_config.recall_lower_bound = 0.2
        eval_config.recall_upper_bound = 0.6
        categories = self._get_categories_list()

        evaluator_options = eval_util.evaluator_options_from_eval_config(
            eval_config)
        evaluator = eval_util.get_evaluators(eval_config, categories,
                                             evaluator_options)

        self.assertTrue(evaluator[0]._include_metrics_per_category)
        self.assertAlmostEqual(evaluator[1]._recall_lower_bound,
                               eval_config.recall_lower_bound)
        self.assertAlmostEqual(evaluator[1]._recall_upper_bound,
                               eval_config.recall_upper_bound)
Example #18
    with open(os.path.join(output_dir, 'metrics.csv'), 'w') as csvfile:
        metrics_writer = csv.writer(csvfile, delimiter=',')
        for metric_name, metric_value in metrics.items():
            metrics_writer.writerow([metric_name, str(metric_value)])


# FILENAME = "camera_data/training/training.record-00000-of-00075"
FILENAME = "../../old/predictions_v0/initial_crop_size_28/validation_detections.tfrecord-00000-of-00001"
data_parser = tf_example_parser.TfExampleDetectionAndGTParser()
dataset = tf.data.TFRecordDataset(FILENAME, compression_type='')

# serialized_example = next(iter(dataset))
categories = label_map_util.create_categories_from_labelmap(
    "../../data/camera_data/label_map.pbtxt")

eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(['coco_detection_metrics'])
# Per category metrics not working
# eval_config.include_metrics_per_category = True

evaluator_options = evaluator_options_from_eval_config(eval_config)
object_detection_evaluators = get_evaluators(eval_config, categories,
                                             evaluator_options)
object_detection_evaluator = object_detection_evaluators[0]
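# (Not shown in this fragment: records from `dataset` would typically be decoded
# with data_parser.parse() and fed to object_detection_evaluator via
# add_single_ground_truth_image_info() and add_single_detected_image_info()
# before evaluate() is called to produce the metrics written to metrics.csv above.)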


def scale_boxes_to_absolute_coordinates(decoded_dict):
    def _scale_box_to_absolute(args):
        boxes, height, width = args
        return box_list_ops.to_absolute_coordinates(box_list.BoxList(boxes),
                                                    height, width).get()