def test_get_configs_from_pipeline_file(self):
        """Checks a pipeline config file round-trips into a configs dict."""
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.model.faster_rcnn.num_classes = 10
        pipeline_config.train_config.batch_size = 32
        pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
        pipeline_config.eval_config.num_examples = 20
        pipeline_config.eval_input_reader.add().queue_capacity = 100
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        # Every top-level proto field should surface under its dict key.
        expected_pairs = [
            (pipeline_config.model, "model"),
            (pipeline_config.train_config, "train_config"),
            (pipeline_config.train_input_reader, "train_input_config"),
            (pipeline_config.eval_config, "eval_config"),
            (pipeline_config.eval_input_reader, "eval_input_configs"),
        ]
        for proto, key in expected_pairs:
            self.assertProtoEquals(proto, configs[key])
    def testNewFocalLossParameters(self):
        """Checks that focal loss alpha/gamma are overridden from hparams."""
        updated_alpha = 0.3
        updated_gamma = 2.0
        hparams = tf.contrib.training.HParams(
            focal_loss_alpha=updated_alpha, focal_loss_gamma=updated_gamma)
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        # Seed the config with different (original) focal loss parameters so
        # the override is observable.
        focal = (pipeline_config.model.ssd.loss.classification_loss
                 .weighted_sigmoid_focal)
        focal.alpha = 1.0
        focal.gamma = 1.0
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        merged_focal = (configs["model"].ssd.loss.classification_loss
                        .weighted_sigmoid_focal)
        self.assertAlmostEqual(updated_alpha, merged_focal.alpha)
        self.assertAlmostEqual(updated_gamma, merged_focal.gamma)
  def test_write_graph_and_checkpoint(self):
    """Writes a graph and checkpoint, then restores them and runs inference.

    Exercises exporter.write_graph_and_checkpoint directly (rather than the
    full export_inference_graph path): builds the detection graph with
    exporter._build_detection_graph, writes it with a checkpoint, re-imports
    the resulting meta graph into a fresh graph, restores the variables and
    checks the detection output tensors for a batch of two tf.Examples.
    """
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    output_directory = os.path.join(tmp_dir, 'output')
    model_path = os.path.join(output_directory, 'model.ckpt')
    meta_graph_path = model_path + '.meta'
    tf.gfile.MakeDirs(output_directory)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      # Fake model with keypoints and masks so all output tensors exist.
      mock_builder.return_value = FakeModel(
          add_detection_keypoints=True, add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      detection_model = model_builder.build(pipeline_config.model,
                                            is_training=False)
      # Builds the inference ops into the current default graph.
      exporter._build_detection_graph(
          input_type='tf_example',
          detection_model=detection_model,
          input_shape=None,
          output_collection_name='inference_op',
          graph_hook_fn=None)
      saver = tf.train.Saver()
      input_saver_def = saver.as_saver_def()
      exporter.write_graph_and_checkpoint(
          inference_graph_def=tf.get_default_graph().as_graph_def(),
          model_path=model_path,
          input_saver_def=input_saver_def,
          trained_checkpoint_prefix=trained_checkpoint_prefix)

    # Batch of two identical serialized tf.Example strings.
    tf_example_np = np.hstack([self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        # Re-import the exported meta graph and restore the checkpoint.
        new_saver = tf.train.import_meta_graph(meta_graph_path)
        new_saver.restore(sess, model_path)

        tf_example = od_graph.get_tensor_by_name('tf_example:0')
        boxes = od_graph.get_tensor_by_name('detection_boxes:0')
        scores = od_graph.get_tensor_by_name('detection_scores:0')
        classes = od_graph.get_tensor_by_name('detection_classes:0')
        keypoints = od_graph.get_tensor_by_name('detection_keypoints:0')
        masks = od_graph.get_tensor_by_name('detection_masks:0')
        num_detections = od_graph.get_tensor_by_name('num_detections:0')
        (boxes_np, scores_np, classes_np, keypoints_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, keypoints, masks, num_detections],
             feed_dict={tf_example: tf_example_np})
        # Expected values are presumably the canned outputs of FakeModel;
        # they match the other inference tests in this file.
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])
    def testUpdateMaskTypeForAllInputConfigs(self):
        """Checks a mask_type override reaches train and every eval reader."""
        original_mask_type = input_reader_pb2.NUMERICAL_MASKS
        new_mask_type = input_reader_pb2.PNG_MASKS

        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_input_reader.mask_type = original_mask_type
        # Two named eval readers, both starting from the original mask type.
        for eval_name in ("eval_1", "eval_2"):
            eval_reader = pipeline_config.eval_input_reader.add()
            eval_reader.mask_type = original_mask_type
            eval_reader.name = eval_name
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, kwargs_dict={"mask_type": new_mask_type})

        self.assertEqual(configs["train_input_config"].mask_type,
                         new_mask_type)
        for eval_input_config in configs["eval_input_configs"]:
            self.assertEqual(eval_input_config.mask_type, new_mask_type)
 def test_export_model_with_detection_only_nodes(self):
   """Exports without masks/keypoints and checks those tensors are absent.

   The model is built with add_detection_masks=False, so only the core
   detection outputs should exist in the frozen graph; looking up the
   keypoint and mask tensors must each raise KeyError.
   """
   tmp_dir = self.get_temp_dir()
   trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
   self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                         use_moving_averages=True)
   output_directory = os.path.join(tmp_dir, 'output')
   inference_graph_path = os.path.join(output_directory,
                                       'frozen_inference_graph.pb')
   with mock.patch.object(
       model_builder, 'build', autospec=True) as mock_builder:
     mock_builder.return_value = FakeModel(add_detection_masks=False)
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     exporter.export_inference_graph(
         input_type='image_tensor',
         pipeline_config=pipeline_config,
         trained_checkpoint_prefix=trained_checkpoint_prefix,
         output_directory=output_directory)
   inference_graph = self._load_inference_graph(inference_graph_path)
   with self.test_session(graph=inference_graph):
     inference_graph.get_tensor_by_name('image_tensor:0')
     inference_graph.get_tensor_by_name('detection_boxes:0')
     inference_graph.get_tensor_by_name('detection_scores:0')
     inference_graph.get_tensor_by_name('detection_classes:0')
     inference_graph.get_tensor_by_name('num_detections:0')
     # Bug fix: the original put both lookups inside a single assertRaises
     # block, so the first raise exited the context and the masks lookup
     # was never executed. Each expected failure needs its own context.
     with self.assertRaises(KeyError):
       inference_graph.get_tensor_by_name('detection_keypoints:0')
     with self.assertRaises(KeyError):
       inference_graph.get_tensor_by_name('detection_masks:0')
 def test_export_model_with_quantization_nodes(self):
   """Exports with a quantization graph rewriter and looks for quant nodes.

   The pipeline config gets a graph_rewriter.quantization section, the
   graph is exported as text (.pbtxt), and the test then scans for
   variables whose op names end in 'act_quant/min' — markers of inserted
   activation-quantization nodes.
   """
   tmp_dir = self.get_temp_dir()
   trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
   self._save_checkpoint_from_mock_model(
       trained_checkpoint_prefix,
       use_moving_averages=False,
       enable_quantization=True)
   output_directory = os.path.join(tmp_dir, 'output')
   inference_graph_path = os.path.join(output_directory,
                                       'inference_graph.pbtxt')
   with mock.patch.object(
       model_builder, 'build', autospec=True) as mock_builder:
     mock_builder.return_value = FakeModel()
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     # Enable the quantization rewriter via the text-format config.
     text_format.Merge(
         """graph_rewriter {
              quantization {
                delay: 50000
                activation_bits: 8
                weight_bits: 8
              }
            }""", pipeline_config)
     exporter.export_inference_graph(
         input_type='image_tensor',
         pipeline_config=pipeline_config,
         trained_checkpoint_prefix=trained_checkpoint_prefix,
         output_directory=output_directory,
         write_inference_graph=True)
   self._load_inference_graph(inference_graph_path, is_binary=False)
   has_quant_nodes = False
   # NOTE(review): tf.global_variables() inspects the current default
   # graph, not the graph object returned by _load_inference_graph —
   # confirm the loaded graph's variables are actually visible here.
   for v in tf.global_variables():
     if v.op.name.endswith('act_quant/min'):
       has_quant_nodes = True
       break
   self.assertTrue(has_quant_nodes)
  def test_export_graph_with_fixed_size_image_tensor_input(self):
    """Exports with an explicit input_shape and checks the SavedModel input.

    Passing input_shape to export_inference_graph should pin the serving
    signature's input tensor to that exact static shape.
    """
    input_shape = [1, 320, 320, 3]

    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(
        trained_checkpoint_prefix, use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory,
          input_shape=input_shape)
      saved_model_path = os.path.join(output_directory, 'saved_model')
      self.assertTrue(
          os.path.exists(os.path.join(saved_model_path, 'saved_model.pb')))

    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        # Load the SavedModel and resolve the input tensor through the
        # 'serving_default' signature rather than a hard-coded tensor name.
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
        signature = meta_graph.signature_def['serving_default']
        input_tensor_name = signature.inputs['inputs'].name
        image_tensor = od_graph.get_tensor_by_name(input_tensor_name)
        # The static shape must match the requested fixed input shape.
        self.assertSequenceEqual(image_tensor.get_shape().as_list(),
                                 input_shape)
  def test_export_saved_model_and_run_inference(self):
    """Exports a SavedModel and runs tf.Example inference through it.

    All input and output tensors are resolved via the 'serving_default'
    signature def, then a batch of two serialized tf.Examples is fed and
    the detection outputs are compared against the fake model's values.
    """
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    output_directory = os.path.join(tmp_dir, 'output')
    saved_model_path = os.path.join(output_directory, 'saved_model')

    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      # Keypoints and masks enabled so every output appears in the signature.
      mock_builder.return_value = FakeModel(
          add_detection_keypoints=True, add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    # Batch of two identical serialized tf.Example strings.
    tf_example_np = np.hstack([self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)

        signature = meta_graph.signature_def['serving_default']
        input_tensor_name = signature.inputs['inputs'].name
        tf_example = od_graph.get_tensor_by_name(input_tensor_name)

        # Resolve each output tensor by its signature-def name.
        boxes = od_graph.get_tensor_by_name(
            signature.outputs['detection_boxes'].name)
        scores = od_graph.get_tensor_by_name(
            signature.outputs['detection_scores'].name)
        classes = od_graph.get_tensor_by_name(
            signature.outputs['detection_classes'].name)
        keypoints = od_graph.get_tensor_by_name(
            signature.outputs['detection_keypoints'].name)
        masks = od_graph.get_tensor_by_name(
            signature.outputs['detection_masks'].name)
        num_detections = od_graph.get_tensor_by_name(
            signature.outputs['num_detections'].name)

        (boxes_np, scores_np, classes_np, keypoints_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, keypoints, masks, num_detections],
             feed_dict={tf_example: tf_example_np})
        # Expected values are presumably FakeModel's canned outputs; they
        # match the other inference tests in this file.
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])
# Beispiel #9 (aggregator-page separator and vote count "0" — not code)
 def test_export_with_nn_resize_op_not_called_without_fpn(self, mock_get):
   """Without an FPN feature extractor the NN-resize op must not be fetched.

   NOTE: mock_get is injected by a mock.patch decorator outside this view.
   """
   pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
   resizer = pipeline_config.model.ssd.image_resizer.fixed_shape_resizer
   resizer.height = 10
   resizer.width = 10
   tflite_graph_file = self._export_graph_with_postprocessing_op(
       pipeline_config)
   self.assertTrue(os.path.exists(tflite_graph_file))
   mock_get.assert_not_called()
 def testKeyValueOverrideBadKey(self):
      """Checks that overriding a nonexistent field raises ValueError."""
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      configs = self._create_and_load_test_configs(pipeline_config)
      # "no_such_field" does not exist on TrainConfig, so the merge must fail.
      bad_override = {"train_config.no_such_field": 10}
      hparams = tf.contrib.training.HParams(**bad_override)
      with self.assertRaises(ValueError):
          config_util.merge_external_params_with_configs(configs, hparams)
    def testCheckAndParseInputConfigKey(self):
        """Exercises parsing of input-config override keys, valid and invalid."""
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.eval_input_reader.add().name = "eval_1"
        pipeline_config.eval_input_reader.add().name = "eval_2"
        _write_config(pipeline_config, pipeline_config_path)
        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)

        # (key, expected is_valid, key_name, input_name, field_name) cases:
        # a fully qualified key naming one eval input, the legacy "eval_*"
        # form, and a key that is not an input-config key at all.
        parse_cases = [
            ("eval_input_configs:eval_2:shuffle",
             True, "eval_input_configs", "eval_2", "shuffle"),
            ("eval_shuffle",
             True, "eval_input_configs", None, "shuffle"),
            ("label_map_path",
             False, None, None, "label_map_path"),
        ]
        for key, want_valid, want_key, want_input, want_field in parse_cases:
            is_valid_input_config_key, key_name, input_name, field_name = (
                config_util.check_and_parse_input_config_key(configs, key))
            if want_valid:
                self.assertTrue(is_valid_input_config_key)
            else:
                self.assertFalse(is_valid_input_config_key)
            self.assertEqual(key_name, want_key)
            self.assertEqual(input_name, want_input)
            self.assertEqual(field_name, want_field)

        # Malformed or unresolvable keys must raise with a specific message.
        error_cases = [
            ("train_input_config:shuffle",
             "Invalid key format when overriding configs."),
            ("invalid_key_name:train_name:shuffle",
             "Invalid key_name when overriding input config."),
            ("eval_input_configs:unknown_eval_name:shuffle",
             "Invalid input_name when overriding input config."),
            ("eval_input_configs:eval_2:unknown_field_name",
             "Invalid field_name when overriding input config."),
        ]
        for key, error_regexp in error_cases:
            with self.assertRaisesRegexp(ValueError, error_regexp):
                config_util.check_and_parse_input_config_key(configs, key)
# Beispiel #12 (aggregator-page separator and vote count "0" — not code)
 def test_export_with_nn_resize_op_called_with_fpn(self, mock_get):
   """With an FPN feature extractor the NN-resize op getter is used once.

   Counterpart to the no-FPN test: configuring fpn min/max levels makes the
   export path call the mocked getter exactly once.
   NOTE: mock_get is injected by a mock.patch decorator outside this view.
   """
   pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
   pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
   pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
   # Presence of fpn settings is what should trigger the NN-resize path.
   pipeline_config.model.ssd.feature_extractor.fpn.min_level = 3
   pipeline_config.model.ssd.feature_extractor.fpn.max_level = 7
   tflite_graph_file = self._export_graph_with_postprocessing_op(
       pipeline_config)
   self.assertTrue(os.path.exists(tflite_graph_file))
   mock_get.assert_called_once()
 def testOverwriteBatchSizeWithBadValueType(self):
      """Tests that overwriting with a bad value type causes an exception."""
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.train_config.batch_size = 2
      configs = self._create_and_load_test_configs(pipeline_config)
      # Type should be an integer, but we're passing a string "10".
      hparams = tf.contrib.training.HParams(
          **{"train_config.batch_size": "10"})
      with self.assertRaises(TypeError):
          config_util.merge_external_params_with_configs(configs, hparams)
 def testOverwriteBatchSizeWithKeyValue(self):
      """Checks that a dotted key/value hparam overrides the batch size."""
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.train_config.batch_size = 2
      configs = self._create_and_load_test_configs(pipeline_config)
      override = {"train_config.batch_size": 10}
      hparams = tf.contrib.training.HParams(**override)
      configs = config_util.merge_external_params_with_configs(
          configs, hparams)
      self.assertEqual(10, configs["train_config"].batch_size)
    def testGetNumberOfClasses(self):
        """Checks num_classes is read back from a faster_rcnn model config."""
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.model.faster_rcnn.num_classes = 20
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        self.assertEqual(
            20, config_util.get_number_of_classes(configs["model"]))
  def test_export_and_run_inference_with_encoded_image_string_tensor(self):
    """Exports with encoded-image-string input and runs JPEG/PNG inference.

    Exports the fake model with input_type='encoded_image_string_tensor',
    then feeds a batch of two encoded images (once as JPEG, once as PNG)
    and checks the detection outputs in both cases.
    """
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      # Keypoints and masks enabled so every output tensor exists.
      mock_builder.return_value = FakeModel(
          add_detection_keypoints=True, add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='encoded_image_string_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    inference_graph = self._load_inference_graph(inference_graph_path)
    # Same 4x4 all-ones image encoded in two different formats.
    jpg_image_str = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
    png_image_str = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'png')
    with self.test_session(graph=inference_graph) as sess:
      image_str_tensor = inference_graph.get_tensor_by_name(
          'encoded_image_string_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      # Both encodings must decode to the same image and thus produce
      # identical detection outputs.
      for image_str in [jpg_image_str, png_image_str]:
        image_str_batch_np = np.hstack([image_str]* 2)
        (boxes_np, scores_np, classes_np, keypoints_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, keypoints, masks, num_detections],
             feed_dict={image_str_tensor: image_str_batch_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])
    def testUseMovingAverageForEval(self):
        """Checks eval_with_moving_averages override flips the eval flag."""
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        # Start from False so the override is observable.
        pipeline_config.eval_config.use_moving_averages = False
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, kwargs_dict={"eval_with_moving_averages": True})
        self.assertEqual(True, configs["eval_config"].use_moving_averages)
  def test_export_graph_saves_pipeline_file(self):
    """Checks that exporting also writes pipeline.config to the output dir.

    The written pipeline.config must parse back into a proto equal to the
    one passed to export_inference_graph.
    """
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)
      expected_pipeline_path = os.path.join(
          output_directory, 'pipeline.config')
      self.assertTrue(os.path.exists(expected_pipeline_path))

      # Parse the written file back and compare against the input proto.
      written_pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      with tf.gfile.GFile(expected_pipeline_path, 'r') as f:
        proto_str = f.read()
        text_format.Merge(proto_str, written_pipeline_config)
        self.assertProtoEquals(pipeline_config, written_pipeline_config)
    def test_save_pipeline_config(self):
        """Round-trips a pipeline config through save_pipeline_config."""
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.model.faster_rcnn.num_classes = 10
        pipeline_config.train_config.batch_size = 32
        pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
        pipeline_config.eval_config.num_examples = 20
        pipeline_config.eval_input_reader.add().queue_capacity = 100

        # Save, reload as a configs dict, rebuild the proto, and compare.
        config_util.save_pipeline_config(pipeline_config, self.get_temp_dir())
        saved_path = os.path.join(self.get_temp_dir(), "pipeline.config")
        configs = config_util.get_configs_from_pipeline_file(saved_path)
        reconstructed = config_util.create_pipeline_proto_from_configs(configs)
        self.assertEqual(pipeline_config, reconstructed)
def main(argv):
    """Exports a TFLite-compatible SSD graph driven by command-line FLAGS.

    Reads the pipeline config from FLAGS.pipeline_config_path, layers the
    textual FLAGS.config_override on top, and delegates the export to
    export_tflite_ssd_graph_lib.export_tflite_graph.
    """
    del argv  # Unused.
    # Fail fast if any of the required flags were not provided.
    flags.mark_flag_as_required('output_directory')
    flags.mark_flag_as_required('pipeline_config_path')
    flags.mark_flag_as_required('trained_checkpoint_prefix')

    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()

    with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    # Inline overrides are merged after (on top of) the file contents.
    text_format.Merge(FLAGS.config_override, pipeline_config)
    export_tflite_ssd_graph_lib.export_tflite_graph(
        pipeline_config, FLAGS.trained_checkpoint_prefix,
        FLAGS.output_directory, FLAGS.add_postprocessing_op,
        FLAGS.max_detections, FLAGS.max_classes_per_detection,
        FLAGS.use_regular_nms)
# Beispiel #21 (aggregator-page separator and vote count "0" — not code)
def main(_):
  """Exports an inference graph per the pipeline config given via flags."""
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as config_file:
    text_format.Merge(config_file.read(), pipeline_config)
  # Command-line overrides are merged on top of the file contents.
  text_format.Merge(FLAGS.config_override, pipeline_config)
  # '-1' marks a dynamic dimension in --input_shape; translate it to None.
  input_shape = None
  if FLAGS.input_shape:
    input_shape = [None if dim == '-1' else int(dim)
                   for dim in FLAGS.input_shape.split(',')]
  exporter.export_inference_graph(
      FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_prefix,
      FLAGS.output_directory, input_shape=input_shape,
      write_inference_graph=FLAGS.write_inference_graph)
# Beispiel #22
# 0
 def test_export_tflite_graph_without_moving_averages(self):
   """Exports without EMA variables and checks the inference outputs."""
   config = pipeline_pb2.TrainEvalPipelineConfig()
   config.eval_config.use_moving_averages = False
   resizer = config.model.ssd.image_resizer.fixed_shape_resizer
   resizer.height = 10
   resizer.width = 10
   config.model.ssd.num_classes = 2
   box_coder = config.model.ssd.box_coder.faster_rcnn_box_coder
   box_coder.y_scale = 10.0
   box_coder.x_scale = 10.0
   box_coder.height_scale = 5.0
   box_coder.width_scale = 5.0
   graph_path = self._export_graph(config)
   self.assertTrue(os.path.exists(graph_path))
   boxes, scores = self._import_graph_and_run_inference(graph_path)
   self.assertAllClose(boxes, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
   self.assertAllClose(scores, [[[0.7, 0.6], [0.9, 0.0]]])
    def testNewBatchSize(self):
        """Checks that an hparams batch_size override reaches train_config."""
        hparams = tf.contrib.training.HParams(batch_size=16)
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_config.batch_size = 2  # Pre-override value.
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        self.assertEqual(16, configs["train_config"].batch_size)
    def testNewBatchSizeWithClipping(self):
        """Checks that a fractional batch_size override is clipped up to 1."""
        hparams = tf.contrib.training.HParams(batch_size=0.5)
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_config.batch_size = 2  # Pre-override value.
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        # Batch sizes below 1 are clipped up to 1.
        self.assertEqual(1, configs["train_config"].batch_size)
 def test_export_graph_with_encoded_image_string_input(self):
   """Exports with encoded_image_string_tensor input and checks SavedModel."""
   temp_dir = self.get_temp_dir()
   checkpoint_prefix = os.path.join(temp_dir, 'model.ckpt')
   self._save_checkpoint_from_mock_model(checkpoint_prefix,
                                         use_moving_averages=False)
   # Stub out the model builder so a FakeModel backs the exported graph.
   with mock.patch.object(
       model_builder, 'build', autospec=True) as mock_builder:
     mock_builder.return_value = FakeModel()
     out_dir = os.path.join(temp_dir, 'output')
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     pipeline_config.eval_config.use_moving_averages = False
     exporter.export_inference_graph(
         input_type='encoded_image_string_tensor',
         pipeline_config=pipeline_config,
         trained_checkpoint_prefix=checkpoint_prefix,
         output_directory=out_dir)
     saved_model_path = os.path.join(out_dir, 'saved_model', 'saved_model.pb')
     self.assertTrue(os.path.exists(saved_model_path))
    def test_create_pipeline_proto_from_configs(self):
        """Checks the configs dict can be rebuilt into the original proto."""
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

        original = pipeline_pb2.TrainEvalPipelineConfig()
        original.model.faster_rcnn.num_classes = 10
        original.train_config.batch_size = 32
        original.train_input_reader.label_map_path = "path/to/label_map"
        original.eval_config.num_examples = 20
        original.eval_input_reader.add().queue_capacity = 100
        _write_config(original, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        rebuilt = config_util.create_pipeline_proto_from_configs(configs)
        self.assertEqual(original, rebuilt)
    def testTrainShuffle(self):
        """Checks that a train_shuffle override reaches train_input_config."""
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_input_reader.shuffle = True  # Pre-override value.
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, kwargs_dict={"train_shuffle": False})
        self.assertEqual(False, configs["train_input_config"].shuffle)
    def testMergingKeywordArguments(self):
        """Checks that a train_steps keyword override reaches train_config."""
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_config.num_steps = 100  # Pre-override value.
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, kwargs_dict={"train_steps": 10})
        self.assertEqual(10, configs["train_config"].num_steps)
    def testNewMomentumOptimizerValue(self):
        """Checks that momentum_optimizer_value overrides are clipped to 1.0."""
        hparams = tf.contrib.training.HParams(momentum_optimizer_value=1.1)
        config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        rms_prop = pipeline_config.train_config.optimizer.rms_prop_optimizer
        rms_prop.momentum_optimizer_value = 0.4  # Pre-override value.
        _write_config(pipeline_config, config_path)

        configs = config_util.get_configs_from_pipeline_file(config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        updated = configs["train_config"].optimizer.rms_prop_optimizer
        # Momentum values above 1.0 are clipped down to 1.0.
        self.assertAlmostEqual(1.0, updated.momentum_optimizer_value)
# Beispiel #30
# 0
 def test_export_tflite_graph_with_softmax_score_conversion(self):
   """Exports with a SOFTMAX score converter and checks inference outputs."""
   config = pipeline_pb2.TrainEvalPipelineConfig()
   config.eval_config.use_moving_averages = False
   config.model.ssd.post_processing.score_converter = (
       post_processing_pb2.PostProcessing.SOFTMAX)
   resizer = config.model.ssd.image_resizer.fixed_shape_resizer
   resizer.height = 10
   resizer.width = 10
   config.model.ssd.num_classes = 2
   box_coder = config.model.ssd.box_coder.faster_rcnn_box_coder
   box_coder.y_scale = 10.0
   box_coder.x_scale = 10.0
   box_coder.height_scale = 5.0
   box_coder.width_scale = 5.0
   graph_path = self._export_graph(config)
   self.assertTrue(os.path.exists(graph_path))
   boxes, scores = self._import_graph_and_run_inference(graph_path)
   self.assertAllClose(boxes, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
   # Scores are the softmax of the raw class logits.
   self.assertAllClose(scores,
                       [[[0.524979, 0.475021], [0.710949, 0.28905]]])