def test_export_graph_with_fixed_size_image_tensor_input(self):
    """Export with a fixed input shape; the SavedModel input must keep it."""
    fixed_shape = [1, 320, 320, 3]

    tmp_dir = self.get_temp_dir()
    checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    export_dir = os.path.join(tmp_dir, 'output')
    self._save_checkpoint_from_mock_model(
        checkpoint_prefix, use_moving_averages=False)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      config = pipeline_pb2.TrainEvalPipelineConfig()
      config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=config,
          trained_checkpoint_prefix=checkpoint_prefix,
          output_directory=export_dir,
          input_shape=fixed_shape)
      saved_model_path = os.path.join(export_dir, 'saved_model')
      self.assertTrue(
          os.path.exists(os.path.join(saved_model_path, 'saved_model.pb')))

    # Reload the exported SavedModel and confirm the serving input
    # placeholder retained the fixed shape requested at export time.
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
        signature = meta_graph.signature_def['serving_default']
        input_name = signature.inputs['inputs'].name
        image_tensor = od_graph.get_tensor_by_name(input_name)
        self.assertSequenceEqual(image_tensor.get_shape().as_list(),
                                 fixed_shape)
# Ejemplo n.º 2
 def test_export_model_with_detection_only_nodes(self):
   """Exports without masks and verifies mask tensors are absent.

   With FakeModel(add_detection_masks=False) the frozen graph must expose
   the standard detection outputs but raise KeyError when the
   'detection_masks:0' tensor is looked up.
   """
   tmp_dir = self.get_temp_dir()
   trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
   self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                         use_moving_averages=True)
   output_directory = os.path.join(tmp_dir, 'output')
   inference_graph_path = os.path.join(output_directory,
                                       'frozen_inference_graph.pb')
   with mock.patch.object(
       model_builder, 'build', autospec=True) as mock_builder:
     # Masks disabled: the fake model should emit no detection_masks output.
     mock_builder.return_value = FakeModel(add_detection_masks=False)
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     exporter.export_inference_graph(
         input_type='image_tensor',
         pipeline_config=pipeline_config,
         trained_checkpoint_prefix=trained_checkpoint_prefix,
         output_directory=output_directory)
   inference_graph = self._load_inference_graph(inference_graph_path)
   with self.test_session(graph=inference_graph):
     # get_tensor_by_name raises KeyError for a missing tensor, so these
     # bare lookups double as existence assertions.
     inference_graph.get_tensor_by_name('image_tensor:0')
     inference_graph.get_tensor_by_name('detection_boxes:0')
     inference_graph.get_tensor_by_name('detection_scores:0')
     inference_graph.get_tensor_by_name('detection_classes:0')
     inference_graph.get_tensor_by_name('num_detections:0')
     with self.assertRaises(KeyError):
       inference_graph.get_tensor_by_name('detection_masks:0')
# Ejemplo n.º 3
def main(_):
    """Exports an inference graph for a ResNet-50 classification model.

    FLAGS.input_shape is a comma-separated list of dimensions in which -1
    marks a dynamic (unknown) dimension; when unset, a fully dynamic NHWC
    shape with 3 channels is used.
    """
    # Specify which gpu to be used
    # os.environ["CUDA_VISIBLE_DEVICES"] = '1'

    cls_model = resnet_model.Model(resnet_size=50,
                                   bottleneck=True,
                                   num_classes=26,
                                   num_filters=64,
                                   kernel_size=7,
                                   conv_stride=2,
                                   first_pool_size=3,
                                   first_pool_stride=2,
                                   block_sizes=[3, 4, 6, 3],
                                   block_strides=[1, 2, 2, 2],
                                   resnet_version=resnet_model.DEFAULT_VERSION,
                                   data_format='channels_first',
                                   dtype=resnet_model.DEFAULT_DTYPE)
    if FLAGS.input_shape:
        # BUG FIX: split(',') yields strings, so the sentinel must be the
        # string '-1'. Comparing to the int -1 was always unequal, so -1
        # was never translated into a dynamic (None) dimension.
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        input_shape = [None, None, None, 3]
    exporter.export_inference_graph(FLAGS.input_type, cls_model,
                                    FLAGS.trained_checkpoint_prefix,
                                    FLAGS.output_directory, input_shape)
  def test_export_saved_model_and_run_inference(self):
    """Exports a tf_example SavedModel and runs inference through it.

    Exports with FakeModel(add_detection_masks=True), reloads the
    SavedModel via its 'serving_default' signature, feeds a batch of two
    serialized tf.Examples, and checks all five detection outputs.
    """
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=False)
    output_directory = os.path.join(tmp_dir, 'output')
    saved_model_path = os.path.join(output_directory, 'saved_model')

    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    # Batch of two identical serialized tf.Examples (4x4x3 all-ones image).
    tf_example_np = np.hstack([self._create_tf_example(
        np.ones((4, 4, 3)).astype(np.uint8))] * 2)
    with tf.Graph().as_default() as od_graph:
      with self.test_session(graph=od_graph) as sess:
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)

        # Resolve input/output tensors through the serving signature so
        # the test does not hard-code tensor names.
        signature = meta_graph.signature_def['serving_default']
        input_tensor_name = signature.inputs['inputs'].name
        tf_example = od_graph.get_tensor_by_name(input_tensor_name)

        boxes = od_graph.get_tensor_by_name(
            signature.outputs['detection_boxes'].name)
        scores = od_graph.get_tensor_by_name(
            signature.outputs['detection_scores'].name)
        classes = od_graph.get_tensor_by_name(
            signature.outputs['detection_classes'].name)
        masks = od_graph.get_tensor_by_name(
            signature.outputs['detection_masks'].name)
        num_detections = od_graph.get_tensor_by_name(
            signature.outputs['num_detections'].name)

        # NOTE(review): expected values presumably mirror FakeModel's fixed
        # predictions — confirm against the FakeModel definition.
        (boxes_np, scores_np, classes_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
             feed_dict={tf_example: tf_example_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])
def main(_):
    """Exports an inference graph for the classification model.

    FLAGS.input_shape is a comma-separated list of dimensions in which -1
    marks a dynamic (unknown) dimension; defaults to [None, 28, 28, 3].
    """
    cls_model = model.Model(is_training=False)
    if FLAGS.input_shape:
        # BUG FIX: split(',') yields strings, so compare against '-1'.
        # Comparing to the int -1 was always unequal, so -1 was never
        # translated into a dynamic (None) dimension.
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        input_shape = [None, 28, 28, 3]
    exporter.export_inference_graph(FLAGS.input_type, cls_model,
                                    FLAGS.trained_checkpoint_prefix,
                                    FLAGS.output_directory, input_shape)
# Ejemplo n.º 6
def main(_):
    """Validates required flags, parses the pipeline config, and exports."""
    assert FLAGS.pipeline_config_path, '`pipeline_config_path` is missing'
    assert FLAGS.trained_checkpoint_prefix, (
        '`trained_checkpoint_prefix` is missing')
    assert FLAGS.output_directory, '`output_directory` is missing'

    # Read the config file first, then merge its text into the proto.
    with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as config_file:
        config_text = config_file.read()
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    text_format.Merge(config_text, pipeline_config)

    exporter.export_inference_graph(FLAGS.input_type, pipeline_config,
                                    FLAGS.trained_checkpoint_prefix,
                                    FLAGS.output_directory)
# Ejemplo n.º 7
def main(_):
    """Exports an inference graph for the CLASS_NUM-way classifier.

    FLAGS.input_shape is a comma-separated list of dimensions in which -1
    marks a dynamic (unknown) dimension; defaults to the module-level
    INPUT_HEIGHT/INPUT_WIDTH/INPUT_CHANNEL constants.
    """
    cls_model = model.Model(is_training=False, num_classes=CLASS_NUM)
    if FLAGS.input_shape:
        # BUG FIX: split(',') yields strings, so compare against '-1'.
        # Comparing to the int -1 was always unequal, so -1 was never
        # translated into a dynamic (None) dimension.
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        input_shape = [None, INPUT_HEIGHT, INPUT_WIDTH, INPUT_CHANNEL]
    exporter.export_inference_graph(FLAGS.input_type, cls_model,
                                    FLAGS.checkpoint_prefix, FLAGS.output_dir,
                                    input_shape)
# Ejemplo n.º 8
def main(_):
    """Parses the pipeline config and exports the detection inference graph."""
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    # '-1' in the comma-separated shape string denotes a dynamic dimension.
    input_shape = None
    if FLAGS.input_shape:
        dims = FLAGS.input_shape.split(',')
        input_shape = [None if dim == '-1' else int(dim) for dim in dims]

    exporter.export_inference_graph(FLAGS.input_type, pipeline_config,
                                    FLAGS.trained_checkpoint_prefix,
                                    FLAGS.output_directory, input_shape)
def main(_):
    """Exports an inference graph for the classification model on GPU 1.

    FLAGS.input_shape is a comma-separated list of dimensions in which -1
    marks a dynamic (unknown) dimension; defaults to a fully dynamic NHWC
    shape with 3 channels.
    """
    # Specify which gpu to be used
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'

    cls_model = model.Model(is_training=False)
    if FLAGS.input_shape:
        # BUG FIX: split(',') yields strings, so compare against '-1'.
        # Comparing to the int -1 was always unequal, so -1 was never
        # translated into a dynamic (None) dimension.
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        input_shape = [None, None, None, 3]
    exporter.export_inference_graph(FLAGS.input_type, cls_model,
                                    FLAGS.trained_checkpoint_prefix,
                                    FLAGS.output_directory, input_shape)
# Ejemplo n.º 10
  def test_export_and_run_inference_with_encoded_image_string_tensor(self):
    """Exports with encoded-image-string input and runs JPEG/PNG batches.

    Both encodings of the same 4x4x3 all-ones image, batched twice, must
    produce identical detection outputs from the frozen graph.
    """
    tmp_dir = self.get_temp_dir()
    trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
    self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                          use_moving_averages=True)
    output_directory = os.path.join(tmp_dir, 'output')
    inference_graph_path = os.path.join(output_directory,
                                        'frozen_inference_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(add_detection_masks=True)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='encoded_image_string_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=trained_checkpoint_prefix,
          output_directory=output_directory)

    inference_graph = self._load_inference_graph(inference_graph_path)
    # Same image content in two different encodings.
    jpg_image_str = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
    png_image_str = self._create_encoded_image_string(
        np.ones((4, 4, 3)).astype(np.uint8), 'png')
    with self.test_session(graph=inference_graph) as sess:
      image_str_tensor = inference_graph.get_tensor_by_name(
          'encoded_image_string_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      masks = inference_graph.get_tensor_by_name('detection_masks:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      for image_str in [jpg_image_str, png_image_str]:
        # Batch of two identical encoded images.
        image_str_batch_np = np.hstack([image_str]* 2)
        # NOTE(review): expected values presumably mirror FakeModel's fixed
        # predictions — confirm against the FakeModel definition.
        (boxes_np, scores_np, classes_np, masks_np,
         num_detections_np) = sess.run(
             [boxes, scores, classes, masks, num_detections],
             feed_dict={image_str_tensor: image_str_batch_np})
        self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.8, 0.8]],
                                       [[0.5, 0.5, 1.0, 1.0],
                                        [0.0, 0.0, 0.0, 0.0]]])
        self.assertAllClose(scores_np, [[0.7, 0.6],
                                        [0.9, 0.0]])
        self.assertAllClose(classes_np, [[1, 2],
                                         [2, 1]])
        self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
        self.assertAllClose(num_detections_np, [2, 1])
# Ejemplo n.º 11
 def test_export_graph_with_moving_averages(self):
   """Smoke test: export succeeds when moving averages are enabled."""
   tmp_dir = self.get_temp_dir()
   ckpt_prefix = os.path.join(tmp_dir, 'model.ckpt')
   export_dir = os.path.join(tmp_dir, 'output')
   self._save_checkpoint_from_mock_model(ckpt_prefix,
                                         use_moving_averages=True)
   with mock.patch.object(
       model_builder, 'build', autospec=True) as mock_builder:
     mock_builder.return_value = FakeModel()
     config = pipeline_pb2.TrainEvalPipelineConfig()
     config.eval_config.use_moving_averages = True
     exporter.export_inference_graph(
         input_type='image_tensor',
         pipeline_config=config,
         trained_checkpoint_prefix=ckpt_prefix,
         output_directory=export_dir)
def main(_):
    """Validates flags, parses the pipeline config, and exports the graph.

    Raises:
        ValueError: if `input_type` or `data_type` is not an allowed value.
        FileNotFoundError: if the pipeline config file does not exist.
    """
    if FLAGS.input_type not in input_type:
        raise ValueError("The input_type must be one from 'image_tensor' or 'inputs':{}".format(FLAGS.input_type))
    if FLAGS.data_type not in types:
        raise ValueError("The data_type must be one from 'uint' or float:{}".format(FLAGS.data_type))

    if not os.path.exists(FLAGS.pipline_config_path):
        raise FileNotFoundError("The file {} is not found".format(FLAGS.pipline_config_path))

    if not os.path.exists(FLAGS.output_dict):
        # makedirs (not mkdir) so nested output paths also work.
        os.makedirs(FLAGS.output_dict)
        print("Create model's save output path {}".format(FLAGS.output_dict))

    # BUG FIX: the exporter expects a parsed TrainEvalPipelineConfig proto
    # via `pipeline_config=` (as every other caller in this file does), not
    # the config file path passed as `config=`.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.gfile.GFile(FLAGS.pipline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    exporter.export_inference_graph(
        input_type=FLAGS.input_type,
        pipeline_config=pipeline_config,
        trained_checkpoint_prefix=FLAGS.trained_checkpoint_prefix,
        output_directory=FLAGS.output_dict)
 def test_export_graph_with_tf_example_input(self):
   """Exports with tf_example input and checks saved_model.pb was written."""
   tmp_dir = self.get_temp_dir()
   trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
   self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                         use_moving_averages=False)
   with mock.patch.object(
       model_builder, 'build', autospec=True) as mock_builder:
     mock_builder.return_value = FakeModel()
     output_directory = os.path.join(tmp_dir, 'output')
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     pipeline_config.eval_config.use_moving_averages = False
     exporter.export_inference_graph(
         input_type='tf_example',
         pipeline_config=pipeline_config,
         trained_checkpoint_prefix=trained_checkpoint_prefix,
         output_directory=output_directory)
     # Only asserts that the SavedModel file exists; inference is covered
     # by other tests in this file.
     self.assertTrue(os.path.exists(os.path.join(
         output_directory, 'saved_model', 'saved_model.pb')))
# Ejemplo n.º 14
    def test_raise_runtime_error_on_images_with_different_sizes(self):
        """A batch of differently sized encoded images must fail at runtime.

        Feeds one 4x4 and one 2x2 JPEG into the encoded-image-string graph
        and expects an InvalidArgumentError complaining about inconsistent
        TensorArray shapes.
        """
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
        self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                              use_moving_averages=True)
        output_directory = os.path.join(tmp_dir, 'output')
        inference_graph_path = os.path.join(output_directory,
                                            'frozen_inference_graph.pb')
        with mock.patch.object(model_builder, 'build',
                               autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=True)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type='encoded_image_string_tensor',
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory)

        inference_graph = self._load_inference_graph(inference_graph_path)
        # Two valid JPEGs of different spatial sizes — legal individually,
        # inconsistent as one batch.
        large_image = self._create_encoded_image_string(
            np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
        small_image = self._create_encoded_image_string(
            np.ones((2, 2, 3)).astype(np.uint8), 'jpg')

        image_str_batch_np = np.hstack([large_image, small_image])
        with self.test_session(graph=inference_graph) as sess:
            image_str_tensor = inference_graph.get_tensor_by_name(
                'encoded_image_string_tensor:0')
            boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
            scores = inference_graph.get_tensor_by_name('detection_scores:0')
            classes = inference_graph.get_tensor_by_name('detection_classes:0')
            masks = inference_graph.get_tensor_by_name('detection_masks:0')
            num_detections = inference_graph.get_tensor_by_name(
                'num_detections:0')
            # The error message is anchored at the start to pin down the
            # exact failure mode (inconsistent TensorArray shapes).
            with self.assertRaisesRegexp(
                    tf.errors.InvalidArgumentError,
                    '^TensorArray has inconsistent shapes.'):
                sess.run([boxes, scores, classes, masks, num_detections],
                         feed_dict={image_str_tensor: image_str_batch_np})
 def test_export_graph_with_moving_averages(self):
   """Exports with moving averages and inspects the exported variables.

   Asserts the SavedModel file exists and that the exported checkpoint
   contains at least conv2d/bias, conv2d/kernel and global_step.
   NOTE(review): presumably the exporter folds the averaged weights back
   into the plain variable names — confirm against the exporter code.
   """
   tmp_dir = self.get_temp_dir()
   trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
   self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
                                         use_moving_averages=True)
   output_directory = os.path.join(tmp_dir, 'output')
   with mock.patch.object(
       model_builder, 'build', autospec=True) as mock_builder:
     mock_builder.return_value = FakeModel()
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     pipeline_config.eval_config.use_moving_averages = True
     exporter.export_inference_graph(
         input_type='image_tensor',
         pipeline_config=pipeline_config,
         trained_checkpoint_prefix=trained_checkpoint_prefix,
         output_directory=output_directory)
     self.assertTrue(os.path.exists(os.path.join(
         output_directory, 'saved_model', 'saved_model.pb')))
   # Subset check (not equality): the export may contain extra variables.
   expected_variables = set(['conv2d/bias', 'conv2d/kernel', 'global_step'])
   actual_variables = set(
       [var_name for var_name, _ in tf.train.list_variables(output_directory)])
   self.assertTrue(expected_variables.issubset(actual_variables))
def main(_):
    """Builds the pipeline config from flags and exports the model graph."""
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    # Apply any command-line overrides on top of the file contents.
    text_format.Merge(FLAGS.config_override, pipeline_config)

    # '-1' in the comma-separated shape string denotes a dynamic dimension.
    input_shape = None
    if FLAGS.input_shape:
        input_shape = [
            None if dim == '-1' else int(dim)
            for dim in FLAGS.input_shape.split(',')
        ]

    side_input_shapes = None
    side_input_names = None
    side_input_types = None
    if FLAGS.use_side_inputs:
        side_input_shapes, side_input_names, side_input_types = (
            exporter.parse_side_inputs(FLAGS.side_input_shapes,
                                       FLAGS.side_input_names,
                                       FLAGS.side_input_types))

    additional_output_tensor_names = None
    if FLAGS.additional_output_tensor_names:
        additional_output_tensor_names = (
            FLAGS.additional_output_tensor_names.split(','))

    exporter.export_inference_graph(
        FLAGS.input_type,
        pipeline_config,
        FLAGS.trained_checkpoint_prefix,
        FLAGS.output_directory,
        input_shape=input_shape,
        write_inference_graph=FLAGS.write_inference_graph,
        additional_output_tensor_names=additional_output_tensor_names,
        use_side_inputs=FLAGS.use_side_inputs,
        side_input_shapes=side_input_shapes,
        side_input_names=side_input_names,
        side_input_types=side_input_types)
def main(_):
    """Exports an inference graph for the distracted-driver classifier.

    Builds the 10-class model at a fixed default image size and exports it
    from a hard-coded checkpoint under `out_dir`.
    """
    # Select which hardware to use.
    # os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    original_image_size = (480, 640)  # NOTE(review): unused; kept for reference
    model_image_size = (240, 320)
    class_model = model.Model(num_classes=10,
                              is_training=False,
                              fixed_resize_side_min=300,
                              default_image_height=model_image_size[0],
                              default_image_width=model_image_size[1])
    # All dimensions dynamic except the 3 colour channels.
    input_shape = [None, None, None, 3]

    input_type = 'image_tensor'
    # CLEANUP: the previous first assignment ('model.ckpt-3901') was dead
    # code, immediately overwritten; only this checkpoint was ever used.
    ckpt_name = 'model.ckpt-29878'
    out_dir = r"E:\tmp\data\state-farm-distracted-driver-detection\output"
    logs_dir = os.path.join(out_dir, "logs")
    trained_checkpoint_prefix = os.path.join(logs_dir, ckpt_name)
    exporter.export_inference_graph(input_type,
                                    class_model,
                                    trained_checkpoint_prefix,
                                    output_directory=out_dir,
                                    input_shape=input_shape)
  def test_export_graph_saves_pipeline_file(self):
    """Export writes a pipeline.config that round-trips to the input proto."""
    tmp_dir = self.get_temp_dir()
    ckpt_prefix = os.path.join(tmp_dir, 'model.ckpt')
    export_dir = os.path.join(tmp_dir, 'output')
    self._save_checkpoint_from_mock_model(ckpt_prefix,
                                          use_moving_averages=True)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel()
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          trained_checkpoint_prefix=ckpt_prefix,
          output_directory=export_dir)
      saved_config_path = os.path.join(export_dir, 'pipeline.config')
      self.assertTrue(os.path.exists(saved_config_path))

      # Re-parse the written config and compare against what was exported.
      reloaded_config = pipeline_pb2.TrainEvalPipelineConfig()
      with tf.gfile.GFile(saved_config_path, 'r') as f:
        text_format.Merge(f.read(), reloaded_config)
      self.assertProtoEquals(pipeline_config, reloaded_config)