def freeze_model():
    model_path_suffix = os.path.join(FLAGS.network_def,
                                     'input_{}_output_{}'.format(FLAGS.input_size, FLAGS.heatmap_size),
                                     'joints_{}'.format(FLAGS.num_of_joints),
                                     'stages_{}'.format(FLAGS.cpm_stages),
                                     'init_{}_rate_{}_step_{}'.format(FLAGS.init_lr, FLAGS.lr_decay_rate,
                                                                      FLAGS.lr_decay_step)
                                     )
    model_save_dir = os.path.join('models',
                                  'weights',
                                  model_path_suffix)
    model_path = os.path.join(model_save_dir, FLAGS.model_path)
    # NOTE: the FLAGS-derived path above is immediately overridden with a
    # hard-coded checkpoint prefix; remove this line to use the computed path.
    model_path = 'models/weights/cpm_hand'

    # Load graph and dump to protobuf
    meta_graph = tf.train.import_meta_graph(model_path + '.meta')
    tf.train.write_graph(tf.get_default_graph(), 'frozen_models/', 'graph_proto.pb')

    output_graph_path = os.path.join('frozen_models', '{}_frozen.pb'.format('cpm_hand'))
    freeze_graph(input_graph='frozen_models/graph_proto.pb',
                 input_saver='',
                 input_checkpoint=model_path,
                 output_graph=output_graph_path,
                 output_node_names=FLAGS.output_node_names,
                 restore_op_name='save/restore_all',
                 clear_devices=True,
                 initializer_nodes='',
                 variable_names_blacklist='',
                 input_binary=False,
                 filename_tensor_name='save/Const:0')
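Once frozen, the .pb can be served without the original checkpoint. A minimal loading sketch (the tensor names passed at the bottom are hypothetical placeholders, not names taken from the CPM model above):

import numpy as np
import tensorflow as tf

def run_frozen_graph(frozen_pb_path, input_name, output_name, input_value):
    # Parse the serialized GraphDef and import it into a fresh graph.
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(frozen_pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
        with tf.Session(graph=graph) as sess:
            output_tensor = graph.get_tensor_by_name(output_name)
            return sess.run(output_tensor, feed_dict={input_name: input_value})

# Hypothetical tensor names; inspect the frozen graph for the real ones.
heatmaps = run_frozen_graph('frozen_models/cpm_hand_frozen.pb',
                            'input_placeholder:0', 'output_node:0',
                            np.zeros((1, 368, 368, 3), dtype=np.float32))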
Example 2
  def testFreezeGraph(self):

    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    with tf.Graph().as_default():
      variable_node = tf.Variable(1.0, name="variable_node")
      output_node = tf.multiply(variable_node, 2.0, name="output_node")
      sess = tf.Session()
      init = tf.global_variables_initializer()
      sess.run(init)
      output = sess.run(output_node)
      self.assertNear(2.0, output, 0.00001)
      saver = tf.train.Saver()
      saver.save(sess, checkpoint_prefix, global_step=0,
                 latest_filename=checkpoint_state_name)
      tf.train.write_graph(sess.graph.as_graph_def(), self.get_temp_dir(),
                           input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    input_checkpoint_path = checkpoint_prefix + "-0"
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, input_checkpoint_path,
                              output_node_names, restore_op_name,
                              filename_tensor_name, output_graph_path,
                              clear_devices, "")

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with tf.Graph().as_default():
      output_graph_def = tf.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(output_graph_def, name="")

      self.assertEqual(4, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("Variable", node.op)

      with tf.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
def convert_tf_session_bundle(session_bundle_dir,
                              output_node_names,
                              output_dir,
                              quantization_dtype=None,
                              skip_op_check=False,
                              strip_debug_ops=False):
  """Freeze the Session Bundle model and check the model compatibility with
  Tensorflow.js.

  Optimize and convert the model to Tensorflow.js format, when the model passes
  the compatiblity check.

  Args:
    session_bundle_dir: string The session bundle model directory.
    output_node_names: string The names of the output nodes, comma separated.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'tensorflowjs_model.pb'
      - a JSON weights manifest file named 'weights_manifest.json'
      - possibly sharded binary weight files.
    quantization_dtype: An optional numpy dtype to quantize weights to for
      compression. Only np.uint8 and np.uint16 are supported.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
  """

  print("Tensorflow has deprecated the Session Bundle format, ",
        "please migrate to SavedModel.")
  if not os.path.exists(output_dir):
    os.makedirs(output_dir)
  output_graph = os.path.join(output_dir, DEFAULT_MODEL_PB_FILENAME)

  checkpoint = tf.train.get_checkpoint_state(session_bundle_dir)
  input_checkpoint = checkpoint.model_checkpoint_path
  frozen_file = output_graph + '.frozen'
  freeze_graph.freeze_graph(
      '',
      '',
      True,
      input_checkpoint,
      output_node_names,
      '',
      '',
      frozen_file,
      True,
      '',
      input_meta_graph=input_checkpoint + '.meta')
  graph = load_graph(output_graph + '.frozen', output_node_names)

  try:
    optimize_graph(graph, output_graph, quantization_dtype=quantization_dtype,
                   skip_op_check=skip_op_check, strip_debug_ops=strip_debug_ops)
  finally:
    # Clean up the temp files.
    if os.path.exists(frozen_file):
      os.remove(frozen_file)
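A hedged example call for the converter above; the paths and node name are placeholders:

import numpy as np

convert_tf_session_bundle(
    session_bundle_dir='/tmp/my_session_bundle',  # placeholder path
    output_node_names='Softmax',                  # placeholder node name
    output_dir='/tmp/tfjs_model',
    quantization_dtype=np.uint8,  # optional one-byte weight quantization
    strip_debug_ops=True)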
def convert_tf_saved_model(saved_model_dir, output_node_names,
                           output_dir, saved_model_tags='serve',
                           quantization_dtype=None,
                           skip_op_check=False,
                           strip_debug_ops=False):
  """Freeze the SavedModel and check the model compatibility with Tensorflow.js.

  Optimize and convert the model to Tensorflow.js format, when the model passes
  the compatiblity check.

  Args:
    saved_model_dir: string The saved model directory.
    output_node_names: string The names of the output nodes, comma separated.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'tensorflowjs_model.pb'
      - a JSON weights manifest file named 'weights_manifest.json'
      - possibly sharded binary weight files.
    saved_model_tags: string Tagset of the MetaGraphDef to load, in comma
      separated string format. Defaulted to 'serve'
    quantization_dtype: An optional numpy dtype to quantize weights to for
      compression. Only np.uint8 and np.uint16 are supported.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
  """

  if not os.path.exists(output_dir):
    os.makedirs(output_dir)
  output_graph = os.path.join(output_dir, DEFAULT_MODEL_PB_FILENAME)

  frozen_file = output_graph + '.frozen'
  freeze_graph.freeze_graph(
      '',
      '',
      True,
      '',
      output_node_names,
      '',
      '',
      frozen_file,
      True,
      '',
      saved_model_tags=saved_model_tags,
      input_saved_model_dir=saved_model_dir)

  graph = load_graph(output_graph + '.frozen', output_node_names)
  try:
    optimize_graph(graph, output_graph, quantization_dtype=quantization_dtype,
                   skip_op_check=skip_op_check, strip_debug_ops=strip_debug_ops)
  finally:
    # Clean up the temp files.
    if os.path.exists(frozen_file):
      os.remove(frozen_file)
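The SavedModel variant is invoked the same way; a sketch with placeholder paths and node names:

convert_tf_saved_model(
    saved_model_dir='/tmp/my_saved_model',
    output_node_names='logits,probabilities',
    output_dir='/tmp/tfjs_model',
    saved_model_tags='serve')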
    def _export_graph(self):
        """
        Exports latest saved model to .bytes format for Unity embedding.
        """
        target_nodes = ','.join(self._process_graph())
        ckpt = tf.train.get_checkpoint_state(self.model_path)
        freeze_graph.freeze_graph(input_graph=self.model_path + '/raw_graph_def.pb',
                                  input_binary=True,
                                  input_checkpoint=ckpt.model_checkpoint_path,
                                  output_node_names=target_nodes,
                                  output_graph=self.model_path + '/' + self.env_name + "_" + self.run_id + '.bytes',
                                  clear_devices=True, initializer_nodes="", input_saver="",
                                  restore_op_name="save/restore_all", filename_tensor_name="save/Const:0")
  def testFreezeMetaGraph(self):
    tmp_dir = self.get_temp_dir()
    checkpoint_prefix = os.path.join(tmp_dir, "meta_graph_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")

    with ops.Graph().as_default():
      variable_node = variables.VariableV1(1.0, name="variable_node")
      output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
      sess = session.Session()
      init = variables.global_variables_initializer()
      sess.run(init)
      output = sess.run(output_node)
      self.assertNear(2.0, output, 0.00001)
      saver = saver_lib.Saver()
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)

    input_saver_def_path = ""
    input_binary = True
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    clear_devices = False
    input_meta_graph = checkpoint_path + ".meta"

    freeze_graph.freeze_graph(
        "", input_saver_def_path, input_binary, checkpoint_path,
        output_node_names, restore_op_name, filename_tensor_name,
        output_graph_filename, clear_devices, "", "", "", input_meta_graph)

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_filename, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      self.assertEqual(4, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("VariableV2", node.op)
        self.assertNotEqual("Variable", node.op)

      with session.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
Example 7
def export_graph(model_path, env_name="env", target_nodes="action,value_estimate,action_probs"):
    """
    Exports latest saved model to .bytes format for Unity embedding.
    :param model_path: path of model checkpoints.
    :param env_name: Name of associated Learning Environment.
    :param target_nodes: Comma separated string of needed output nodes for embedded graph.
    """
    ckpt = tf.train.get_checkpoint_state(model_path)
    freeze_graph.freeze_graph(input_graph=model_path + '/raw_graph_def.pb',
                              input_binary=True,
                              input_checkpoint=ckpt.model_checkpoint_path,
                              output_node_names=target_nodes,
                              output_graph=model_path + '/' + env_name + '.bytes',
                              clear_devices=True, initializer_nodes="", input_saver="",
                              restore_op_name="save/restore_all", filename_tensor_name="save/Const:0")
def export_graphdef(filename):
    from tensorflow.python.tools import freeze_graph

    sess = tf.get_default_session()
    graph = sess.graph
    graph_def = graph.as_graph_def()

    # fix batch norm nodes
    for node in graph_def.node:
        if node.op == 'RefSwitch':
            node.op = 'Switch'
            for index in range(len(node.input)):
                if 'moving_' in node.input[index]:
                    node.input[index] = node.input[index] + '/read'
        elif node.op == 'AssignSub':
            node.op = 'Sub'
            if 'use_locking' in node.attr: del node.attr['use_locking']
        elif node.op == 'AssignAdd':
            node.op = 'Add'
            if 'use_locking' in node.attr: del node.attr['use_locking']
        
        # Strip control-dependency inputs (names prefixed with '^').
        # Removing items while iterating over node.input skips elements,
        # so rebuild the input list instead.
        kept_inputs = [inp for inp in node.input if not inp.startswith('^')]
        del node.input[:]
        node.input.extend(kept_inputs)

    saver_path = tf.train.Saver().save(sess, 'cache/karras2019stylegan-ffhq-1024x1024.ckpt')
    converted_graph = tf.graph_util.convert_variables_to_constants(sess, graph_def, ['Gs/images_out'])
    tf.train.write_graph(converted_graph, 'cache', f'{filename}_converted.pb', as_text=False)
    graph_path = tf.train.write_graph(converted_graph, 'cache', f'{filename}.pbtxt')

    print('Freezing graph')
    freeze_graph.freeze_graph(
        input_graph=graph_path,
        input_saver='',
        input_binary=False,
        input_checkpoint=saver_path,
        output_node_names='Gs/images_out',  # a comma-separated string, not a list
        restore_op_name='',
        filename_tensor_name='',
        output_graph=f'cache/frozen_{filename}.pb',
        clear_devices=False,
        initializer_nodes='',
        variable_names_whitelist="",
        variable_names_blacklist="",
        input_meta_graph=None,
        input_saved_model_dir=None
    )
Example 9
def tf_to_pb(sess, checkpoint, output, output_dir=None):
    """

    Saves a frozen tensorflow graph (a protobuf file).
    See also https://leimao.github.io/blog/Save-Load-Inference-From-TF-Frozen-Graph/

    Parameters
    ----------
    sess : tensorflow session
        session with graph to be saved

    checkpoint : string
        checkpoint of tensorflow model to be converted to protobuf (output will be <checkpoint>.pb)

    output : list of strings
        list of the names of output nodes (is returned by load_models)

    output_dir : string, optional
        path to the directory that exported models should be saved to.
        If None, will export to the directory of the checkpoint file.
    """

    output_dir = (os.path.expanduser(output_dir)
                  if output_dir else os.path.dirname(checkpoint))
    ckpt_base = os.path.basename(checkpoint)

    # save graph to pbtxt file
    pbtxt_file = os.path.normpath(output_dir + "/" + ckpt_base + ".pbtxt")
    tf.train.write_graph(sess.graph.as_graph_def(),
                         "",
                         pbtxt_file,
                         as_text=True)

    # create frozen graph from pbtxt file
    pb_file = os.path.normpath(output_dir + "/" + ckpt_base + ".pb")

    freeze_graph.freeze_graph(
        input_graph=pbtxt_file,
        input_saver="",
        input_binary=False,
        input_checkpoint=checkpoint,
        output_node_names=",".join(output),
        restore_op_name="save/restore_all",
        filename_tensor_name="save/Const:0",
        output_graph=pb_file,
        clear_devices=True,
        initializer_nodes="",
    )
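A hedged usage sketch for tf_to_pb; the checkpoint path and output node name are placeholders:

import tensorflow as tf

with tf.Session() as sess:
    # Rebuild the graph from the checkpoint's meta file and restore weights.
    saver = tf.train.import_meta_graph('/tmp/model/model.ckpt-1000.meta')
    saver.restore(sess, '/tmp/model/model.ckpt-1000')
    tf_to_pb(sess, '/tmp/model/model.ckpt-1000', ['logits'],
             output_dir='/tmp/export')  # writes model.ckpt-1000.pb there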
Example 10
def convert_tf_session_bundle(session_bundle_dir, output_node_names,
                              output_dir):
    """Freeze the Session Bundle model and check the model compatibility with
  Tensorflow.js.

  Optimize and convert the model to Tensorflow.js format, when the model passes
  the compatiblity check.

  Args:
    session_bundle_dir: string The session bundle model directory.
    output_node_names: string The names of the output nodes, comma separated.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'tensorflowjs_model.pb'
      - a JSON weights manifest file named 'weights_manifest.json'
      - possibly sharded binary weight files.
  """

    print("Tensorflow has deprecated the Session Bundle format, ",
          "please migrate to SavedModel.")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_graph = os.path.join(output_dir, DEFAULT_MODEL_PB_FILENAME)

    checkpoint = tf.train.get_checkpoint_state(session_bundle_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    frozen_file = output_graph + '.frozen'
    freeze_graph.freeze_graph('',
                              '',
                              True,
                              input_checkpoint,
                              output_node_names,
                              '',
                              '',
                              frozen_file,
                              True,
                              '',
                              input_meta_graph=input_checkpoint + '.meta')
    graph = load_graph(output_graph + '.frozen', output_node_names)
    unsupported = validate(graph.as_graph_def().node)
    if unsupported:
        print('Unsupported Ops in the model\n' + ', '.join(unsupported))
    else:
        optimize_graph(graph, output_graph)

    # Clean up the temp files.
    if os.path.exists(frozen_file):
        os.remove(frozen_file)
Example 11
    def save_as_pb(self, directory, filename):

        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save check point for graph frozen later
        ckpt_filepath = self.save(directory=directory, filename=filename)
        pbtxt_filename = filename + ".pbtxt"
        pbtxt_filepath = os.path.join(directory, pbtxt_filename)
        pb_filepath = os.path.join(directory, filename + ".pb")
        # This will only save the graph but the variables will not be saved.
        # You have to freeze your model first.
        tf.train.write_graph(
            graph_or_graph_def=self.sess.graph_def,
            logdir=directory,
            name=pbtxt_filename,
            as_text=True,
        )

        # Freeze graph
        # Method 1
        freeze_graph.freeze_graph(
            input_graph=pbtxt_filepath,
            input_saver="",
            input_binary=False,
            input_checkpoint=ckpt_filepath,
            output_node_names="cnn/output",
            restore_op_name="save/restore_all",
            filename_tensor_name="save/Const:0",
            output_graph=pb_filepath,
            clear_devices=True,
            initializer_nodes="",
        )

        # Method 2
        """
        graph = tf.get_default_graph()
        input_graph_def = graph.as_graph_def()
        output_node_names = ['cnn/output']

        output_graph_def = graph_util.convert_variables_to_constants(self.sess,
         input_graph_def, output_node_names)

        with tf.gfile.GFile(pb_filepath, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        """

        return pb_filepath
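Hypothetical usage of save_as_pb; MyCNN stands in for whatever wrapper class defines the methods above:

model = MyCNN()  # assumption: exposes self.sess and a save() method
pb_path = model.save_as_pb(directory='export', filename='cnn_model')
print(pb_path)   # export/cnn_model.pb, with variables frozen to constants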
Example 12
def convert_tf_saved_model(saved_model_dir,
                           output_node_names,
                           output_dir,
                           saved_model_tags='serve'):
    """Freeze the SavedModel and check the model compatibility with Tensorflow.js.

  Optimize and convert the model to Tensorflow.js format, when the model passes
  the compatiblity check.

  Args:
    saved_model_dir: string The saved model directory.
    output_node_names: string The names of the output nodes, comma separated.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'tensorflowjs_model.pb'
      - a JSON weights manifest file named 'weights_manifest.json'
      - possibly sharded binary weight files.
    saved_model_tags: string Tagset of the MetaGraphDef to load, in comma
      separated string format. Defaulted to 'serve'
  """

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_graph = os.path.join(output_dir, DEFAULT_MODEL_PB_FILENAME)

    frozen_file = output_graph + '.frozen'
    freeze_graph.freeze_graph('',
                              '',
                              True,
                              '',
                              output_node_names,
                              '',
                              '',
                              frozen_file,
                              True,
                              '',
                              saved_model_tags=saved_model_tags,
                              input_saved_model_dir=saved_model_dir)
    graph = load_graph(output_graph + '.frozen', output_node_names)
    unsupported = validate(graph.as_graph_def().node)
    if unsupported:
        print('Unsupported Ops in the model\n' + ', '.join(unsupported))
    else:
        optimize_graph(graph, output_graph)

    # Clean up the temp files.
    if os.path.exists(frozen_file):
        os.remove(frozen_file)
    def test_1(self):
        with open(self.class_label_file, 'w+') as labels_file:
            for a in range(10):
                labels_file.write(str(a + 1) + "\n")

        image_size = 224

        with tf.Graph().as_default():
            batch_size, height, width, channels = 1, image_size, image_size, 3
            images = tf.random.uniform([batch_size, height, width, channels],
                                       maxval=1)

            # Create the model.
            i_placeholder = tf.placeholder(
                name='input',
                dtype=tf.float32,
                shape=[1, image_size, image_size, 3])
            net = self.my_conv_2d(i_placeholder, [1, 3, 3, 1], 1, 1, 'first')
            net = tf.nn.avg_pool2d(net,
                                   224,
                                   strides=1,
                                   padding='VALID',
                                   name='AvgPool_1a')
            net = self.my_conv_2d(net, [1, 1, 1, 10],
                                  10,
                                  1,
                                  'fc',
                                  activation_fn=None,
                                  with_bias_add=False)
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            probabilities = tf.nn.softmax(net, name='Softmax')

            saver = tf.train.Saver()
            init_op = tf.global_variables_initializer()

            with tf.Session() as sess:
                sess.run(init_op)
                probabilities = sess.run(probabilities,
                                         {i_placeholder: images.eval()})
                save_path = saver.save(sess, self.checkpoint_file)

                with gfile.GFile(self.graph_file, 'wb') as f:
                    f.write(sess.graph_def.SerializeToString())

                freeze_graph.freeze_graph(self.graph_file, '', True,
                                          self.checkpoint_file, 'Softmax', '',
                                          '', self.frozen_graph_file, False,
                                          '')
Example 14
def freeze_from_checkpoint(model_dir, checkpoint_subdir, model_name):
    checkpoint_dir = os.path.join(model_dir, checkpoint_subdir)
    checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
    input_graph_path = os.path.join(checkpoint_dir, 'graph.pbtxt')
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "masked_pred_test,masked_scale_test"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_frozen_graph_name = os.path.join(model_dir, 'frozen_' + model_name + '.pb')
    clear_devices = True

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_frozen_graph_name, clear_devices, "")
def freeze():
    '''
    This can also be done from the command line:
    > python freeze_graph.py
    --input_graph=./model_pb/my_graph.pbtxt --input_checkpoint=./model_pb/model.ckpt --output_graph=./model_pb/my_graph.pb
    
    '''

    input_graph = './model_pb/my_graph.pbtxt'
    input_checkpoint = './model_pb/model.ckpt'
    output_graph = './model_pb/my_graph.pb'  # filename for the frozen graph
    output_node_names = 'L2/Sigmoid'  # node up to which the graph is exported

    freeze_graph.freeze_graph(input_graph, "", False, input_checkpoint,
                              output_node_names, None, None, output_graph,
                              True, None)
def convert_keras_to_pb(keras_model, out_names, models_dir, model_filename):
    model = load_model(keras_model)
    # The out_names parameter is overwritten here; freeze_graph expects a
    # comma-separated string of node names, so derive it from the model.
    out_names = ','.join(t.op.name for t in model.outputs)
    print(out_names)
    model.summary()
    K.set_learning_phase(0)
    sess = K.get_session()
    saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
    checkpoint_path = saver.save(sess,
                                 'saved_ckpt',
                                 global_step=0,
                                 latest_filename='checkpoint_state')
    graph_io.write_graph(sess.graph, '.', 'tmp.pb')
    freeze_graph.freeze_graph('./tmp.pb', '', False, checkpoint_path,
                              out_names, "save/restore_all", "save/Const:0",
                              models_dir + model_filename, False, "")
Example 17
def export_model(input_node_names, output_node_name):
    freeze_graph.freeze_graph('out/' + MODEL_NAME + '.pbtxt', None, False,
                              'out/' + MODEL_NAME + '.chkp', output_node_name,
                              "save/restore_all", "save/Const:0",
                              'out/frozen_' + MODEL_NAME + '.pb', True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, [input_node_names], [output_node_name],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())
def freeze_keras_model(keras_model_path):
    # Load keras model
    model = load_model(keras_model_path)
    # Observe the input_node_name and output_node_name which are used for creating inference graph with tensorrt
    print(model.inputs)
    print(model.outputs)
    saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
    with K.get_session() as sess:
        # saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
        checkpoint_path = saver.save(sess, FLAGS.saved_ckpt, global_step=0, latest_filename='checkpoint_state')
        graph_io.write_graph(sess.graph, '.', FLAGS.graphdef_file)
        print(checkpoint_path)
        freeze_graph.freeze_graph(FLAGS.graphdef_file, '',
                                  False, checkpoint_path, FLAGS.output_node_name,
                                  "save/restore_all", "save/Const:0",
                                  FLAGS.frozen_graph, False, "")
Example 19
    def freeze_graph(sess, ckpt, output):
        print("Loading checkpoint...")
        saver = tf.train.Saver()
        saver.restore(sess, ckpt)
        print("Writing graph...")
        if not os.path.isdir("_Cache"):
            os.makedirs("_Cache")
        _dir = os.path.join("_Cache", "Model")
        saver.save(sess, _dir)
        graph_io.write_graph(sess.graph, "_Cache", "Model.pb", False)
        print("Freezing graph...")
        freeze_graph.freeze_graph(os.path.join("_Cache", "Model.pb"), "", True,
                                  os.path.join("_Cache", "Model"), output,
                                  "save/restore_all", "save/Const:0",
                                  "Frozen.pb", True, "")
        print("Done")
    def exportFrozenGraphForInference(self, clear_devices=True):

        # Freeze the graph
        input_graph_path = os.path.join(self.model_ckpt_directory,
                                        self.graph_file_name)
        ckpt_path = tf.train.latest_checkpoint(self.model_ckpt_directory)
        input_saver_def_path = ""
        output_node_name = "inference_model/" + self.y_inference

        print('Freezing graph {}'.format(input_graph_path))
        freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                                  self.use_binary_export, ckpt_path,
                                  output_node_name, self.restore_op_name,
                                  self.frozen_filename,
                                  self.output_frozen_graph_name, clear_devices,
                                  "")
Example 21
    def freeze(self):
        init = tf.global_variables_initializer()
        self._tensorflow_session.run(init)
        self.checkpoint.load_all()
        saver = tf.train.Saver()
        saver.save(self._tensorflow_session, './gazeml.ckpt')
        tf.train.write_graph(self._tensorflow_session.graph.as_graph_def(),
                             '.',
                             'gazeml.pbtxt',
                             as_text=True)

        from tensorflow.python.tools import freeze_graph
        freeze_graph.freeze_graph(
            './gazeml.pbtxt', '', False, './gazeml.ckpt',
            'hourglass/hg_2/after/hmap/conv/BiasAdd:0,upscale/mul:0,radius/out/fc/BiasAdd:0',
            'save/restore_all', 'save/Const:0', './gazeml.pb', True, '')
Example 22
def freezer(sm, input_ckpt, output_graph):
    output_node_names = 'train/c'
    initializer_nodes = ''
    #tags = tag_constants.SERVING,
    tags = 'serve'
    # NOTE: the input_ckpt parameter is overridden here; ckpt_dir and
    # model_dir (used below) are assumed to be module-level globals.
    input_ckpt = tf.train.latest_checkpoint(ckpt_dir)
    meta_graph_file = input_ckpt + '.meta'
    print('meta_graph_file: ', meta_graph_file)

    with tf.Session() as sess:
        new_graph_location = freeze_graph.freeze_graph(
            input_graph=utils.get_only_graph_def_from_sm(sm),
            input_saver='',
            input_binary=True,
            input_checkpoint=input_ckpt,
            output_node_names=output_node_names,
            restore_op_name=None,
            filename_tensor_name=None,
            output_graph=output_graph,
            clear_devices=True,
            initializer_nodes=initializer_nodes,
            #input_meta_graph = sm.meta_graphs[0],
            #input_meta_graph = False,
            input_meta_graph=meta_graph_file,
            input_saved_model_dir=model_dir,
            saved_model_tags=tags,
            #checkpoint_version = saver_pb2.SaverDef.V2,
        )

    print('the new graph is at: ', new_graph_location)

    print('graph freezed')
    def export_model(self):
        """
        Exports latest saved model to .tf format for Unity embedding.
        """
        with self.graph.as_default():
            target_nodes = ','.join(self._process_graph())
            ckpt = tf.train.get_checkpoint_state(self.model_path)
            freeze_graph.freeze_graph(
                input_graph=self.model_path + '/raw_graph_def.pb',
                input_binary=True,
                input_checkpoint=ckpt.model_checkpoint_path,
                output_node_names=target_nodes,
                output_graph=(self.model_path + '.bytes'),
                clear_devices=True, initializer_nodes='', input_saver='',
                restore_op_name='save/restore_all',
                filename_tensor_name='save/Const:0')
Example 24
def main():

    saver = tf.train.import_meta_graph(
        '../datasets_test/cifar/model/model.ckpt-832.meta')

    with tf.Session() as sess:
        last_point = tf.train.latest_checkpoint(
            '../datasets_test/cifar/model/')
        saver.restore(sess, last_point)
        tf.train.write_graph(sess.graph_def, 'output_model/pb_model',
                             'model.pb')
        # NOTE: model_path is not defined in this function; it is assumed to
        # be a module-level checkpoint path (last_point would also serve here).
        freeze_graph.freeze_graph('output_model/pb_model/model.pb', '', False,
                                  model_path, 'add', 'save/restore_all',
                                  'save/Const:0',
                                  'output_model/pb_model/frozen_model.pb',
                                  False, "")
Example 25
def do_all(_):
    if tf.gfile.Exists(FLAGS.checkpoint_dir):
        tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)
    tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
    if tf.gfile.Exists(FLAGS.model_dir):
        tf.gfile.DeleteRecursively(FLAGS.model_dir)
    tf.gfile.MakeDirs(FLAGS.model_dir)

    content_targets = get_files(FLAGS.train_path)


    print(FLAGS.style)
    print(list_files(FLAGS.style))
    for name in get_files(FLAGS.style):
        print(name)

        style_target = get_img(name)
        # Reduce the style image path to its base filename without extension.
        name = os.path.splitext(os.path.basename(name))[0]
        print(name)

        for epoch, iteration, ckpt in optimize(
                content_targets, style_target, content_weight,
                style_weight, tv_weight, FLAGS.vgg_path, name, FLAGS.light_version):
            if (FLAGS.test):
                assert FLAGS.test_dir is not False
                preds_img_name = "{}_{}.png".format(epoch, iteration)
                preds_img_path = os.path.join(FLAGS.test_dir, preds_img_name)
                evaluate(FLAGS.test, preds_img_path, ckpt, name, FLAGS.light_version)

        start_time = time.time()
        freeze_graph(
            input_graph=os.path.join(FLAGS.model_dir,
                                    FLAGS.model_name + '_' + name + '.pb.txt'),
            input_saver='',
            input_binary=False,
            input_checkpoint=ckpt,
            output_node_names='output',
            restore_op_name='save/restore_all',
            filename_tensor_name='save/Const:0',
            output_graph=os.path.join(FLAGS.model_dir,
                                    '%s_frozen.pb' % (FLAGS.model_name + '_' + name)),
            clear_devices=False,
            initializer_nodes='')
        end_time = time.time()
        ex_time = end_time - start_time
        print('Save file graph frozen pb done!, time:', ex_time)
Example 26
def main():
    im_test = io.imread(
        '/home/bo718.wang/irfan/pacling/train_2_out/images/u1025-inputs.png')
    im_test = transform.resize(im_test, [512, 256, 3])
    in_dir = "/home/bo718.wang/irfan/pacling/train_2/"
    out_dir = "./out"
    tf.reset_default_graph()
    im = tf.placeholder(tf.float32, [None, None, 3])
    # im1 = tf.placeholder(tf.float32, [None, None, 3])
    # im1 = tf.placeholder(tf.float32, [None, None, 3])
    im1 = tf.expand_dims(im, 0)
    im1 *= 2
    im1 -= 1
    # im1 *= 256
    # im1 -= 128
    with tf.variable_scope('generator'):
        g_out = create_generator(im1, 3)
    # g_out *= 128
    g_out /= 2
    g_out += 0.5
    # out = g_out + 128.
    out = tf.identity(g_out, 'out')
    # print(out.name)
    # print('--------------------')
    # print("type of out",type(g_out))
    saver = tf.train.Saver(max_to_keep=1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        pretrained = tf.train.latest_checkpoint(in_dir)
        saver.restore(sess, pretrained)
        out = sess.run(out, feed_dict={im: im_test})
        io.imsave('out.jpg', out[0])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        pretrained = tf.train.latest_checkpoint(in_dir)
        saver.restore(sess, pretrained)
        #   TF SAVER
        checkpoint_path = in_dir + 'model-881800'
        #  FREEZE GRAPH
        tf.train.write_graph(sess.graph_def, in_dir, 'model.pb')

        freeze_graph.freeze_graph(in_dir + '/model.pb', '', False,
                                  checkpoint_path, 'out', 'save/restore_all',
                                  'save/Const:0',
                                  out_dir + '/119_cartoon_size_sketch.pb',
                                  False, "")
        print('==================')
Example 27
def main():

    # Define the name of your model
    model_name = 'batch=100,lr=0.0001,optimizer=GDS,epochs=1000'

    # define the path to the graph from training
    input_graph = os.path.join(os.getcwd(), 'tensorflow_logs', model_name,
                               'graph', 'graph.pb')

    # define the path in which to save the frozen graph
    output_graph = os.path.join(os.getcwd(), 'tensorflow_logs', model_name,
                                'frozen_graph', 'frozen_graph.pb')

    # the frozen_graph directory must exist in order to freeze the model
    directory = os.path.join(os.getcwd(), 'tensorflow_logs', model_name,
                             'frozen_graph')
    if not os.path.exists(directory):
        os.makedirs(directory)

    # define the checkpoint/weights you want to freeze inside the graph
    input_checkpoint = os.path.join(os.getcwd(), 'tensorflow_logs', model_name,
                                    'train-900')

    # define the name of the prediction output node
    # This name can be extracted with TensorBoard: in the GRAPHS tab, check the inputs of the Loss scope.
    # In this case they are "vel_true" and "ConvNet/fc_layer_2/BiasAdd". The CNN's predictions come from
    # "ConvNet/fc_layer_2/BiasAdd", whereas "vel_true" holds the true omega velocities. Since we need the
    # node that provides the CNN's predictions, output_node_names is set to "ConvNet/fc_layer_2/BiasAdd".
    output_node_names = "ConvNet/fc_layer_2/BiasAdd"

    # The following settings should remain the same
    input_saver = ""
    input_binary = True
    restore_op_name = 'save/restore_all'
    filename_tensor_name = 'save/Const:0'
    clear_devices = True
    initializer_nodes = ""
    variable_names_blacklist = ""

    # Freeze the graph
    freeze_graph.freeze_graph(input_graph, input_saver, input_binary,
                              input_checkpoint, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph, clear_devices, initializer_nodes,
                              variable_names_blacklist)

    print("The frozen graph is saved in {}.".format(output_graph))
Example 28
    def testFreezeSavedModel(self):
        tmp_dir = self.get_temp_dir()
        saved_model_dir = os.path.join(tmp_dir, "saved_model_dir")
        feature_name = "feature"
        self._writeDummySavedModel(saved_model_dir, feature_name)
        output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")

        input_saved_model_dir = saved_model_dir
        output_node_names = "output_node"
        input_binary = False
        input_saver_def_path = False
        restore_op_name = None
        filename_tensor_name = None
        clear_devices = False
        input_meta_graph = False
        checkpoint_path = None
        input_graph_filename = None
        saved_model_tags = tag_constants.SERVING

        freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
                                  input_binary, checkpoint_path,
                                  output_node_names, restore_op_name,
                                  filename_tensor_name, output_graph_filename,
                                  clear_devices, "", "", "", input_meta_graph,
                                  input_saved_model_dir, saved_model_tags)

        # Now we make sure the variable is now a constant, and that the graph still
        # produces the expected result.
        with ops.Graph().as_default():
            output_graph_def = graph_pb2.GraphDef()
            with open(output_graph_filename, "rb") as f:
                output_graph_def.ParseFromString(f.read())
                _ = importer.import_graph_def(output_graph_def, name="")

            self.assertEqual(8, len(output_graph_def.node))
            for node in output_graph_def.node:
                self.assertNotEqual("VariableV2", node.op)
                self.assertNotEqual("Variable", node.op)

            feature_value = 2.0
            example = self._createTFExampleString(feature_name, feature_value)
            with session.Session() as sess:
                input_node = sess.graph.get_tensor_by_name("input_node:0")
                output_node = sess.graph.get_tensor_by_name("output_node:0")
                output = sess.run(output_node,
                                  feed_dict={input_node: [example]})
                self.assertNear(feature_value, output, 0.00001)
Example 29
def freeze_saved_model_graph(output_node_names, input_saved_model_dir,
                             output_graph_filename):
    input_binary = False
    input_saver_def_path = False
    restore_op_name = None
    filename_tensor_name = None
    clear_devices = False
    input_meta_graph = False
    checkpoint_path = None
    input_graph_filename = None
    saved_model_tags = tag_constants.SERVING
    freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_filename, clear_devices, "", "", "",
                              input_meta_graph, input_saved_model_dir,
                              saved_model_tags)
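The long positional signature above is hard to audit; for reference, the same call with keyword arguments (parameter names follow the TF1 freeze_graph tool, where restore_op_name and filename_tensor_name are unused):

freeze_graph.freeze_graph(
    input_graph=None,              # no GraphDef file: reading a SavedModel
    input_saver=False,
    input_binary=False,
    input_checkpoint=None,
    output_node_names=output_node_names,
    restore_op_name=None,          # unused by the tool
    filename_tensor_name=None,     # unused by the tool
    output_graph=output_graph_filename,
    clear_devices=False,
    initializer_nodes="",
    variable_names_whitelist="",
    variable_names_blacklist="",
    input_meta_graph=False,
    input_saved_model_dir=input_saved_model_dir,
    saved_model_tags=tag_constants.SERVING)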
def export_model(input_node_names, output_node_name):
    freeze_graph.freeze_graph('out/' + MODEL_NAME + '.pbtxt', None, False,
        'out/' + MODEL_NAME + '.chkp', output_node_name, "save/restore_all",
        "save/Const:0", 'out/frozen_' + MODEL_NAME + '.pb', True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
            input_graph_def, input_node_names, [output_node_name],
            tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")
Example 31
    def _freeze_keras_saved_model(self, saved_model_dir):
        """Freezes the model and returns the frozen GraphDef.

        Frozen here means that all variables are converted to placeholders.

        Args:
          saved_model_dir: Directory with the Keras SavedModel export.

        Returns:
          Frozen GraphDef for the model.
        """
        temp_dir = tempfile.mkdtemp("tflite-transfer-convert")
        graph_def_file_name = os.path.join(temp_dir, "frozen.pb")
        output_names = [
            utils.tensor_to_op_name(output.name)
            for output in self._eval_signature.outputs.values()
        ]

        freeze_graph.freeze_graph(
            input_graph=None,
            input_saver=False,
            input_binary=True,
            input_checkpoint=None,
            output_node_names=",".join(output_names),
            restore_op_name=None,
            filename_tensor_name=None,
            output_graph=graph_def_file_name,
            clear_devices=True,
            initializer_nodes="",
            input_saved_model_dir=saved_model_dir,
            saved_model_tags="eval",
        )

        const_graph_def = tfv1.GraphDef()
        with open(graph_def_file_name, "rb") as graph_def_file:
            const_graph_def.ParseFromString(graph_def_file.read())

        # Convert constants produced from trainable variables to placeholders.
        # Note: eval model might have other variables that should not be trainable,
        # they are kept as constants. Only variables that are present in serve
        # model are converted.
        graph_def = utils.convert_constants_to_placeholders(
            const_graph_def, self._variable_names)

        shutil.rmtree(temp_dir)
        return graph_def
Example 33
def export_model(saver, input_node_names, output_node_name):
    # creates the 'out' folder where our frozen graphs will be saved
    if not path.exists('out'):
        os.mkdir('out')

    # an arbitrary name for our graph
    GRAPH_NAME = 'heli_test_to_Unity'

    # GRAPH SAVING - '.pbtxt'
    tf.train.write_graph(K.get_session().graph_def, 'out',
                         GRAPH_NAME + '_graph.pbtxt')

    # GRAPH SAVING - '.chkp'
    # KEY: This method saves the graph at its last checkpoint (hence '.chkp')
    saver.save(K.get_session(), 'out/' + GRAPH_NAME + '.chkp')

    # GRAPH SAVING - '.bytes'
    # freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
    # input_binary, checkpoint_path, output_node_names,
    # restore_op_name, filename_tensor_name,
    # output_frozen_graph_name, clear_devices, "")
    freeze_graph.freeze_graph('out/' + GRAPH_NAME + '_graph.pbtxt', None,
                              False, 'out/' + GRAPH_NAME + '.chkp',
                              output_node_name, "save/restore_all",
                              "save/Const:0",
                              'out/frozen_' + GRAPH_NAME + '.bytes', True, "")
    # freeze_graph.freeze_graph(input_graph='out/' + GRAPH_NAME + '_graph.pbtxt',
    #                           input_binary=True,
    #                           input_checkpoint='out/' + GRAPH_NAME + '.chkp',
    #                           output_node_names=output_node_name,
    #                           output_graph='out/frozen_' + GRAPH_NAME + '.bytes',
    #                           clear_devices=True, initializer_nodes="", input_saver="",
    #                           restore_op_name="save/restore_all", filename_tensor_name="save/Const:0")
    # GRAPH OPTIMIZING
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + GRAPH_NAME + '.bytes', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + GRAPH_NAME + '.bytes', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")
Example 34
def freeze_model(model_path, freeze_dir, output_graph_name):

    name = re.sub(r'-\d+$', '', os.path.basename(model_path))
    model_dir = os.path.dirname(model_path)
    model_type, _, _ = read_meta('%s/meta' % model_dir)

    if not os.path.isdir(freeze_dir):
        os.makedirs(freeze_dir)
    copyfile('%s/meta' % model_dir, '%s/meta' % freeze_dir)
    checkpoint_prefix = os.path.join(freeze_dir, "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"

    G2PModel, hparams = import_model_type(model_type)
    with open('%s/hparams' % model_dir, 'r') as infp:
        loaded = json.load(infp)
        hparams.parse_json(loaded)

    with ops.Graph().as_default():
        with tf.Session() as sess:
            model = G2PModel(hparams, is_training=False, with_target=False, reuse=False)
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, model_path) 

            saver = saver_lib.Saver()
            checkpoint_path = saver.save(
                sess,
                checkpoint_prefix,
                global_step=0,
                latest_filename=checkpoint_state_name)
            graph_io.write_graph(sess.graph, freeze_dir, input_graph_name)

    input_graph_path = os.path.join(freeze_dir, input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = 'g2p/predicted_1best,g2p/probs'
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(freeze_dir, output_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(
          input_graph_path, input_saver_def_path, input_binary, checkpoint_path,
          output_node_names, restore_op_name, filename_tensor_name,
          output_graph_path, clear_devices, "")
def convertmodeltopb2():
  from tensorflow.python.tools import freeze_graph
  output_node_names = ['StatefulPartitionedCall']
  output_node_names = ','.join(output_node_names)
  save_pb_model_path = './tfrec/fast_real_frozen3/faster_5x1_obj.pb'
  input_saved_model_dir = './tfrec/fast_model_frozen/saved_model'

  freeze_graph.freeze_graph(input_graph=None, input_saver=None,
                              input_binary=None,
                              input_checkpoint=None,
                              output_node_names=output_node_names,
                              restore_op_name=None,
                              filename_tensor_name=None,
                              output_graph=save_pb_model_path,
                              clear_devices=None,
                              initializer_nodes=None,
                              input_saved_model_dir=input_saved_model_dir)
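To verify which node names survived the freeze (e.g. that 'StatefulPartitionedCall' is present), the frozen GraphDef can be parsed and listed; a small sketch using the TF1-compatible API:

import tensorflow.compat.v1 as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('./tfrec/fast_real_frozen3/faster_5x1_obj.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
print([node.name for node in graph_def.node][:10])  # first few node names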
Example 36
def exportModelToTF(tfModelOutputDir):

    if not os.path.exists(tfModelOutputDir):
        os.makedirs(tfModelOutputDir)

    # Save checkpoint
    saver = tf.train.Saver()
    save_path = saver.save(K.get_session(), tfModelOutputDir + "/model")

    # Save metagraph
    tf.train.write_graph(K.get_session().graph.as_graph_def(), "", tfModelOutputDir + "/metagraph.pb", False)

    # Freeze graph
    freeze_graph(input_graph=tfModelOutputDir + "/metagraph.pb", input_saver="", input_binary=True,
                 input_checkpoint=tfModelOutputDir + "/model", output_node_names='softmax/Softmax',
                 restore_op_name="save/restore_all", filename_tensor_name="save/Const:0",
                 output_graph=tfModelOutputDir + "/graph.pb", clear_devices=True, initializer_nodes="")
def convert_keras_to_freeze_pb(model, frozen_model_path):
        out_names = ",".join([layer.name.split(":")[0]  for layer in model.outputs])
        inp_names = ",".join([layer.name.split(":")[0]  for layer in model.inputs])
        print("OUTPUT: {}".format(out_names))
        print("INPUT: {}".format(inp_names))
        model.summary()
        K.set_learning_phase(0)
        sess = K.get_session()
        saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
        checkpoint_path = saver.save(sess, './saved_ckpt', global_step=0, latest_filename='checkpoint_state')
        graph_io.write_graph(sess.graph, '.', './tmp.pb')
        freeze_graph.freeze_graph('./tmp.pb', '',
                                False, checkpoint_path, out_names,
                                "save/restore_all",
                                "save/Const:0",
                                frozen_model_path,
                                False, "")
Example 39
    def convert_to_frozen_graph(self):

        input_pb_path = self.frozenpb_config_worker.input_dir_path + self.frozenpb_config_worker.input_pb_name
        input_ckpt_path = self.frozenpb_config_worker.input_dir_path + self.frozenpb_config_worker.input_ckpt_name
        output_frozen_pb_path = self.frozenpb_config_worker.output_dir_path + self.frozenpb_config_worker.output_pb_name

        freeze_graph.freeze_graph(\
            input_graph=input_pb_path,
            input_saver= "",                    # this argument is used with SavedModel
            input_binary=self.frozenpb_config_worker.binary_opt,
            input_checkpoint=input_ckpt_path,
            output_node_names=self.frozenpb_config_worker.output_node_names,
            restore_op_name="save/restore_all",  # unused in freeze_graph()
            filename_tensor_name="save/Const:0", # unused in freeze_graph()
            output_graph=output_frozen_pb_path,
            clear_devices=False,                # if True, strips device placements from the graph
            initializer_nodes="")
Example 40
    def _load_saved_model(self):
        """Load the tensorflow saved model."""
        try:
            from tensorflow.python.tools import freeze_graph
            from tensorflow.python.framework import ops
            from tensorflow.python.framework import graph_util
        except ImportError:
            raise ImportError(
                "InputConfiguration: Unable to import tensorflow which is "
                "required to restore from saved model.")

        saved_model_dir = self._model_dir
        output_graph_filename = self._tmp_dir.relpath("tf_frozen_model.pb")
        input_saved_model_dir = saved_model_dir
        output_node_names = self._get_output_names()

        input_binary = False
        input_saver_def_path = False
        restore_op_name = None
        filename_tensor_name = None
        clear_devices = True
        input_meta_graph = False
        checkpoint_path = None
        input_graph_filename = None
        saved_model_tags = ",".join(self._get_tag_set())

        freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
                                  input_binary, checkpoint_path, output_node_names,
                                  restore_op_name, filename_tensor_name,
                                  output_graph_filename, clear_devices, "", "", "",
                                  input_meta_graph, input_saved_model_dir,
                                  saved_model_tags)

        with ops.Graph().as_default():
            output_graph_def = graph_pb2.GraphDef()
            with open(output_graph_filename, "rb") as f:
                output_graph_def.ParseFromString(f.read())
            output_graph_def = graph_util.remove_training_nodes(output_graph_def)
            return output_graph_def
Example 41
    def saveModel(self, sess, outputDirectory = ""):
        from tensorflow.python.framework import graph_io
        from tensorflow.python.tools import freeze_graph

        input_graph_path = outputDirectory + "tfModel.pb"
        graph_io.write_graph(sess.graph, "./", input_graph_path)
    
        #create frozen version of graph for distribution
        input_saver_def_path = ""
        input_binary = False
        checkpoint_path = outputDirectory + "models/model.ckpt"
        output_node_names = "y_ph"
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        output_graph_path = outputDirectory + "tfModel_frozen.pb"
        clear_devices = False
    
        freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                                  input_binary, checkpoint_path, output_node_names,
                                  restore_op_name, filename_tensor_name,
                                  output_graph_path, clear_devices, "")
    
        print("Frozen model (model and weights) saved in file: %s" % output_graph_path)
Example n. 42
def dump_frozen_graph(sess, graph_file, output_node_names=None):
  assert graph_file.endswith('.pb')
  assert output_node_names is None or isinstance(output_node_names, list)
  output_node_names = output_node_names or estimate_inputs_outputs(sess.graph)[1]

  dir_ = os.path.dirname(graph_file)
  base = os.path.basename(graph_file)
  ckpt = graph_file.replace('.pb', '.ckpt')
  frozen = graph_file.replace('.pb', '.pb.frozen')

  os.makedirs(dir_, exist_ok=True)
  print('>> Saving `{}`... '.format(graph_file), end='')
  tf.train.write_graph(sess.graph, dir_, base, as_text=False)
  tf.train.write_graph(sess.graph, dir_, base + "txt", as_text=True)  # "<name>.pb" + "txt" -> "<name>.pbtxt"
  print('Done')

  print('>> Saving `{}`... '.format(ckpt), end='')
  tf.train.Saver().save(sess, ckpt, write_meta_graph=False)
  print('Done')

  print('>> Freezing graph to `{}`... '.format(frozen))
  print('Outputs:\n  {}'.format(', '.join(output_node_names)))

  from tensorflow.python.tools.freeze_graph import freeze_graph
  freeze_graph(input_graph=graph_file,
               input_saver='',
               input_binary=True,
               input_checkpoint=ckpt,
               output_node_names=','.join(output_node_names),
               restore_op_name='save/restore_all',
               filename_tensor_name='save/Const:0',
               output_graph=frozen,
               clear_devices=True,
               initializer_nodes='',
               saved_model_tags='serve')

  return frozen
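
A usage sketch for dump_frozen_graph (the model-building call and output node name are placeholders; estimate_inputs_outputs is assumed to exist in the surrounding module):

import tensorflow as tf

with tf.Session() as sess:
    build_model()                                   # hypothetical graph construction
    sess.run(tf.global_variables_initializer())
    frozen = dump_frozen_graph(sess, 'export/model.pb',
                               output_node_names=['logits'])
    print(frozen)                                   # -> export/model.pb.frozen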
Example n. 43
def main(_):

    # Always keep the cpu as default
    with tf.Graph().as_default(), tf.device('/cpu:0'):

        if FLAGS.validation_interval == 0:
            FLAGS.validation_db = None

        # Set Tensorboard log directory
        if FLAGS.summaries_dir:
            # The following gives a readable but not collision-proof timestamp
            FLAGS.summaries_dir = os.path.join(FLAGS.summaries_dir, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))

        if not FLAGS.train_db and not FLAGS.validation_db and not FLAGS.inference_db and not FLAGS.visualizeModelPath:
            logging.error("At least one of the following file sources should be specified: "
                          "train_db, validation_db or inference_db")
            exit(-1)

        if FLAGS.seed:
            tf.set_random_seed(FLAGS.seed)

        batch_size_train = FLAGS.batch_size
        batch_size_val = FLAGS.batch_size
        logging.info("Train batch size is %s and validation batch size is %s", batch_size_train, batch_size_val)

        # Tracks the epoch at which the next validation pass should run.
        next_validation = FLAGS.validation_interval
        logging.info("Training epochs to be completed for each validation : %s", next_validation)

        # Tracks the epoch at which the next snapshot of the model weights should be saved.
        next_snapshot_save = FLAGS.snapshotInterval
        logging.info("Training epochs to be completed before taking a snapshot : %s", next_snapshot_save)
        last_snapshot_save_epoch = 0

        snapshot_prefix = FLAGS.snapshotPrefix if FLAGS.snapshotPrefix else FLAGS.network.split('.')[0]
        logging.info("Model weights will be saved as %s_<EPOCH>_Model.ckpt", snapshot_prefix)

        if not os.path.exists(FLAGS.save):
            os.makedirs(FLAGS.save)
            logging.info("Created a directory %s to save all the snapshots", FLAGS.save)

        # Load mean variable
        if FLAGS.subtractMean == 'none':
            mean_loader = None
        else:
            if not FLAGS.mean:
                logging.error("subtractMean parameter not set to 'none' yet mean image path is unset")
                exit(-1)
            logging.info("Loading mean tensor from %s file", FLAGS.mean)
            mean_loader = tf_data.MeanLoader(FLAGS.mean, FLAGS.subtractMean, FLAGS.bitdepth)

        classes = 0
        nclasses = 0
        if FLAGS.labels_list:
            logging.info("Loading label definitions from %s file", FLAGS.labels_list)
            classes = loadLabels(FLAGS.labels_list)
            nclasses = len(classes)
            if not classes:
                logging.error("Reading labels file %s failed.", FLAGS.labels_list)
                exit(-1)
            logging.info("Found %s classes", nclasses)

        # Create a data-augmentation dict
        aug_dict = {
            'aug_flip': FLAGS.augFlip,
            'aug_noise': FLAGS.augNoise,
            'aug_contrast': FLAGS.augContrast,
            'aug_whitening': FLAGS.augWhitening,
            'aug_HSV': {
                'h': FLAGS.augHSVh,
                's': FLAGS.augHSVs,
                'v': FLAGS.augHSVv,
            },
        }

        # Import the network file
        path_network = os.path.join(os.path.dirname(os.path.realpath(__file__)), FLAGS.networkDirectory, FLAGS.network)
        exec(open(path_network).read(), globals())

        try:
            UserModel
        except NameError:
            logging.error("The user model class 'UserModel' is not defined.")
            exit(-1)
        if not inspect.isclass(UserModel):  # noqa
            logging.error("The user model class 'UserModel' is not a class.")
            exit(-1)
        # @TODO(tzaman) - add mode checks to UserModel

        if FLAGS.train_db:
            with tf.name_scope(digits.STAGE_TRAIN) as stage_scope:
                train_model = Model(digits.STAGE_TRAIN, FLAGS.croplen, nclasses, FLAGS.optimization, FLAGS.momentum)
                train_model.create_dataloader(FLAGS.train_db)
                train_model.dataloader.setup(FLAGS.train_labels,
                                             FLAGS.shuffle,
                                             FLAGS.bitdepth,
                                             batch_size_train,
                                             FLAGS.epoch,
                                             FLAGS.seed)
                train_model.dataloader.set_augmentation(mean_loader, aug_dict)
                train_model.create_model(UserModel, stage_scope)  # noqa

        if FLAGS.validation_db:
            with tf.name_scope(digits.STAGE_VAL) as stage_scope:
                val_model = Model(digits.STAGE_VAL, FLAGS.croplen, nclasses, reuse_variable=True)
                val_model.create_dataloader(FLAGS.validation_db)
                val_model.dataloader.setup(FLAGS.validation_labels,
                                           False,
                                           FLAGS.bitdepth,
                                           batch_size_val,
                                           1e9,
                                           FLAGS.seed)  # @TODO(tzaman): set numepochs to 1
                val_model.dataloader.set_augmentation(mean_loader)
                val_model.create_model(UserModel, stage_scope)  # noqa

        if FLAGS.inference_db:
            with tf.name_scope(digits.STAGE_INF) as stage_scope:
                inf_model = Model(digits.STAGE_INF, FLAGS.croplen, nclasses)
                inf_model.create_dataloader(FLAGS.inference_db)
                inf_model.dataloader.setup(None, False, FLAGS.bitdepth, FLAGS.batch_size, 1, FLAGS.seed)
                inf_model.dataloader.set_augmentation(mean_loader)
                inf_model.create_model(UserModel, stage_scope)  # noqa

        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
                          allow_soft_placement=True,  # will automatically do non-gpu supported ops on cpu
                          inter_op_parallelism_threads=TF_INTER_OP_THREADS,
                          intra_op_parallelism_threads=TF_INTRA_OP_THREADS,
                          log_device_placement=FLAGS.log_device_placement))

        if FLAGS.visualizeModelPath:
            visualize_graph(sess.graph_def, FLAGS.visualizeModelPath)
            exit(0)

        # Saver creation.
        if FLAGS.save_vars == 'all':
            vars_to_save = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        elif FLAGS.save_vars == 'trainable':
            vars_to_save = tf.trainable_variables()
        else:
            logging.error('Unknown save_var flag (%s)' % FLAGS.save_vars)
            exit(-1)
        saver = tf.train.Saver(vars_to_save, max_to_keep=0, sharded=FLAGS.serving_export)

        # Initialize variables
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)

        # If weights option is set, preload weights from existing models appropriately
        if FLAGS.weights:
            load_snapshot(sess, FLAGS.weights, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))

        # Tensorboard: Merge all the summaries and write them out
        writer = tf.summary.FileWriter(os.path.join(FLAGS.summaries_dir, 'tb'), sess.graph)

        # If we are inferencing, only do that.
        if FLAGS.inference_db:
            inf_model.start_queue_runners(sess)
            Inference(sess, inf_model)

        queue_size_op = []
        for n in tf.get_default_graph().as_graph_def().node:
            if '_Size' in n.name:
                queue_size_op.append(n.name+':0')

        start = time.time()  # @TODO(tzaman) - removeme

        # Initial Forward Validation Pass
        if FLAGS.validation_db:
            val_model.start_queue_runners(sess)
            Validation(sess, val_model, 0)

        if FLAGS.train_db:
            # During training, log at least MIN_LOGS_PER_TRAIN_EPOCH times per epoch,
            # or once every 5000 images, whichever gives the shorter interval
            train_steps_per_epoch = train_model.dataloader.get_total() / batch_size_train
            if math.ceil(train_steps_per_epoch/MIN_LOGS_PER_TRAIN_EPOCH) < math.ceil(5000/batch_size_train):
                logging_interval_step = int(math.ceil(train_steps_per_epoch/MIN_LOGS_PER_TRAIN_EPOCH))
            else:
                logging_interval_step = int(math.ceil(5000/batch_size_train))
            logging.info("During training. details will be logged after every %s steps (batches)",
                         logging_interval_step)

            # An epoch value is computed for every batch. To keep epoch values unique
            # across batches, they are rounded to the required number of decimal digits.
            epoch_round = 0  # holds the number of decimal digits for the round() below
            tmp_batchsize = batch_size_train*logging_interval_step
            while tmp_batchsize <= train_model.dataloader.get_total():
                tmp_batchsize = tmp_batchsize * 10
                epoch_round += 1
            logging.info("While logging, epoch value will be rounded to %s significant digits", epoch_round)

            # Create the learning rate policy
            total_training_steps = train_model.dataloader.num_epochs * train_model.dataloader.get_total() / \
                train_model.dataloader.batch_size
            lrpolicy = lr_policy.LRPolicy(FLAGS.lr_policy,
                                          FLAGS.lr_base_rate,
                                          FLAGS.lr_gamma,
                                          FLAGS.lr_power,
                                          total_training_steps,
                                          FLAGS.lr_stepvalues)
            train_model.start_queue_runners(sess)

            # Training
            logging.info('Started training the model')

            current_epoch = 0
            try:
                step = 0
                step_last_log = 0
                print_vals_sum = 0
                while not train_model.queue_coord.should_stop():
                    log_runtime = FLAGS.log_runtime_stats_per_step and (step % FLAGS.log_runtime_stats_per_step == 0)

                    run_options = None
                    run_metadata = None
                    if log_runtime:
                        # Trace levels: SOFTWARE_TRACE, HARDWARE_TRACE, FULL_TRACE;
                        # HARDWARE_TRACE additionally requires NVIDIA CUPTI.
                        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                        run_metadata = tf.RunMetadata()

                    feed_dict = {train_model.learning_rate: lrpolicy.get_learning_rate(step)}

                    _, summary_str, step = sess.run([train_model.train,
                                                     train_model.summary,
                                                     train_model.global_step],
                                                    feed_dict=feed_dict,
                                                    options=run_options,
                                                    run_metadata=run_metadata)

                    # HACK: each op in the train_model.train list bumps global_step,
                    # so divide by the list length to recover the real step count.
                    step = step / len(train_model.train)

                    # logging.info(sess.run(queue_size_op)) # DEVELOPMENT: for checking the queue size

                    if log_runtime:
                        writer.add_run_metadata(run_metadata, str(step))
                        save_timeline_trace(run_metadata, FLAGS.save, int(step))

                    writer.add_summary(summary_str, step)

                    # Parse the summary
                    tags, print_vals = summary_to_lists(summary_str)

                    print_vals_sum = print_vals + print_vals_sum

                    # @TODO(tzaman): account for variable batch_size value on very last epoch
                    current_epoch = round((step * batch_size_train) / train_model.dataloader.get_total(), epoch_round)
                    # Log training progress at the configured interval
                    if step % logging_interval_step == 0:
                        steps_since_log = step - step_last_log
                        print_list = print_summarylist(tags, print_vals_sum/steps_since_log)
                        logging.info("Training (epoch " + str(current_epoch) + "): " + print_list)
                        print_vals_sum = 0
                        step_last_log = step

                    # Potential Validation Pass
                    if FLAGS.validation_db and current_epoch >= next_validation:
                        Validation(sess, val_model, current_epoch)
                        # Find the next epoch value that is exactly divisible by FLAGS.validation_interval:
                        next_validation = (round(float(current_epoch)/FLAGS.validation_interval) + 1) * \
                            FLAGS.validation_interval

                    # Saving Snapshot
                    if FLAGS.snapshotInterval > 0 and current_epoch >= next_snapshot_save:
                        checkpoint_path, graphdef_path = save_snapshot(sess,
                                                                       saver,
                                                                       FLAGS.save,
                                                                       snapshot_prefix,
                                                                       current_epoch,
                                                                       FLAGS.serving_export
                                                                       )

                        # Find the next epoch value that is exactly divisible by FLAGS.snapshotInterval
                        next_snapshot_save = (round(float(current_epoch)/FLAGS.snapshotInterval) + 1) * \
                            FLAGS.snapshotInterval
                        last_snapshot_save_epoch = current_epoch
                    writer.flush()

            except tf.errors.OutOfRangeError:
                logging.info('Done training for epochs: tf.errors.OutOfRangeError')
            except ValueError as err:
                logging.error(err.args[0])
                exit(-1)  # DIGITS wants a dirty error.
            except KeyboardInterrupt:
                logging.info('Interrupt signal received.')

            # If required, perform final snapshot save
            if FLAGS.snapshotInterval > 0 and FLAGS.epoch > last_snapshot_save_epoch:
                checkpoint_path, graphdef_path =\
                    save_snapshot(sess, saver, FLAGS.save, snapshot_prefix, FLAGS.epoch, FLAGS.serving_export)

        print('Training wall-time:', time.time()-start)  # @TODO(tzaman) - removeme

        # If required, perform final Validation pass
        if FLAGS.train_db and FLAGS.validation_db and current_epoch >= next_validation:
            Validation(sess, val_model, current_epoch)

        if FLAGS.train_db:
            if FLAGS.labels_list:
                output_tensor = train_model.towers[0].inference
                out_name, _ = output_tensor.name.split(':')

        if FLAGS.train_db:
            del train_model
        if FLAGS.validation_db:
            del val_model
        if FLAGS.inference_db:
            del inf_model

        # Close the session explicitly since it was not created in a `with` block
        sess.close()

        writer.close()

    tf.reset_default_graph()

    del sess
    if FLAGS.train_db:
        if FLAGS.labels_list:
            path_frozen = os.path.join(FLAGS.save, 'frozen_model.pb')
            print('Saving frozen model at path {}'.format(path_frozen))
            freeze_graph.freeze_graph(
                input_graph=graphdef_path,
                input_saver='',
                input_binary=True,
                input_checkpoint=checkpoint_path,
                output_node_names=out_name,
                restore_op_name="save/restore_all",
                filename_tensor_name="save/Const:0",
                output_graph=path_frozen,
                clear_devices=True,
                initializer_nodes="",
            )

    logging.info('END')

    exit(0)
Example n. 44
try:
    model = Sequential.from_config(config)
except Exception:
    # Fall back for functional (non-Sequential) models
    model = Model.from_config(config)
model.set_weights(weights)

model.summary()

print("Input name:")
print(model.input.name)
print("Output name:")
print(model.output.name)
output_name=model.output.name.split(':')[0]

# Export version number (an integer; unused in the rest of this snippet)
export_version = 1

graph_file=export_path+".pb"
ckpt_file=export_path+".ckpt"
# create a saver
saver = tf.train.Saver(sharded=True)
tf.train.write_graph(sess.graph_def, '', graph_file)
save_path = saver.save(sess, ckpt_file)


input_graph_path = graph_file
checkpoint_path = ckpt_file
output_frozen_graph_name = export_path + "_frozen.pb"  # assumed name; undefined in the original

freeze_graph.freeze_graph(input_graph_path, "", False, checkpoint_path,
                          output_name, "save/restore_all", "save/Const:0",
                          output_frozen_graph_name, True, "")
Example n. 45
    def save(self, path=None, name=None, overwrite=True):
        path = "Models" if path is None else path
        name = "Cache" if name is None else name
        folder = os.path.join(path, name)
        if not os.path.exists(folder):
            os.makedirs(folder)
        _dir = os.path.join(folder, "Model")
        if os.path.isfile(_dir):
            if not overwrite:
                _count = 1
                _new_dir = _dir + "({})".format(_count)
                while os.path.isfile(_new_dir):
                    _count += 1
                    _new_dir = _dir + "({})".format(_count)
                _dir = _new_dir
            else:
                os.remove(_dir)

        print()
        print("=" * 60)
        print("Saving Model to {}...".format(folder))
        print("-" * 60)

        with open(_dir + ".nn", "wb") as file:
            # We don't need w_stds & b_inits when we load a model
            _dic = {
                "structures": {
                    "_lr": self._lr,
                    "_layer_names": self.layer_names,
                    "_layer_params": self._layer_params,
                    "_next_dimension": self._current_dimension
                },
                "params": {
                    "_logs": self._logs,
                    "_metric_names": self._metric_names,
                    "_optimizer": self._optimizer.name,
                    "layer_special_params": self.layer_special_params
                }
            }
            pickle.dump(_dic, file)
        saver = tf.train.Saver()
        saver.save(self._sess, _dir)
        graph_io.write_graph(self._sess.graph, os.path.join(path, name), "Model.pb", False)
        with tf.name_scope("OutputFlow"):
            self.get_rs(self._tfx)
        _output = ""
        for op in self._sess.graph.get_operations()[::-1]:
            if "OutputFlow" in op.name:
                _output = op.name
                break
        with open(os.path.join(path, name, "IO.txt"), "w") as file:
            file.write("\n".join([
                "Input  : Entry/Placeholder:0",
                "Output : {}:0".format(_output)
            ]))
        graph_io.write_graph(self._sess.graph, os.path.join(path, name), "Cache.pb", False)
        freeze_graph.freeze_graph(
            os.path.join(path, name, "Cache.pb"),
            "", True, os.path.join(path, name, "Model"),
            _output, "save/restore_all", "save/Const:0",
            os.path.join(path, name, "Frozen.pb"), True, ""
        )
        os.remove(os.path.join(path, name, "Cache.pb"))

        print("Done")
        print("=" * 60)
Example n. 46
def single_worker_inference(infer_model,
                            ckpt,
                            inference_input_file,
                            inference_output_file,
                            hparams):
    """Inference with a single worker."""
    output_infer = inference_output_file

    # Read data
    infer_data = load_data(inference_input_file, hparams)
    print ("Batch size type:", type(hparams.infer_batch_size))
    with tf.Session(
            graph=infer_model.graph, config=utils.get_config_proto()) as sess:
        # (development-only debug hooks removed: TensorBoardDebugWrapperSession
        # and manual insert_op initialization)
        loaded_infer_model = model_helper.load_model(
            infer_model.model, ckpt, sess, "infer", infer_model.insert_op)
        sess.run(
            infer_model.iterator.initializer,
            feed_dict={
                infer_model.src_placeholder: infer_data,
                infer_model.batch_size_placeholder: hparams.infer_batch_size
            })
        # (development-only debug code removed: iterator inspection and a
        # MutableDenseHashTable insert/lookup sanity check)

        # Saving Decoder model

        # Decode
        utils.print_out("# Start decoding ff3")
        # print ("indices:", hparams.inference_indices)

        if hparams.inference_indices:
            _decode_inference_indices(
                loaded_infer_model,
                sess,
                output_infer=output_infer,
                output_infer_summary_prefix=output_infer,
                inference_indices=hparams.inference_indices,
                tgt_eos=hparams.eos,
                subword_option=hparams.subword_option)
        else:
            nmt_utils.decode_and_evaluate(
                "infer",
                loaded_infer_model,
                sess,
                output_infer,
                ref_file=None,
                metrics=hparams.metrics,
                subword_option=hparams.subword_option,
                beam_width=hparams.beam_width,
                tgt_eos=hparams.eos,
                num_translations_per_input=hparams.num_translations_per_input)
        # saving model
        OUTPUT_FOLDER = '7.19'
        utils.print_out("Output Folder : " + OUTPUT_FOLDER)
        utils.print_out("# Saving Decoder model (Normal,ckpt) By Revo")
        loaded_infer_model.saver.save(sess, OUTPUT_FOLDER+"/current.ckpt")
        # save pb file
        graph_io.write_graph(sess.graph_def, OUTPUT_FOLDER, "current.graphdef")
        tf.train.export_meta_graph(filename=OUTPUT_FOLDER + '/current.meta')
        writer = tf.summary.FileWriter(OUTPUT_FOLDER, sess.graph)
        writer.close()
        # Frozen graph saving
        OUTPUT_FROZEN_FILE = 'nmt.pb'
        # OUTPUT_NAMES = ['index_to_string_Lookup', 'table_init', 'batch_iter_init']
        # maybe it is not utf8 as output
        OUTPUT_NODES = ['reverse_table_Lookup']
        utils.print_out("# Saving Decoder model (Frozen) By Revo")
        # extract method try
        # new_graph_def = tf.graph_util.extract_sub_graph(sess.graph_def, ["hash_table_2_Lookup"])
        #
        # remove train node
        utils.print_out("# Removed Training nodes and outputing graph_def")
        inference_graph = tf.graph_util.remove_training_nodes(sess.graph.as_graph_def())
        graph_io.write_graph(inference_graph, OUTPUT_FOLDER, "infer_model.graphdef")
        # Old-style freeze_graph invocation
        freeze_graph.freeze_graph(OUTPUT_FOLDER + "/current.graphdef", "", False,
                                  OUTPUT_FOLDER + "/current.ckpt", "reverse_table_Lookup",
                                  "", "", OUTPUT_FOLDER + "/" + OUTPUT_FROZEN_FILE, True, "")
        # Alternative in-process route (result unused below; the TOCO converter
        # reads the frozen .pb file from disk instead):
        frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, OUTPUT_NODES)
        # TOCO with python
        utils.print_out("# Start converting into TOCO file.")
        converter = tf.contrib.lite.TocoConverter.from_frozen_graph(OUTPUT_FOLDER + "/" + OUTPUT_FROZEN_FILE, ['src_place'], OUTPUT_NODES)
        # try session way
        # input = sess.graph.get_tensor_by_name("src_place:0")
        # output = sess.graph.get_tensor_by_name("reverse_table_Lookup:0")
        # converter = tf.contrib.lite.TocoConverter.from_session(sess, [input], [output])
        #
        tflite_model = converter.convert()
        with open(OUTPUT_FOLDER + "/converted_model.tflite", "wb") as f:
            f.write(tflite_model)
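
As an aside, tf.contrib.lite.TocoConverter was renamed in later TF 1.x releases; on TF 1.13+ the equivalent conversion (same frozen graph and node names as above) would be roughly:

import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    "7.19/nmt.pb",                        # frozen graph produced above
    input_arrays=["src_place"],
    output_arrays=["reverse_table_Lookup"])
tflite_model = converter.convert()
with open("7.19/converted_model.tflite", "wb") as f:
    f.write(tflite_model)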
Example n. 47
def main(args):
  # Read model parameters
  checkpoint_path = tf.train.latest_checkpoint(args.checkpoint_dir)
  if checkpoint_path is None:
    log.error('Could not find a checkpoint in {}'.format(args.checkpoint_dir))
    return
  metapath = ".".join([checkpoint_path, "meta"])
  log.info("Loading {}".format(metapath))
  tf.train.import_meta_graph(metapath)
  with tf.Session() as sess:
    model_params = utils.get_model_params(sess)

  if not hasattr(models, model_params['model_name']):
    log.error("Model {} does not exist".format(model_params['model_name']))
    return
  mdl = getattr(models, model_params['model_name'])

  # Instantiate new evaluation graph
  tf.reset_default_graph()
  sz = model_params['net_input_size']

  log.info("Model {}".format(model_params['model_name']))

  input_tensor = tf.placeholder(tf.float32, [1, sz, sz, 3], name='lowres_input')
  with tf.variable_scope('inference'):
    prediction = mdl.inference(input_tensor, input_tensor, model_params, is_training=False)
  if model_params["model_name" ] == "HDRNetGaussianPyrNN":
    output_tensor = tf.get_collection('packed_coefficients')[0]
    output_tensor = tf.transpose(tf.squeeze(output_tensor), [3, 2, 0, 1, 4], name="output_coefficients")
    log.info("Output shape".format(output_tensor.get_shape()))
  else:
    output_tensor = tf.get_collection('packed_coefficients')[0]
    output_tensor = tf.transpose(tf.squeeze(output_tensor), [3, 2, 0, 1, 4], name="output_coefficients")
    log.info("Output shape {}".format(output_tensor.get_shape()))
  saver = tf.train.Saver()

  gdef = tf.get_default_graph().as_graph_def()

  log.info("Restoring weights from {}".format(checkpoint_path))
  test_graph_name = "test_graph.pbtxt"
  with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)
    tf.train.write_graph(sess.graph, args.checkpoint_dir, test_graph_name)

    input_graph_path = os.path.join(args.checkpoint_dir, test_graph_name)
    output_graph_path = os.path.join(args.checkpoint_dir, "frozen_graph.pb")
    input_saver_def_path = ""
    input_binary = False
    output_binary = True
    input_node_names = input_tensor.name.split(":")[0]
    output_node_names = output_tensor.name.split(":")[0]
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    clear_devices = False

    log.info("Freezing to {}".format(output_graph_path))
    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_path, clear_devices, "")
    log.info('input tensor: {} {}'.format(input_tensor.name, input_tensor.shape))
    log.info('output tensor: {} {}'.format(output_tensor.name, output_tensor.shape))

    # Dump guide parameters
    if model_params['model_name'] == 'HDRNetCurves':
      g = tf.get_default_graph()
      ccm = g.get_tensor_by_name('inference/guide/ccm:0')
      ccm_bias = g.get_tensor_by_name('inference/guide/ccm_bias:0')
      shifts = g.get_tensor_by_name('inference/guide/shifts:0')
      slopes = g.get_tensor_by_name('inference/guide/slopes:0')
      mixing_weights = g.get_tensor_by_name('inference/guide/channel_mixing/weights:0')
      mixing_bias = g.get_tensor_by_name('inference/guide/channel_mixing/biases:0')

      ccm_, ccm_bias_, shifts_, slopes_, mixing_weights_, mixing_bias_ = sess.run(
              [ccm, ccm_bias, shifts, slopes, mixing_weights, mixing_bias])
      shifts_ = np.squeeze(shifts_).astype(np.float32)
      slopes_ = np.squeeze(slopes_).astype(np.float32)
      mix_matrix_dump = np.append(np.squeeze(mixing_weights_), mixing_bias_[0]).astype(np.float32)
      ccm34_ = np.vstack((ccm_, ccm_bias_[np.newaxis, :]))

      save(ccm34_.T, os.path.join(args.checkpoint_dir, 'guide_ccm_f32_3x4.bin'))
      save(shifts_.T, os.path.join(args.checkpoint_dir, 'guide_shifts_f32_16x3.bin'))
      save(slopes_.T, os.path.join(args.checkpoint_dir, 'guide_slopes_f32_16x3.bin'))
      save(mix_matrix_dump, os.path.join(args.checkpoint_dir, 'guide_mix_matrix_f32_1x4.bin'))

    elif model_params['model_name'] == "HDRNetGaussianPyrNN":
      g = tf.get_default_graph()
      for lvl in range(3):
        conv1_w = g.get_tensor_by_name('inference/guide/level_{}/conv1/weights:0'.format(lvl))
        conv1_b = g.get_tensor_by_name('inference/guide/level_{}/conv1/BatchNorm/beta:0'.format(lvl))
        conv1_mu = g.get_tensor_by_name('inference/guide/level_{}/conv1/BatchNorm/moving_mean:0'.format(lvl))
        conv1_sigma = g.get_tensor_by_name('inference/guide/level_{}/conv1/BatchNorm/moving_variance:0'.format(lvl))
        conv1_eps = g.get_tensor_by_name('inference/guide/level_{}/conv1/BatchNorm/batchnorm/add/y:0'.format(lvl))
        conv2_w = g.get_tensor_by_name('inference/guide/level_{}/conv2/weights:0'.format(lvl))
        conv2_b = g.get_tensor_by_name('inference/guide/level_{}/conv2/biases:0'.format(lvl))

        conv1w_, conv1b_, conv1mu_, conv1sigma_, conv1eps_, conv2w_, conv2b_ = sess.run(
            [conv1_w, conv1_b, conv1_mu, conv1_sigma, conv1_eps, conv2_w, conv2_b])

        conv1b_ -= conv1mu_/np.sqrt((conv1sigma_+conv1eps_))
        conv1w_ = conv1w_/np.sqrt((conv1sigma_+conv1eps_))

        conv1w_ = np.squeeze(conv1w_.astype(np.float32))
        conv1b_ = np.squeeze(conv1b_.astype(np.float32))
        conv1b_ = conv1b_[np.newaxis, :]

        conv2w_ = np.squeeze(conv2w_.astype(np.float32))
        conv2b_ = np.squeeze(conv2b_.astype(np.float32))

        conv2 = np.append(conv2w_, conv2b_)
        conv1 = np.vstack([conv1w_, conv1b_])

        save(conv1.T, os.path.join(args.checkpoint_dir, 'guide_level{}_conv1.bin'.format(lvl)))
        save(conv2, os.path.join(args.checkpoint_dir, 'guide_level{}_conv2.bin'.format(lvl)))

    elif model_params['model_name'] == "HDRNetPointwiseNNGuide":
      g = tf.get_default_graph()
      conv1_w = g.get_tensor_by_name('inference/guide/conv1/weights:0')
      conv1_b = g.get_tensor_by_name('inference/guide/conv1/BatchNorm/beta:0')
      conv1_mu = g.get_tensor_by_name('inference/guide/conv1/BatchNorm/moving_mean:0')
      conv1_sigma = g.get_tensor_by_name('inference/guide/conv1/BatchNorm/moving_variance:0')
      conv1_eps = g.get_tensor_by_name('inference/guide/conv1/BatchNorm/batchnorm/add/y:0')
      conv2_w = g.get_tensor_by_name('inference/guide/conv2/weights:0')
      conv2_b = g.get_tensor_by_name('inference/guide/conv2/biases:0')

      conv1w_, conv1b_, conv1mu_, conv1sigma_, conv1eps_, conv2w_, conv2b_ = sess.run(
          [conv1_w, conv1_b, conv1_mu, conv1_sigma, conv1_eps, conv2_w, conv2_b])

      conv1b_ -= conv1mu_/np.sqrt((conv1sigma_+conv1eps_))
      conv1w_ = conv1w_/np.sqrt((conv1sigma_+conv1eps_))

      conv1w_ = np.squeeze(conv1w_.astype(np.float32))
      conv1b_ = np.squeeze(conv1b_.astype(np.float32))
      conv1b_ = conv1b_[np.newaxis, :]

      conv2w_ = np.squeeze(conv2w_.astype(np.float32))
      conv2b_ = np.squeeze(conv2b_.astype(np.float32))

      conv2 = np.append(conv2w_, conv2b_)
      conv1 = np.vstack([conv1w_, conv1b_])

      save(conv1.T, os.path.join(args.checkpoint_dir, 'guide_conv1.bin'))
      save(conv2, os.path.join(args.checkpoint_dir, 'guide_conv2.bin'))
Example n. 48
# Freeze the graph

input_graph_path = MODEL_NAME+'.pbtxt'
checkpoint_path = './'+MODEL_NAME+'.ckpt'
input_saver_def_path = ""
input_binary = False
output_node_names = "output/dense/BiasAdd"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_frozen_graph_name = 'frozen_'+MODEL_NAME+'.pb'
output_optimized_graph_name = 'optimized_'+MODEL_NAME+'.pb'
clear_devices = True


freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                          input_binary, checkpoint_path, output_node_names,
                          restore_op_name, filename_tensor_name,
                          output_frozen_graph_name, clear_devices, "")



# Optimize for inference

input_graph_def = tf.GraphDef()
with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
    data = f.read()
    input_graph_def.ParseFromString(data)

output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        ["I"], # an array of the input node(s)
        ["O"], # an array of output nodes
        tf.float32.as_datatype_enum)
Example n. 49
    def build(self, input_nodes=None, output_nodes=None):
        if input_nodes is None:
            input_nodes = self.gan.input_nodes()
        if output_nodes is None:
            output_nodes = self.gan.output_nodes()
        save_file_text = self.name+".pbtxt"
        build_file = os.path.expanduser("builds/"+save_file_text)
        def create_path(filename):
            return os.makedirs(os.path.expanduser(os.path.dirname(filename)), exist_ok=True)
        create_path(build_file)
        tf.train.write_graph(self.gan.session.graph, 'builds', save_file_text)
        inputs = [x.name.split(":")[0] for x in input_nodes]
        outputs = [x.name.split(":")[0] for x in output_nodes]

        with self.gan.session as sess:
            converter = tf.lite.TFLiteConverter.from_session(sess, self.gan.input_nodes(), self.gan.output_nodes())
            tflite_model = converter.convert()
            f = open("builds/"+ self.gan.name+".tflite", "wb")
            f.write(tflite_model)
            f.close()
        tf.reset_default_graph()
        self.gan.session.close()
        [print("Input: ", x) for x in self.gan.input_nodes()]
        [print("Output: ", y) for y in self.gan.output_nodes()]
        print("Written to builds/"+self.gan.name+".tflite")

        pbtxt_path = "builds/"+self.name +'.pbtxt'
        checkpoint_path = "saves/"+self.name +'/model.ckpt'
        input_saver_def_path = ""
        input_binary = False
        output_node_names = ",".join(outputs)
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        output_frozen_graph_name = 'builds/frozen_'+self.name +'.pb'
        output_optimized_graph_name = 'builds/optimized_'+self.name+'.pb'
        clear_devices = True

        freeze_graph.freeze_graph(pbtxt_path, input_saver_def_path,
          input_binary, checkpoint_path, output_node_names,
          restore_op_name, filename_tensor_name,
          output_frozen_graph_name, clear_devices, "")

        input_graph_def = tf.GraphDef()
        with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
            data = f.read()
            input_graph_def.ParseFromString(data)

        output_graph_def = optimize_for_inference_lib.optimize_for_inference(
                input_graph_def,
                inputs, # an array of the input node(s)
                outputs, # an array of output nodes
                tf.float32.as_datatype_enum)

        # Save the optimized graph

        with tf.gfile.FastGFile(output_optimized_graph_name, "wb") as f:
            f.write(output_graph_def.SerializeToString())

        print("Saved generator to ", output_optimized_graph_name)

        print("Testing loading ", output_optimized_graph_name)
        with tf.gfile.FastGFile(output_optimized_graph_name, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
            #tflite_model = tf.lite.TFLiteConverter(graph_def, self.gan.input_nodes(), self.gan.output_nodes()).convert()
            #f = open("builds/"+ self.gan.name+".tflite", "wb")
            #f.write(tflite_model)
            #f.close()

        with tf.Session() as sess:
            for input in inputs:
                print("Input: ", input, sess.graph.get_tensor_by_name(input+":0"))
            for output in outputs:
                print("Output: ", output, sess.graph.get_tensor_by_name(output+":0"))
Example n. 50
        saver = tf.train.Saver()
        saver.save(sess, save_path=checkpoint_path)

        print('Checkpoint exported to {}'.format(checkpoint_path))

        tf.train.write_graph(sess.graph_def, export_base_path, graph_name,
                             as_text=not as_binary)

        print('Graph exported to {}'.format(graph_path))

        if do_freeze:
            print('Freezing graph...')
            freeze_graph.freeze_graph(
                input_graph=graph_path, input_saver='',
                input_binary=as_binary, input_checkpoint=checkpoint_path,
                output_node_names=output_node_name,
                restore_op_name='save/restore_all',
                filename_tensor_name='save/Const:0',
                output_graph=frozen_graph_path, clear_devices=True,
                initializer_nodes='')

            print('Frozen graph exported to {}'.format(frozen_graph_path))

            graph_path = frozen_graph_path

        if do_optimize:
            print('Optimizing graph...')
            input_graph_def = tf.GraphDef()

            with tf.gfile.Open(graph_path, 'rb') as f:
                data = f.read()
                input_graph_def.ParseFromString(data)