def strip_pruning_vars(checkpoint_dir, output_node_names, output_dir, filename):
  """Remove pruning-related auxiliary variables and ops from the graph.

  Accepts training checkpoints and produces a GraphDef in which the pruning vars
  and ops have been removed.

  Args:
    checkpoint_dir: Path to the checkpoints.
    output_node_names: The name of the output nodes, comma separated.
    output_dir: Directory where to write the graph.
    filename: Output GraphDef file name.

  Returns:
    None

  Raises:
    ValueError: if output_node_names are not provided.
  """
  if not output_node_names:
    raise ValueError(
        'Need to specify at least 1 output node through output_node_names flag')
  output_node_names = output_node_names.replace(' ', '').split(',')

  initial_graph_def = strip_pruning_vars_lib.graph_def_from_checkpoint(
      checkpoint_dir, output_node_names)

  final_graph_def = strip_pruning_vars_lib.strip_pruning_vars_fn(
      initial_graph_def, output_node_names)
  graph_io.write_graph(final_graph_def, output_dir, filename, as_text=False)
  logging.info('\nFinal graph written to %s', os.path.join(
      output_dir, filename))
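
# A minimal usage sketch for the helper above; the checkpoint directory,
# output node names, and output paths below are hypothetical placeholders.
strip_pruning_vars('/tmp/pruning_ckpts', 'softmax/logits',
                   '/tmp/stripped', 'stripped_graph.pb')
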
def main(unused_args):
  if not gfile.Exists(FLAGS.input):
    print("Input graph file '" + FLAGS.input + "' does not exist!")
    return -1

  input_graph_def = graph_pb2.GraphDef()
  with gfile.Open(FLAGS.input, "rb") as f:
    data = f.read()
    if FLAGS.frozen_graph:
      input_graph_def.ParseFromString(data)
    else:
      text_format.Merge(data.decode("utf-8"), input_graph_def)

  output_graph_def = optimize_for_inference_lib.optimize_for_inference(
      input_graph_def,
      FLAGS.input_names.split(","),
      FLAGS.output_names.split(","),
      FLAGS.placeholder_type_enum,
      FLAGS.toco_compatible)

  if FLAGS.frozen_graph:
    # Serialized protos are binary, so the file must be opened in "wb" mode.
    with gfile.FastGFile(FLAGS.output, "wb") as f:
      f.write(output_graph_def.SerializeToString())
  else:
    graph_io.write_graph(output_graph_def,
                         os.path.dirname(FLAGS.output),
                         os.path.basename(FLAGS.output))
  return 0
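
# A hedged command-line sketch for the tool above; the flag names mirror the
# FLAGS references in main, and the file and node names are hypothetical.
#
#   python optimize_for_inference.py --input=frozen.pb --output=optimized.pb \
#       --frozen_graph=True --input_names=input --output_names=softmax
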
  def testStripUnusedMultipleInputs(self):
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that multiplies two input nodes.
    with ops.Graph().as_default():
      constant_node1 = constant_op.constant(1.0, name="constant_node1")
      constant_node2 = constant_op.constant(2.0, name="constant_node2")
      input_node1 = math_ops.subtract(constant_node1, 3.0, name="input_node1")
      input_node2 = math_ops.subtract(constant_node2, 5.0, name="input_node2")
      output_node = math_ops.multiply(
          input_node1, input_node2, name="output_node")
      math_ops.add(output_node, 2.0, name="later_node")
      sess = session.Session()
      output = sess.run(output_node)
      self.assertNear(6.0, output, 0.00001)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_binary = False
    input_node_names = "input_node1,input_node2"
    input_node_types = [
        dtypes.float32.as_datatype_enum, dtypes.float32.as_datatype_enum
    ]
    output_binary = True
    output_node_names = "output_node"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary,
                                             output_graph_path, output_binary,
                                             input_node_names,
                                             output_node_names,
                                             input_node_types)

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      self.assertEqual(3, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("Add", node.op)
        self.assertNotEqual("Sub", node.op)
        # input_node_names is comma separated, so split it before comparing.
        if node.name in input_node_names.split(","):
          self.assertTrue("shape" in node.attr)

      with session.Session() as sess:
        input_node1 = sess.graph.get_tensor_by_name("input_node1:0")
        input_node2 = sess.graph.get_tensor_by_name("input_node2:0")
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node,
                          feed_dict={input_node1: [10.0],
                                     input_node2: [-5.0]})
        self.assertNear(-50.0, output, 0.00001)
def main(unused_argv):
  # Model definition.
  g = ops.Graph()
  with g.as_default():
    images = array_ops.placeholder(
        dtypes.float32, shape=(1, None, None, 3), name='input_image')
    inception.inception_resnet_v2_base(images)

  graph_io.write_graph(g.as_graph_def(), cmd_args.graph_dir,
                       cmd_args.graph_filename)
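
# A round-trip sketch: write_graph defaults to a text-format proto, so the
# file written above can be read back with text_format.Merge. The extra
# imports are assumptions not shown in this snippet.
#
#   from google.protobuf import text_format
#   from tensorflow.core.framework import graph_pb2
#   read_def = graph_pb2.GraphDef()
#   with open(os.path.join(cmd_args.graph_dir, cmd_args.graph_filename)) as f:
#       text_format.Merge(f.read(), read_def)
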
  def _WriteGraph(self, run_params, gdef, graph_state):
    if graph_state == GraphState.ORIGINAL:
      label = "Original"
    elif graph_state == GraphState.CALIBRATE:
      label = "CalibEngine"
    elif graph_state == GraphState.INFERENCE:
      label = "InferEngine"
    graph_name = (
        self.__class__.__name__ + "_" + run_params.test_name + "_" + label +
        ".pbtxt")
    temp_dir = os.getenv("TRT_TEST_TMPDIR", self.get_temp_dir())
    if temp_dir:
      logging.info("Writing graph to %s/%s", temp_dir, graph_name)
      graph_io.write_graph(gdef, temp_dir, graph_name)
Example #6
def freeze_model(sess, ckpt, output):
    # Named freeze_model so it does not shadow the freeze_graph module used below.
    print("Loading checkpoint...")
    saver = tf.train.Saver()
    saver.restore(sess, ckpt)
    print("Writing graph...")
    if not os.path.isdir("_Cache"):
        os.makedirs("_Cache")
    _dir = os.path.join("_Cache", "Model")
    saver.save(sess, _dir)
    graph_io.write_graph(sess.graph, "_Cache", "Model.pb", False)
    print("Freezing graph...")
    freeze_graph.freeze_graph(
        os.path.join("_Cache", "Model.pb"),
        "", True, os.path.join("_Cache", "Model"),
        output, "save/restore_all", "save/Const:0", "Frozen.pb", True, ""
    )
    print("Done")
Example #7
    def saveModel(self, sess, outputDirectory = ""):
        from tensorflow.python.framework import graph_io
        from tensorflow.python.tools import freeze_graph

        input_graph_path = outputDirectory + "tfModel.pb"
        graph_io.write_graph(sess.graph, "./", input_graph_path)
    
        # Create a frozen version of the graph for distribution
        input_saver_def_path = ""
        input_binary = False
        checkpoint_path = outputDirectory + "models/model.ckpt"
        output_node_names = "y_ph"
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        output_graph_path = outputDirectory + "tfModel_frozen.pb"
        clear_devices = False
    
        freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                                  input_binary, checkpoint_path, output_node_names,
                                  restore_op_name, filename_tensor_name,
                                  output_graph_path, clear_devices, "")
    
        print("Frozen model (model and weights) saved in file: %s" % output_graph_path)
  def testStripUnused(self):
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that has a single constant containing 1.0,
    # and that then multiplies it by 2.
    with ops.Graph().as_default():
      constant_node = constant_op.constant(1.0, name="constant_node")
      wanted_input_node = math_ops.subtract(constant_node,
                                            3.0,
                                            name="wanted_input_node")
      output_node = math_ops.multiply(
          wanted_input_node, 2.0, name="output_node")
      math_ops.add(output_node, 2.0, name="later_node")
      sess = session.Session()
      output = sess.run(output_node)
      self.assertNear(-4.0, output, 0.00001)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_binary = False
    output_binary = True
    output_node_names = "output_node"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    def strip(input_node_names):
      strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary,
                                               output_graph_path, output_binary,
                                               input_node_names,
                                               output_node_names,
                                               dtypes.float32.as_datatype_enum)

    with self.assertRaises(KeyError):
      strip("does_not_exist")

    with self.assertRaises(ValueError):
      strip("wanted_input_node:0")

    input_node_names = "wanted_input_node"
    strip(input_node_names)

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      self.assertEqual(3, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("Add", node.op)
        self.assertNotEqual("Sub", node.op)
        if node.name == input_node_names:
          self.assertTrue("shape" in node.attr)

      with session.Session() as sess:
        input_node = sess.graph.get_tensor_by_name("wanted_input_node:0")
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node, feed_dict={input_node: [10.0]})
        self.assertNear(20.0, output, 0.00001)
Example #9
def single_worker_inference(infer_model,
                            ckpt,
                            inference_input_file,
                            inference_output_file,
                            hparams):
    """Inference with a single worker."""
    output_infer = inference_output_file

    # Read data
    infer_data = load_data(inference_input_file, hparams)
    print ("Batch size type:", type(hparams.infer_batch_size))
    with tf.Session(
            graph=infer_model.graph, config=utils.get_config_proto()) as sess:
        # Optional debug hooks: wrap sess in tf_debug.TensorBoardDebugWrapperSession
        # and pre-run infer_model.insert_op to initialize the lookup tables.
        loaded_infer_model = model_helper.load_model(
            infer_model.model, ckpt, sess, "infer", infer_model.insert_op)
        sess.run(
            infer_model.iterator.initializer,
            feed_dict={
                infer_model.src_placeholder: infer_data,
                infer_model.batch_size_placeholder: hparams.infer_batch_size
            })
        # Optional debug hooks: inspect the iterator tensors (infer_model.iterator)
        # or exercise a tf.contrib.lookup.MutableDenseHashTable here before decoding.

        # Decode
        utils.print_out("# Start decoding ff3")
        # print ("indices:", hparams.inference_indices)

        if hparams.inference_indices:
            _decode_inference_indices(
                loaded_infer_model,
                sess,
                output_infer=output_infer,
                output_infer_summary_prefix=output_infer,
                inference_indices=hparams.inference_indices,
                tgt_eos=hparams.eos,
                subword_option=hparams.subword_option)
        else:
            nmt_utils.decode_and_evaluate(
                "infer",
                loaded_infer_model,
                sess,
                output_infer,
                ref_file=None,
                metrics=hparams.metrics,
                subword_option=hparams.subword_option,
                beam_width=hparams.beam_width,
                tgt_eos=hparams.eos,
                num_translations_per_input=hparams.num_translations_per_input)
        # Save the decoder model.
        OUTPUT_FOLDER = '7.19'
        utils.print_out("Output Folder : " + OUTPUT_FOLDER)
        utils.print_out("# Saving Decoder model (Normal,ckpt) By Revo")
        loaded_infer_model.saver.save(sess, OUTPUT_FOLDER + "/current.ckpt")
        # save pb file
        graph_io.write_graph(sess.graph_def, OUTPUT_FOLDER, "current.graphdef")
        tf.train.export_meta_graph(filename=OUTPUT_FOLDER + '/current.meta')
        writer = tf.summary.FileWriter(OUTPUT_FOLDER, sess.graph)
        writer.close()
        # Frozen graph saving.
        OUTPUT_FROZEN_FILE = 'nmt.pb'
        # Note: the lookup output may not be UTF-8 text.
        OUTPUT_NODES = ['reverse_table_Lookup']
        utils.print_out("# Saving Decoder model (Frozen) By Revo")
        # Remove training nodes and write out the inference graph_def.
        utils.print_out("# Removing training nodes and writing graph_def")
        inference_graph = tf.graph_util.remove_training_nodes(sess.graph.as_graph_def())
        graph_io.write_graph(inference_graph, OUTPUT_FOLDER, "infer_model.graphdef")
        # Freeze with the older file-based freeze_graph API.
        freeze_graph.freeze_graph(
            OUTPUT_FOLDER + "/current.graphdef", "", False,
            OUTPUT_FOLDER + "/current.ckpt", "reverse_table_Lookup", "", "",
            OUTPUT_FOLDER + "/" + OUTPUT_FROZEN_FILE, True, "")
        # Fold variables into constants as well (sess.graph_def here; the
        # trimmed inference_graph would also work as the input).
        frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, OUTPUT_NODES)
        # TOCO (TFLite) conversion via the Python API.
        utils.print_out("# Start converting to a TFLite file.")
        converter = tf.contrib.lite.TocoConverter.from_frozen_graph(
            OUTPUT_FOLDER + "/" + OUTPUT_FROZEN_FILE, ['src_place'], OUTPUT_NODES)
        # Alternative: build the converter straight from the live session:
        #   converter = tf.contrib.lite.TocoConverter.from_session(
        #       sess, [sess.graph.get_tensor_by_name("src_place:0")],
        #       [sess.graph.get_tensor_by_name("reverse_table_Lookup:0")])
        tflite_model = converter.convert()
        with open(OUTPUT_FOLDER + "/converted_model.tflite", "wb") as f:
            f.write(tflite_model)
sess = K.get_session()

if args.graph_def:
    f = args.output_graphdef_file
    tf.train.write_graph(sess.graph.as_graph_def(),
                         output_fld,
                         f,
                         as_text=True)
    print('saved the graph definition in ascii format at: ',
          osp.join(output_fld, f))

# convert variables to constants and save
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from tensorflow.tools.graph_transforms import TransformGraph
if args.quantize:
    transforms = ["quantize_weights", "quantize_nodes"]
    transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [],
                                           pred_node_names, transforms)
    constant_graph = graph_util.convert_variables_to_constants(
        sess, transformed_graph_def, pred_node_names)
else:
    constant_graph = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), pred_node_names)
graph_io.write_graph(constant_graph,
                     output_fld,
                     args.output_model_file,
                     as_text=False)
print('saved the frozen graph (ready for inference) at: ',
      osp.join(output_fld, args.output_model_file))
Example #11
input_size = 16
output_size = 2
input_shape = [1, input_size]
x1 = tf.placeholder(tf.float32, shape=input_shape)
x2 = tf.placeholder(tf.float32, shape=input_shape)

x = x1 + x2
w1 = tf.get_variable("w1", [input_size, output_size],
                     initializer=tf.random_normal_initializer())
b1 = tf.get_variable("b1", [output_size],
                     initializer=tf.random_normal_initializer())
x = tf.matmul(x, w1) + b1
y = tf.sigmoid(x)

pred_node_names = ["output"]
pred = [tf.identity(y, name=pred_node_names[0])]

sess = tf.Session()
sess.run(tf.global_variables_initializer())

constant_graph = graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(), pred_node_names)

frozen = graph_util.remove_training_nodes(constant_graph)

graph_io.write_graph(frozen, ".", output_file, as_text=False)
print("saved the frozen graph (ready for inference) at: ", output_file)

data = np.random.standard_normal(input_shape)
np.save(input_data_file, data)
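
# A hedged verification sketch: reload the frozen graph written above and run
# it on the saved input. The placeholder tensor names ("Placeholder:0",
# "Placeholder_1:0") are assumptions based on TensorFlow's default auto-naming.
verify_graph = tf.Graph()
with verify_graph.as_default():
    gd = tf.GraphDef()
    with open(output_file, "rb") as f:
        gd.ParseFromString(f.read())
    tf.import_graph_def(gd, name="")
with tf.Session(graph=verify_graph) as verify_sess:
    result = verify_sess.run("output:0",
                             feed_dict={"Placeholder:0": data,
                                        "Placeholder_1:0": data})
    print("frozen graph output:", result)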
def freeze_graph(graph, session, output, save_pb_dir='.', save_pb_name='frozen_model.pb', save_pb_as_text=False):
    with graph.as_default():
        graphdef_inf = tf.graph_util.remove_training_nodes(graph.as_graph_def())
        graphdef_frozen = tf.graph_util.convert_variables_to_constants(session, graphdef_inf, output)
        graph_io.write_graph(graphdef_frozen, save_pb_dir, save_pb_name, as_text=save_pb_as_text)
        return graphdef_frozen
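
# A usage sketch for the helper above with a Keras session; `model` is a
# hypothetical loaded Keras model and the output path is arbitrary.
#
#   from keras import backend as K
#   sess = K.get_session()
#   frozen = freeze_graph(sess.graph, sess,
#                         [out.op.name for out in model.outputs],
#                         save_pb_dir='./export', save_pb_name='model.pb')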
def keras_to_tensorflow(num_output=1,
                        quantize=False,
                        input_fld=".",
                        output_fld=".",
                        input_model_file='final_model.hdf5',
                        output_model_file="",
                        output_node_prefix="output_node"):
    """
    Input arguments:

    num_output: the number of output nodes; this has nothing to do with the
    number of classes, batch_size, etc., and is mostly equal to 1. If the
    network is a **multi-stream network** (forked network with multiple
    outputs), set the value to the number of outputs.

    quantize: if set to True, use the quantize feature of Tensorflow
    (https://www.tensorflow.org/performance/quantization) [default: False]

    input_fld: directory holding the keras weights file [default: .]

    output_fld: destination directory to save the tensorflow files [default: .]

    input_model_file: name of the input weight file [default: 'final_model.hdf5']

    output_model_file: name of the output weight file [default: input_model_file + '.pb']

    output_node_prefix: the prefix to use for output nodes. [default: output_node]

    """

    # initialize
    from keras.models import load_model
    import tensorflow as tf
    from pathlib import Path
    from keras import backend as K

    output_fld = input_fld if output_fld == '' else output_fld
    if output_model_file == '':
        output_model_file = str(Path(input_model_file).name) + '.pb'
    Path(output_fld).mkdir(parents=True, exist_ok=True)
    weight_file_path = str(Path(input_fld) / input_model_file)

    K.set_learning_phase(0)
    K.set_image_data_format('channels_last')

    # Load keras model and rename output
    try:
        net_model = load_model(weight_file_path)
    except ValueError as err:
        print(
            '''Input file specified ({}) only holds the weights, and not the model definition.
        Save the model using model.save(filename.h5), which will contain the network architecture
        as well as its weights.
        If the model is saved using model.save_weights(filename.h5), the model architecture is
        expected to be saved separately in a json format and loaded prior to loading the weights.
        Check the keras documentation for more details (https://keras.io/getting-started/faq/)'''
            .format(weight_file_path))
        raise err
    pred = [None] * num_output
    pred_node_names = [None] * num_output
    for i in range(num_output):
        pred_node_names[i] = output_node_prefix + str(i)
        pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])
    print('output nodes names are: ', pred_node_names)

    sess = K.get_session()

    # convert variables to constants and save
    from tensorflow.python.framework import graph_util
    from tensorflow.python.framework import graph_io
    if quantize:
        from tensorflow.tools.graph_transforms import TransformGraph
        transforms = ["quantize_weights", "quantize_nodes"]
        transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [],
                                               pred_node_names, transforms)
        constant_graph = graph_util.convert_variables_to_constants(
            sess, transformed_graph_def, pred_node_names)
    else:
        constant_graph = graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(), pred_node_names)
    graph_io.write_graph(constant_graph,
                         output_fld,
                         output_model_file,
                         as_text=False)
    print('saved the frozen graph (ready for inference) at: ',
          str(Path(output_fld) / output_model_file))
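
# A hedged invocation of the converter above; directory and file names are
# hypothetical.
#
#   keras_to_tensorflow(input_fld='models', input_model_file='final_model.hdf5',
#                       output_fld='export', output_model_file='final_model.pb')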
Example #14
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
keras.backend.tensorflow_backend.set_session(get_session())

model = models.load_model(model_path, backbone_name='resnet50')
graph_z = tf.get_default_graph()

out_node_name = ""
for i in range(len(model.outputs)):
    output_name = model.outputs[i].name.split(':')[0]
    tf.identity(model.outputs[i], output_name)
    out_node_name = out_node_name + "," + output_name
    print(out_node_name)
out_node_name = out_node_name[1:]  # strip the leading comma
K.set_learning_phase(0)
model_dir = './'
model_filename = 'retinanet.pb'
sess = K.get_session()
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
checkpoint_path = saver.save(
    sess,
    '/home/ubuntu/orlink1/orlink/app/modules/orlink_module/saved_ckpt',
    global_step=0)
nodes = tf.get_default_graph().as_graph_def().node
graph_io.write_graph(sess.graph, '.', 'tmp1.pb')
freeze_graph.freeze_graph('./tmp1.pb', '', False, checkpoint_path,
                          out_node_name, "save/restore_all", "save/Const:0",
                          model_dir + model_filename, False, "")

print "saving"
output_model_dir = "tf_model"

K.set_learning_phase(0)
sess = K.get_session()

test_model = models.load_model(input_model_path)
orig_output_node_names = [node.op.name for node in test_model.outputs]

constant_graph = graph_util.convert_variables_to_constants(
    sess,
    sess.graph.as_graph_def(),
    orig_output_node_names)

graph_io.write_graph(
    constant_graph,
    output_model_dir,
    output_model_name,
    as_text=False)

from keras.models import load_model
model = load_model('top_layers.iv3.hdf5')

from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input

#Test DataSet Generator with Augmentation
test_generator = ImageDataGenerator(preprocessing_function=preprocess_input)

test_flow = test_generator.flow_from_directory(
    'test',
    shuffle=False,
Example #16
  def build_model(self, model_fn, eval_model_fn, params, config):
    """Build the TPU model for training and eval."""
    tf.logging.info("LowLevelRunner: build_model method for training and eval.")

    def tpu_train_step(loss):
      """Generate the TPU graph."""
      del loss
      values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
      unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
                                                      values)
      features = unflattened_inputs["features"]
      labels = unflattened_inputs["labels"]
      estimator_spec = model_fn(
          features,
          labels,
          tf.estimator.ModeKeys.TRAIN,
          params=params,
          config=config)
      loss, train_op = estimator_spec.loss, estimator_spec.train_op
      self.scaffold_fn = estimator_spec.scaffold_fn
      with tf.control_dependencies([train_op]):
        return tf.identity(loss)

    @tpu_function.on_device_training_loop
    def train_loop():
      return training_loop.repeat(
          self.train_steps_tensor, tpu_train_step, [_INITIAL_LOSS])

    def tpu_eval_step():
      """Generate the TPU graph."""
      values = self.eval_infeed_queue[0].generate_dequeue_op(tpu_device=0)
      unflattened_inputs = data_nest.pack_sequence_as(
          self.eval_feature_structure, values)
      features = unflattened_inputs["features"]
      estimator_spec = eval_model_fn(
          features,
          None,
          tf.estimator.ModeKeys.PREDICT,
          params=params,
          config=config)
      for k, v in six.iteritems(estimator_spec.predictions):
        self.outfeed_names.append(k)
        self.outfeed_tensors.append(v)

      with tf.device(low_level_utils.device_for_tpu_core(self._get_host(0))):
        outfeed_enqueue_ops = tpu_ops.outfeed_enqueue_tuple(
            self.outfeed_tensors)
      with tf.control_dependencies([outfeed_enqueue_ops]):
        return tf.no_op()

    @tpu_function.on_device_training_loop
    def eval_loop():
      return training_loop.repeat(self.eval_steps_tensor, tpu_eval_step, [])

    def train_eval_step():
      with tf.control_dependencies(train_loop()):
        return eval_loop()

    @tpu_function.on_device_training_loop
    def train_eval_loop():
      return training_loop.repeat(self.num_epochs_tensor, train_eval_step, [])

    with self.graph.as_default():
      (self.compile_op, self.train_eval_op,) = tpu.split_compile_and_shard(
          train_eval_loop,
          inputs=[],
          num_shards=FLAGS.tpu_num_shards,
          outputs_from_all_shards=False,
      )
      if self.scaffold_fn:
        self.scaffold_fn()
      self.sess.run(tf.global_variables_initializer())
      self.sess.run(tf.local_variables_initializer())

      graph_io.write_graph(
          self.graph.as_graph_def(add_shapes=True),
          FLAGS.output_dir,
          "graph.pbtxt")

    def create_dequeue_ops(host_id):
      """Create outfeed dequeue ops."""
      dequeue_ops = []
      tensor_dtypes = []
      tensor_shapes = []
      for v in self.outfeed_tensors:
        dequeue_ops.append([])
        tensor_dtypes.append(v.dtype)
        tensor_shapes.append(v.shape)
      for i in range(FLAGS.tpu_num_shards_per_host):
        with tf.device(
            low_level_utils.device_for_host(self._get_host(host_id))):
          outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
              dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
          for j, item in enumerate(outfeed_tensors):
            dequeue_ops[j].append(item)
      for j in range(len(outfeed_tensors)):
        dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
      return dequeue_ops

    with self.output_graph.as_default():
      # Get dequeue ops from each hosts.
      for i in range(0, self.num_hosts):
        self.dequeue_ops.append({})
        tf.logging.info(
            "LowLevelRunner: get dequeue ops for host: %d.", i)
        for j, dequeue_tensor in enumerate(create_dequeue_ops(i)):
          self.dequeue_ops[i][self.outfeed_names[j]] = dequeue_tensor
Example #17
with graph.as_default():
    with tf.variable_scope('net'):
        net_inp = tf.placeholder(tf.float32, INPUT_SIZE, name='input')
        net_out = model.model(net_inp, is_training=False)
    saver = tf.train.Saver()

## Create TF Session and Load Snapshot
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
sess = tf.Session(graph=graph, config=sess_config)
snapshot_fpath = tf.train.latest_checkpoint(MODEL_PATH)
print(snapshot_fpath)
saver.restore(sess, snapshot_fpath)

## Freeze Graph
graphdef_inf = tf.graph_util.remove_training_nodes(graph.as_graph_def())

graphdef_frozen = tf.graph_util.convert_variables_to_constants(
    sess, graphdef_inf, OUTPUT_NAMES)

graph_io.write_graph(graphdef_frozen, './', FROZEN_FPATH, as_text=False)

## List frozen nodes
[x.name for x in graphdef_frozen.node]

## Export frozen graph for visualization
graph_frozen = tf.Graph()
with graph_frozen.as_default():
    tf.import_graph_def(graphdef_frozen)
_ = tf.summary.FileWriter('output/vggA_BN_frozen/', graph_frozen)
Example #18
def export_model_to_tensorflow(path_to_trained_keras_model: str):
    print("Loading model for exporting to Protocol Buffer format...")
    model = keras.models.load_model(path_to_trained_keras_model)

    sess = K.get_session()

    # serialize the model and get its weights, for quick re-building
    config = model.get_config()
    weights = model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = os.path.abspath(os.path.join(
        "export", "simple"))  # where to save the exported graph
    os.makedirs(export_path)
    checkpoint_state_name = "checkpoint_state"
    export_version = 1  # version number (integer)
    saver = tensorflow.train.Saver(sharded=True, name=checkpoint_state_name)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=model.input,
                                                  scores_tensor=model.output)

    # # Version 1 of exporter
    # model_exporter.init(sess.graph.as_graph_def(), default_graph_signature=signature)
    # model_exporter.export(export_path, tensorflow.constant(export_version), sess)
    #
    # # Version 2 of exporter
    # tensorflow.train.write_graph(sess.graph.as_graph_def(), logdir=".", name="simple.pbtxt", as_text=True)

    # Version 3 with Freezer from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph_test.py
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    saver_write_version = saver_pb2.SaverDef.V2

    # Save a checkpoint of the session's variables and write out the graph,
    # then run the freeze_graph conversion routine on the saved files.
    saver = tensorflow.train.Saver(write_version=saver_write_version)
    checkpoint_path = saver.save(sess,
                                 export_path,
                                 global_step=0,
                                 latest_filename=checkpoint_state_name)
    graph_io.write_graph(sess.graph, export_path, input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(export_path, input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node/Softmax"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(export_path, output_graph_name)
    clear_devices = False
    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_path, clear_devices, "")

    shutil.copy(os.path.join("export", "simple", "output_graph.pb"),
                output_graph_name)
    shutil.rmtree("export")
    print("Exported model: {0}".format(os.path.abspath(output_graph_name)))
Example #19
            session_init=get_model_loader(args.load), \
            input_names=MODEL.get_inference_tensor_names()[0], \
            output_names=MODEL.get_inference_tensor_names()[1])
    if args.evaluate:
        assert args.evaluate.endswith('.json') or args.evaluate.endswith(
            '.npz'), args.evaluate
        if args.evalfromjson:
            ret = print_evaluation_scores(args.evaluate)
        else:
            ret = do_evaluate(pred_config, args.evaluate, batch_size)
        print('mIoU = {:.3f}'.format(ret['miou']))
    else:
        pred = OfflinePredictor(pred_config)
        if args.export_graph:
            from tensorflow.python.framework import graph_io
            export_path, export_name = os.path.split(args.export_graph)
            graph_io.write_graph(pred.sess.graph,
                                 export_path,
                                 export_name + 'txt',
                                 as_text=True)
            graph_io.write_graph(pred.sess.graph,
                                 export_path,
                                 export_name,
                                 as_text=False)
        elif args.predict:
            do_predict(pred, args.predict)
        # if args.pred_video:
        #     predict_video(pred, args.predict)
        # else:
        #     do_predict(pred, args.predict)
        f.write(y_proto.SerializeToString())

init = tf.global_variables_initializer()
with tf.Session() as sess:
    init.run()
    for epoch in range(args.epochs):
        print('Epoch: {}'.format(epoch))
        for i in range(x_train.shape[0] // args.batch_size):
            batch_indices = np.random.randint(x_train.shape[0],
                                              size=args.batch_size)
            x_batch = x_train[batch_indices]
            y_batch = y_train[batch_indices]
            sess.run(train_op,
                     feed_dict={
                         input_layer: x_batch,
                         label_layer: y_batch
                     })
        acc_test = accuracy.eval(feed_dict={
            input_layer: x_test,
            label_layer: y_test
        })
        #acc_train = accuracy.eval(feed_dict={
        #input_layer: x_train, label_layer: y_train})
        print("Test accuracy:", acc_test)
        #print("Train accuracy:", acc_train)

    constant_graph = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), ['probabilities_out', 'gradient_out'])

    graph_io.write_graph(constant_graph, '.', args.output, as_text=False)
weight_file_path = osp.join(input_fld, weight_file)

K.set_learning_phase(0)
net_model = load_model(weight_file_path)


print('input is:', net_model.input.name)
print('output is:', net_model.output.name)

sess = K.get_session()

frozen_graph = freeze_session(K.get_session(), output_names=[net_model.output.op.name])

from tensorflow.python.framework import graph_io

graph_io.write_graph(frozen_graph, output_fld, output_graph_name, as_text=False)

print('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))


# --------

from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import  build_signature_def, predict_signature_def
from tensorflow.contrib.session_bundle import exporter
export_path = 'folder_to_export'
builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'images': net_model.input},
                                  outputs={'scores': net_model.output})
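
# A plausible completion, following the standard SavedModelBuilder pattern
# (the 'predict' signature key is an arbitrary choice):
sess = K.get_session()
builder.add_meta_graph_and_variables(sess,
                                     [tag_constants.SERVING],
                                     signature_def_map={'predict': signature})
builder.save()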
Example #22
# Load keras model and rename output
K.set_learning_phase(0)
keras_model = load_model(keras_model_path)

pred = [None] * num_output
pred_node_names = [None] * num_output
for i in range(num_output):
    pred_node_names[i] = prefix_output_node_names_of_final_network + str(i)
    pred[i] = tf.identity(keras_model.outputs[i], name=pred_node_names[i])
print('Output nodes names: ', pred_node_names)

# [optional] write graph definition in ascii
sess = K.get_session()
if write_graph_def_ascii_flag:
    f = tensorflow_graph_name + '.ascii'
    tf.train.write_graph(sess.graph.as_graph_def(),
                         output_dir,
                         f,
                         as_text=True)
    print('Saved the graph definition: ', osp.join(output_dir, f))

# convert variables to constants and save
constant_graph = graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(), pred_node_names)
graph_io.write_graph(constant_graph,
                     output_dir,
                     tensorflow_graph_name,
                     as_text=False)

print('Saved the TensorFlow graph: ',
      osp.join(output_dir, tensorflow_graph_name))
Example #23
def freeze_model():
    feed = np.random.random((1, 320, 400, 3))

    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, (None, 320, 400, 3))
        # model = ConvModule(64)
        model = SegModel()
        # model.load_weights(model_save_file)
        adam = tf.train.AdamOptimizer()
        global_step = tf.train.get_or_create_global_step()

        checkpoint = tfe.Checkpoint(model=model,
                                    optimizer=adam,
                                    step_counter=global_step)
        logits = model(x)

        with tf.Session() as sess:
            # checkpoint.restore(tf.train.latest_checkpoint(flags.model_dir))#.assert_consumed().run_restore_ops()
            print("Testing")
            sess.run(tf.global_variables_initializer())
            y_result = sess.run(logits, feed_dict={x: feed})

            # test speed
            start = time.time()
            steps = 100
            for i in range(steps):
                y_result = sess.run(logits, feed_dict={x: feed})
            duration = (time.time() - start) / steps
            print("average duration:", round(duration, 4))

            print(y_result.shape)

            # for op in tf.get_default_graph().get_operations():
            # print(op.name)
            for input in model.inputs:
                print(input)

            for out in model.outputs:
                print(out.op.name)
            # print(model.output)

            # # Now, let's use the Tensorflow backend to get the TF graphdef and frozen graph
            # saver = tf.train.Saver()
            # # save model weights in TF checkpoint
            # checkpoint_path = saver.save(sess, "./models/snapshot", global_step=0, latest_filename='checkpoint_state')

            # train_graph = sess.graph
            # inference_graph = tf.graph_util.remove_training_nodes(train_graph.as_graph_def())
            #
            # graph_io.write_graph(inference_graph, '.', "./models/keras_graphdef.pb")

            # frozen_graph = freeze_session(sess, output_names=["u_net/conv2d/Reshape_1"])
            frozen_graph = freeze_session(
                sess, output_names=["dilated_cnn/conv2d_1/Reshape_1"])

            graph_io.write_graph(frozen_graph,
                                 '.',
                                 "./models/keras_frozen_model.pb",
                                 as_text=False)
    """This doesn't seem to work at the moment, so using freeze_session instead"""
    # freeze_graph.freeze_graph(
    #   "./models/keras_graphdef.pb",
    #   '',
    #   False,
    #   checkpoint_path,
    #   "dilated_cnn/conv2d_1/Reshape_1",
    #   "save/restore_all",
    #   "save/Const:0",
    #   "./models/keras_frozen_model.pb",
    #   False,
    #   ""
    # )

    print("Global_step:", global_step)
  def testSinglePartitionedVariable(self):
    """Ensures partitioned variables fail cleanly with freeze graph."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # Create a graph with partition variables. When weights are partitioned into
    # a single partition, the weights variable is followed by an identity ->
    # identity (an additional identity node).
    partitioner = partitioned_variables.fixed_size_partitioner(1)
    with ops.Graph().as_default():
      with variable_scope.variable_scope("part", partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros(
            (batch_size, height, width, depth), name="input1")
        input2 = array_ops.zeros(
            (batch_size, height, width, depth), name="input2")

        num_nodes = depth
        filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
        filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
        conv = nn.conv2d(
            input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
        node = math_ops.add(conv, input2, name="test/add")
        node = nn.relu6(node, name="test/relu6")

      # Save graph and checkpoints.
      sess = session.Session()
      sess.run(variables.global_variables_initializer())

      saver = saver_lib.Saver()
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

      # Ensure this graph has partition variables.
      self.assertTrue([
          tensor.name.split(":")[0]
          for op in sess.graph.get_operations()
          for tensor in op.values()
          if re.search(r"/part_\d+/", tensor.name)
      ])

    # Test freezing graph doesn't make it crash.
    output_node_names = "save/restore_all"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    with self.assertRaises(ValueError):
      freeze_graph.freeze_graph_with_def_protos(
          input_graph_def=sess.graph_def,
          input_saver_def=None,
          input_checkpoint=checkpoint_path,
          output_node_names=output_node_names,
          restore_op_name="save/restore_all",  # default value
          filename_tensor_name="save/Const:0",  # default value
          output_graph=output_graph_path,
          clear_devices=False,
          initializer_nodes="")
def frozen_graph_maker(checkpoints_dir, model_name, output_graph):
    # with tf.Session(graph=tf.Graph()) as sess:
    #     tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    #     output_nodes = [n.name for n in tf.get_default_graph().as_graph_def().node]
    #     gd = sess.graph.as_graph_def()
    idx = 0
    checkpoints = [
        500, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
        100000, 0
    ]
    checkpoint_file_format = '{}/{}_{}.{}'  # checkpoints_dir/model_name_step.suffix
    while os.path.isfile(
            checkpoint_file_format.format(checkpoints_dir, model_name,
                                          checkpoints[idx], "ckpt.meta")):
        print("Handling checkpoint #", idx, " = ", checkpoints[idx])
        with tf.Session() as sess:
            saver = tf.train.import_meta_graph(
                checkpoint_file_format.format(
                    checkpoints_dir, model_name, checkpoints[idx],
                    "ckpt.meta"))  #/tmp/model.ckpt.meta')
            saver.restore(
                sess,
                checkpoint_file_format.format(checkpoints_dir, model_name,
                                              checkpoints[idx], "ckpt"))
            output_nodes = [
                n.name for n in tf.get_default_graph().as_graph_def().node
            ]
            gd = sess.graph.as_graph_def()
            # fix nodes
            for node in gd.node:
                if node.op == 'RefSwitch':
                    node.op = 'Switch'
                    for index in range(len(node.input)):
                        if 'moving_' in node.input[index]:
                            node.input[index] = node.input[index] + '/read'
                elif node.op == 'RefEnter':
                    node.op = 'Enter'
                    for index in range(len(node.input)):
                        if 'moving_' in node.input[index]:
                            node.input[index] = node.input[index] + '/read'
                elif node.op == 'AssignSub':
                    node.op = 'Sub'
                    if 'use_locking' in node.attr: del node.attr['use_locking']
                elif node.op == 'AssignAdd':
                    node.op = 'Add'
                    if 'use_locking' in node.attr: del node.attr['use_locking']
                elif node.op == 'Assign':
                    node.op = 'Identity'
                    if 'use_locking' in node.attr: del node.attr['use_locking']
                    if 'validate_shape' in node.attr:
                        del node.attr['validate_shape']
                    if len(node.input) == 2:
                        # input0: ref: Should be from a Variable node. May be uninitialized.
                        # input1: value: The value to be assigned to the variable.
                        node.input[0] = node.input[1]
                        del node.input[1]

            whitelist_names = []
            for node in gd.node:
                if (node.name.startswith('InceptionResnet')
                        or node.name.startswith('embeddings')
                        or node.name.startswith('image_batch')
                        or node.name.startswith('label_batch')
                        or node.name.startswith('phase_train')
                        or node.name.startswith('Logits')):
                    whitelist_names.append(node.name)

            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,  # The session is used to retrieve the weights
                gd,
                # output_nodes  # The output node names are used to select the useful nodes
                ["add_1", "Sub"])
            # Finally we serialize and dump the output graph to the filesystem
            # print(output_nodes[-1])

            graph_io.write_graph(output_graph_def,
                                 output_graph,
                                 'output_graph_{}.pb'.format(idx),
                                 as_text=False)
            # with tf.gfile.GFile(output_graph, "wb") as f:
            #     f.write(output_graph_def.SerializeToString())
            idx += 1
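
# A hedged invocation of the function above; the checkpoint directory, model
# name prefix, and output directory are hypothetical.
#
#   frozen_graph_maker('/tmp/checkpoints', 'facenet', '/tmp/frozen_graphs')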
  def _testFreezeGraph(self, saver_write_version):

    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    with ops.Graph().as_default():
      variable_node = variables.VariableV1(1.0, name="variable_node")
      output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
      sess = session.Session()
      init = variables.global_variables_initializer()
      sess.run(init)
      output = sess.run(output_node)
      self.assertNear(2.0, output, 0.00001)
      saver = saver_lib.Saver(write_version=saver_write_version)
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(
        input_graph_path,
        input_saver_def_path,
        input_binary,
        checkpoint_path,
        output_node_names,
        restore_op_name,
        filename_tensor_name,
        output_graph_path,
        clear_devices,
        "",
        "",
        "",
        checkpoint_version=saver_write_version)

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      self.assertEqual(4, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("VariableV2", node.op)
        self.assertNotEqual("Variable", node.op)

      with session.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
  def initialize(self, model_fn, input_fn, eval_input_fn):
    """Build graph and do initialization for training."""
    tf.logging.info("TrainAndEvalLowLevelRunner: initialize method")

    self.num_hosts = (
        self.num_shards * self.num_cores_per_replica //
        self.train_params["cores_per_worker"])
    for i in range(self.num_hosts):
      self.build_enqueue_ops(input_fn, self.train_params, self.num_hosts, i,
                             self.iterations_per_loop, True)
      self.build_enqueue_ops(eval_input_fn, self.eval_params, self.num_hosts, i,
                             self.eval_steps, False)

    def infeed_thread_fn():
      """Build and infeed session.run calls in a background thread."""
      for cur_epoch in range(self.total_epoch):
        tf.logging.info("Start to infeed train batches for epoch %d", cur_epoch)
        self.input_sess.run([self.enqueue_ops])
        tf.logging.info("Start to infeed eval batches for epoch %d", cur_epoch)
        self.input_sess.run([self.eval_enqueue_ops])
      tf.logging.info("infeed thread exited.")

    def tpu_train_step(loss):
      """Generate the TPU graph."""
      del loss
      values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
      features, labels = self.input_flattener.unflatten_features_and_labels(
          values)
      estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,
                                self.train_params)
      loss, train_op = estimator_spec.loss, estimator_spec.train_op
      self.scaffold_fn = estimator_spec.scaffold_fn
      with tf.control_dependencies([train_op]):
        return tf.identity(loss)

    @tpu_function.on_device_training_loop
    def train_loop():
      return tf.contrib.tpu.repeat(self.iterations_per_loop, tpu_train_step,
                                   [_INITIAL_LOSS])

    def tpu_eval_step():
      """Generate the TPU graph."""
      values = self.eval_infeed_queue[0].generate_dequeue_op(tpu_device=0)
      (features,
       _) = self.eval_input_flattener.unflatten_features_and_labels(values)
      estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT,
                                self.eval_params)
      for k, v in six.iteritems(estimator_spec.predictions):
        self.outfeed_names.append(k)
        self.outfeed_tensors.append(v)

      with tf.device(runner_utils.device_for_tpu_core(self._get_host(0))):
        outfeed_enqueue_ops = tf.contrib.tpu.outfeed_enqueue_tuple(
            self.outfeed_tensors)
      with tf.control_dependencies([outfeed_enqueue_ops]):
        return tf.no_op()

    @tpu_function.on_device_training_loop
    def eval_loop():
      return tf.contrib.tpu.repeat(self.eval_steps, tpu_eval_step, [])

    def train_eval_step():
      with tf.control_dependencies(train_loop()):
        return eval_loop()

    @tpu_function.on_device_training_loop
    def train_eval_loop():
      return tf.contrib.tpu.repeat(
          self.total_epoch if self.train_params["all_in_one_session"] else 1,
          train_eval_step, [])

    def create_dequeue_ops(host_id):
      """Create outfeed dequeue ops."""
      dequeue_ops = []
      tensor_dtypes = []
      tensor_shapes = []
      for v in self.outfeed_tensors:
        dequeue_ops.append([])
        tensor_dtypes.append(v.dtype)
        tensor_shapes.append(v.shape)
      for i in range(self.eval_params["replicas_per_worker"]):
        with tf.device(runner_utils.device_for_host(self._get_host(host_id))):
          if self.use_spatial_partition:
            replica_id = self.device_assignment.lookup_replicas(host_id, 0)[i]
            ordinal = self.device_assignment.tpu_ordinal(
                replica=replica_id, logical_core=0)
          else:
            ordinal = i
          outfeed_tensors = tf.contrib.tpu.outfeed_dequeue_tuple(
              dtypes=tensor_dtypes,
              shapes=tensor_shapes,
              device_ordinal=ordinal)
          for j, item in enumerate(outfeed_tensors):
            dequeue_ops[j].append(item)
      for j in range(len(outfeed_tensors)):
        dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
      return dequeue_ops

    with self.train_eval_graph.as_default():
      (self.train_eval_compile_op,
       self.train_eval_op) = tpu.split_compile_and_shard(
           train_eval_loop,
           inputs=[],
           num_shards=self.train_params["num_shards"],
           outputs_from_all_shards=False,
           device_assignment=self.device_assignment
       )
      for i in range(self.num_hosts):
        self.dequeue_ops.append({})
        tf.logging.info(
            "TrainAndEvalLowLevelRunner: get dequeue ops for host:%d", i)
        host_dequeue_ops = create_dequeue_ops(i)
        for j, dequeue_tensor in enumerate(host_dequeue_ops):
          self.dequeue_ops[i][self.outfeed_names[j]] = dequeue_tensor
      if self.scaffold_fn:
        self.scaffold_fn()
      global_initializer = tf.global_variables_initializer()
      local_initializer = tf.local_variables_initializer()
      graph_io.write_graph(
          self.train_eval_graph.as_graph_def(add_shapes=True), self.model_dir,
          "graph.pbtxt")
      self.saver = tf.train.Saver()

    # Build tpu train model session and initialize graph
    self.train_eval_sess = tf.Session(
        self.master,
        graph=self.train_eval_graph,
        config=self.session_config)

    self.train_eval_sess.run(global_initializer)
    self.train_eval_sess.run(local_initializer)
    # Compiles the train program.
    self.train_eval_sess.run([self.train_eval_compile_op])

    # Complete infeed graph generation and session.run calls
    self.input_sess = tf.Session(
        self.master,
        graph=self.input_graph,
        config=self.session_config)
    self.input_sess.run(self.dataset_initializer)
    self.input_sess.run(self.eval_dataset_initializer)
    self.infeed_thread = threading.Thread(target=infeed_thread_fn)

    # Starts the clock.
    mlp_log.mlperf_print(key="init_stop", value=None)
    mlp_log.mlperf_print(key="run_start", value=None)
    self.infeed_thread.start()
Example #28
    def tf_run_const_folding(self, file):
        print("run const folding----------------------------")
        tf.reset_default_graph()
        graph_def, graph = self.import_graph(file)

        print()
        if (self.debug):
            print('Placeholders:')
        assert graph is not None
        ops = graph.get_operations()  # type: Iterable[tf.Operation]
        input_nodes = []
        last_nodes = []
        for op in ops:
            if op.type == 'Placeholder':
                for tensor in op.outputs:
                    if (self.debug):
                        print('- {0:20s} {1}'.format("Tensor", tensor.name))
                    input_nodes.append(tensor.name)

        if (self.debug):
            print()
            print('Sinks (operations without outputs):')
        last_outputs = []
        num_nodes = len(ops)
        name2nodeIdx_map = {}
        for i in range(num_nodes):
            name2nodeIdx_map[ops[i].name] = i
        node_outputs_ = [[] for i in range(num_nodes)]
        for n in range(num_nodes):
            op = ops[n]
            pending_count = len(op.inputs)
            for i in range(pending_count):
                input_name_id = op.inputs[i].name.split(':')
                node_outputs_[name2nodeIdx_map[input_name_id[0]]].append(n)
        for n in range(num_nodes):
            if len(node_outputs_[n]) == 0 and ops[n].type != 'NoOp':
                if self.debug:
                    print('- {0:20s} {1}'.format(ops[n].type, ops[n].name))
                for m in range(len(ops[n].inputs)):
                    if self.debug:
                        print('<-in-- {0:20s}'.format(ops[n].inputs[m].name))
                    last_outputs.append(ops[n].inputs[m].name)
            '''
            if len(node_outputs_[n]) == 0 and ops[n].type == 'NoOp':
                for m in range(len(ops[n].control_inputs)):
                    print('<-in-^ {0:20s}'.format(ops[n].control_inputs[m].name))
                    last_outputs.append(ops[n].control_inputs[m].name)
            '''
        print(input_nodes)
        print(last_outputs)
        # Assumes `from tensorflow.tools import graph_transforms` at module top.
        g_def_const = graph_transforms.TransformGraph(
            graph_def, input_nodes, last_outputs,
            ["fold_constants", "strip_unused_nodes"])

        print()
        self.folded_graph = file[:-3] + ".const_folded.pb"
        print("Saving Const-folded Graph... as " + self.folded_graph)
        graph_io.write_graph(as_text=False,
                             name=self.folded_graph,
                             logdir="./",
                             graph_or_graph_def=g_def_const)
        print("Finished.")
Example #29
def export_scoped_meta_graph(filename=None,
                             graph_def=None,
                             graph=None,
                             export_scope=None,
                             as_text=False,
                             unbound_inputs_col_name="unbound_inputs",
                             clear_devices=False,
                             saver_def=None,
                             clear_extraneous_savers=False,
                             strip_default_attrs=False,
                             save_debug_info=False,
                             **kwargs):
    """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    graph: The `Graph` to export. If `None`, use the default graph.
    export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
      for easy import later into new name scopes. If `None`, the whole graph
      is exported.
    as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
    unbound_inputs_col_name: Optional `string`. If provided, a string collection
      with the given name will be added to the returned `MetaGraphDef`,
      containing the names of tensors that must be remapped when importing the
      `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      before exporting the graph.
    saver_def: `SaverDef` protocol buffer.
    clear_extraneous_savers: Remove any Saver-related information from the
        graph (both Save/Restore ops and SaverDefs) that are not associated
        with the provided SaverDef.
    strip_default_attrs: Set to true if default valued attributes must be
      removed while exporting the GraphDef.
    save_debug_info: If `True`, save the GraphDebugInfo to a separate file in
      the same directory as filename, with the file extension replaced by
      `.debug`.
    **kwargs: Optional keyed arguments, including meta_info_def and
        collection_list.

  Returns:
    A `MetaGraphDef` proto and dictionary of `Variables` in the exported
    name scope.

  Raises:
    ValueError: When the `GraphDef` is larger than 2GB.
    ValueError: When executing in Eager mode and either `graph_def` or `graph`
      is undefined.
  """
    if context.executing_eagerly() and not (graph_def is not None
                                            and graph is not None):
        raise ValueError(
            "Exporting/importing meta graphs is not supported when "
            "Eager Execution is enabled.")
    graph = graph or ops.get_default_graph()

    exclude_nodes = None
    unbound_inputs = []
    if export_scope or clear_extraneous_savers or clear_devices:
        if graph_def:
            new_graph_def = graph_pb2.GraphDef()
            new_graph_def.versions.CopyFrom(graph_def.versions)
            new_graph_def.library.CopyFrom(graph_def.library)

            if clear_extraneous_savers:
                exclude_nodes = _find_extraneous_saver_nodes(
                    graph_def, saver_def)

            for node_def in graph_def.node:
                if _should_include_node(node_def.name, export_scope,
                                        exclude_nodes):
                    new_node_def = _node_def(node_def,
                                             export_scope,
                                             unbound_inputs,
                                             clear_devices=clear_devices)
                    new_graph_def.node.extend([new_node_def])
            graph_def = new_graph_def
        else:
            # Only do this complicated work if we want to remove a name scope.
            graph_def = graph_pb2.GraphDef()
            # pylint: disable=protected-access
            graph_def.versions.CopyFrom(graph.graph_def_versions)
            bytesize = 0

            if clear_extraneous_savers:
                exclude_nodes = _find_extraneous_saver_nodes(
                    graph.as_graph_def(), saver_def)

            for key in sorted(graph._nodes_by_id):
                if _should_include_node(graph._nodes_by_id[key].name,
                                        export_scope, exclude_nodes):
                    value = graph._nodes_by_id[key]
                    # pylint: enable=protected-access
                    node_def = _node_def(value.node_def,
                                         export_scope,
                                         unbound_inputs,
                                         clear_devices=clear_devices)
                    graph_def.node.extend([node_def])
                    if value.outputs:
                        assert "_output_shapes" not in graph_def.node[-1].attr
                        graph_def.node[-1].attr[
                            "_output_shapes"].list.shape.extend([
                                output.get_shape().as_proto()
                                for output in value.outputs
                            ])
                    bytesize += value.node_def.ByteSize()
                    if bytesize >= (1 << 31) or bytesize < 0:
                        raise ValueError("GraphDef cannot be larger than 2GB.")

            graph._copy_functions_to_graph_def(graph_def, bytesize)  # pylint: disable=protected-access

        # It's possible that not all the inputs are in the export_scope.
        # If we would like such information included in the exported meta_graph,
        # add them to a special unbound_inputs collection.
        if unbound_inputs_col_name:
            # Clears the unbound_inputs collections.
            graph.clear_collection(unbound_inputs_col_name)
            for k in unbound_inputs:
                graph.add_to_collection(unbound_inputs_col_name, k)

    var_list = {}
    variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                     scope=export_scope)
    for v in variables:
        if _should_include_node(v, export_scope, exclude_nodes):
            var_list[ops.strip_name_scope(v.name, export_scope)] = v

    scoped_meta_graph_def = create_meta_graph_def(
        graph_def=graph_def,
        graph=graph,
        export_scope=export_scope,
        exclude_nodes=exclude_nodes,
        clear_extraneous_savers=clear_extraneous_savers,
        saver_def=saver_def,
        strip_default_attrs=strip_default_attrs,
        **kwargs)

    if filename:
        graph_io.write_graph(scoped_meta_graph_def,
                             os.path.dirname(filename),
                             os.path.basename(filename),
                             as_text=as_text)
        if save_debug_info:
            name, _ = os.path.splitext(filename)
            debug_filename = "{name}{ext}".format(name=name, ext=".debug")

            # Gets the operations from the graph by name. Excludes variable
            # nodes, so only the nodes present in the frozen model are included.
            ops_to_export = []
            for node in scoped_meta_graph_def.graph_def.node:
                scoped_op_name = ops.prepend_name_scope(
                    node.name, export_scope)
                ops_to_export.append(
                    graph.get_operation_by_name(scoped_op_name))

            graph_debug_info = create_graph_debug_info_def(ops_to_export)

            graph_io.write_graph(graph_debug_info,
                                 os.path.dirname(debug_filename),
                                 os.path.basename(debug_filename),
                                 as_text=as_text)

    return scoped_meta_graph_def, var_list
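# A minimal usage sketch for the exporter above: export only the nodes under a
# name scope and write them to disk (the scope and path are illustrative):
import tensorflow as tf
from tensorflow.python.framework import meta_graph

with tf.Graph().as_default():
    with tf.name_scope("encoder"):
        x = tf.placeholder(tf.float32, [None, 4], name="x")
        w = tf.Variable(tf.ones([4, 2]), name="w")
        y = tf.matmul(x, w, name="y")
    # The "encoder/" prefix is stripped from node names in the exported proto.
    meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
        filename="/tmp/encoder.meta", export_scope="encoder")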
def write_graph(graph, fname):
    d, f = os.path.split(os.path.abspath(fname))
    graph_io.write_graph(graph, d, f, as_text=False)
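# e.g. write_graph(sess.graph, "/tmp/model/graph.pb") splits the path so that
# graph_io.write_graph receives the directory and file name separately and
# writes a binary GraphDef (path illustrative).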
Example #31
def main(args):
    # If output_model path is relative and in cwd, make it absolute from root
    output_model = FLAGS.output_model
    if str(Path(output_model).parent) == '.':
        output_model = str((Path.cwd() / output_model))

    output_fld = Path(output_model).parent
    output_model_name = Path(output_model).name
    output_model_stem = Path(output_model).stem
    output_model_pbtxt_name = output_model_stem + '.pbtxt'

    # Create output directory if it does not exist
    Path(output_model).parent.mkdir(parents=True, exist_ok=True)

    if FLAGS.channels_first:
        K.set_image_data_format('channels_first')
    else:
        K.set_image_data_format('channels_last')

    model = load_model(FLAGS.input_model, FLAGS.input_model_json)

    # TODO(amirabdi): Support networks with multiple inputs
    orig_output_node_names = [node.op.name for node in model.outputs]
    if FLAGS.output_nodes_prefix:
        num_output = len(orig_output_node_names)
        pred = [None] * num_output
        converted_output_node_names = [None] * num_output

        # Create dummy tf nodes to rename output
        for i in range(num_output):
            converted_output_node_names[i] = '{}{}'.format(
                FLAGS.output_nodes_prefix, i)
            pred[i] = tf.identity(model.outputs[i],
                                  name=converted_output_node_names[i])
    else:
        converted_output_node_names = orig_output_node_names
    logging.info('Converted output node names are: %s',
                 str(converted_output_node_names))

    sess = K.get_session()
    if FLAGS.output_meta_ckpt:
        saver = tf.train.Saver()
        saver.save(sess, str(output_fld / output_model_stem))

    if FLAGS.save_graph_def:
        tf.train.write_graph(sess.graph.as_graph_def(),
                             str(output_fld),
                             output_model_pbtxt_name,
                             as_text=True)
        logging.info('Saved the graph definition in ascii format at %s',
                     str(Path(output_fld) / output_model_pbtxt_name))

    if FLAGS.quantize:
        from tensorflow.tools.graph_transforms import TransformGraph
        transforms = ["quantize_weights", "quantize_nodes"]
        transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [],
                                               converted_output_node_names,
                                               transforms)
        constant_graph = graph_util.convert_variables_to_constants(
            sess, transformed_graph_def, converted_output_node_names)
    else:
        constant_graph = graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(), converted_output_node_names)

    graph_io.write_graph(constant_graph,
                         str(output_fld),
                         output_model_name,
                         as_text=False)
    logging.info('Saved the frozen graph at %s',
                 str(Path(output_fld) / output_model_name))
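# A hedged sketch of consuming the frozen graph this script writes; the file
# and tensor names below depend on the Keras model and are placeholders:
import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile("model.pb", "rb") as f:  # placeholder path
    graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    x = graph.get_tensor_by_name("input_1:0")       # placeholder input name
    y = graph.get_tensor_by_name("output_node0:0")  # placeholder output name
    with tf.Session(graph=graph) as sess:
        print(sess.run(y, {x: np.zeros((1, 4), np.float32)}))  # dummy batch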
Example #32
from data_load import load_vocab, load_test_data, load_test_string
from train import Graph
import codecs
import distance
import os

import tensorflow as tf
from tensorflow.python.framework import graph_io
from tensorflow.python.framework.graph_util import convert_variables_to_constants

# `hp` is assumed to be the project's hyperparameter module,
# e.g. `from hyperparams import Hyperparams as hp`.


g = Graph(is_training=False)

# Load vocab
pnyn2idx, idx2pnyn, hanzi2idx, idx2hanzi = load_vocab()

with g.graph.as_default():
    sv = tf.train.Supervisor()
    with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Restore parameters
        print(hp.logdir)

        sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))
        print("Restored!")

        graph = g.graph
        output_names = ["ToInt32"]
        input_graph_def = graph.as_graph_def()
        frozen_graph = convert_variables_to_constants(sess, input_graph_def, output_names)
        graph_io.write_graph(frozen_graph, hp.logdir, 'deploy.pb', as_text=False)
Example #33
def main(argv):
  argparser = argparse.ArgumentParser(description='Compile some op')
  argparser.add_argument('config', help="filename to config-file")
  argparser.add_argument('--train', type=int, default=0, help='0 disable (default), 1 enable, -1 dynamic')
  argparser.add_argument('--eval', type=int, default=0, help='calculate losses. 0 disable (default), 1 enable')
  argparser.add_argument('--search', type=int, default=0, help='beam search. 0 disable (default), 1 enable')
  argparser.add_argument("--verbosity", default=4, type=int, help="5 for all seqs (default: 4)")
  argparser.add_argument("--summaries_tensor_name")
  argparser.add_argument("--output_file", help='output pb, pbtxt or meta, metatxt file')
  argparser.add_argument("--output_file_model_params_list", help="line-based, names of model params")
  argparser.add_argument("--output_file_state_vars_list", help="line-based, name of state vars")
  args = argparser.parse_args(argv[1:])
  assert args.train in [-1, 0, 1] and args.eval in [0, 1] and args.search in [0, 1]
  init(config_filename=args.config, log_verbosity=args.verbosity)
  with tf.Graph().as_default() as graph:
    assert isinstance(graph, tf.Graph)
    print("Create graph...")
    # See :func:`Engine._init_network`.
    tf.set_random_seed(42)
    if args.train < 0:
      from TFUtil import get_global_train_flag_placeholder
      train_flag = get_global_train_flag_placeholder()
    else:
      train_flag = bool(args.train)
    eval_flag = bool(args.eval)
    search_flag = bool(args.search)
    network = create_graph(train_flag=train_flag, eval_flag=eval_flag, search_flag=search_flag)

    from TFNetworkLayer import LayerBase
    for layer in network.layers.values():
      assert isinstance(layer, LayerBase)
      if layer.output.time_dim_axis is None:
        continue
      with layer.cls_layer_scope(layer.name):
        tf.identity(layer.output.get_placeholder_as_batch_major(), name="output_batch_major")

    tf.group(*network.get_post_control_dependencies(), name="post_control_dependencies")

    if args.summaries_tensor_name:
      summaries_tensor = tf.summary.merge_all()
      assert isinstance(summaries_tensor, tf.Tensor), "no summaries in the graph?"
      tf.identity(summaries_tensor, name=args.summaries_tensor_name)

    if args.output_file and os.path.splitext(args.output_file)[1] in [".meta", ".metatxt"]:
      # https://www.tensorflow.org/api_guides/python/meta_graph
      saver = tf.train.Saver(
        var_list=network.get_saveable_params_list(), max_to_keep=2 ** 31 - 1)
      graph_def = saver.export_meta_graph()
    else:
      graph_def = graph.as_graph_def(add_shapes=True)

    print("Graph collection keys:", graph.get_all_collection_keys())
    print("Graph num operations:", len(graph.get_operations()))
    print("Graph def size:", Util.human_bytes_size(graph_def.ByteSize()))

    if args.output_file:
      filename = args.output_file
      _, ext = os.path.splitext(filename)
      assert ext in [".pb", ".pbtxt", ".meta", ".metatxt"], 'filename %r extension invalid' % filename
      print("Write graph to file:", filename)
      graph_io.write_graph(
        graph_def,
        logdir=os.path.dirname(filename),
        name=os.path.basename(filename),
        as_text=ext.endswith("txt"))
    else:
      print("Use --output_file if you want to store the graph.")

    if args.output_file_model_params_list:
      print("Write model param list to:", args.output_file_model_params_list)
      with open(args.output_file_model_params_list, "w") as f:
        for param in network.get_params_list():
          assert param.name[-2:] == ":0"
          f.write("%s\n" % param.name[:-2])

    if args.output_file_state_vars_list:
      print("Write state var list to:", args.output_file_state_vars_list)
      from TFUtil import CollectionKeys
      with open(args.output_file_state_vars_list, "w") as f:
        for param in tf.get_collection(CollectionKeys.STATE_VARS):
          assert param.name[-2:] == ":0"
          f.write("%s\n" % param.name[:-2])
                    dnn_concat_node.input[i] = res
        if args.enable_column_fusion:
            fuse_categorical_numeric_columns()
            old_graph_def = graph.as_graph_def()
            for node in old_graph_def.node:
                if node.name == "new_concat_node":
                    node.input[1] = "new_numeric_placeholder:0"
                elif node.op == "BiasAdd" and "linear_model/weighted_sum" in node.name:
                    node.input[0] = "Sum:0"
                elif  node.op == "MatMul" and "hiddenlayer_0/MatMul" in node.name:
                    node.input[0] = "new_concat_node:0"
    except Exception as e:
        print(e)
        print('--------------------------------------------------------------------------')
        print("Cannot optimize the given graph. The given graph might be an optimized one")
        print('--------------------------------------------------------------------------')
        sys.exit()

new_graph_def = tf.compat.v1.graph_util.extract_sub_graph(
    old_graph_def,
    output_nodes
)

filename = args.output_graph
graph_io.write_graph(new_graph_def,
                     os.path.dirname(filename),
                     os.path.basename(filename),
                     as_text=False)
print('Optimized graph created')
# `model`, `session`, `input_names`, `freeze_graph` and `save_pb_dir` are
# assumed to be defined in earlier notebook cells.
output_names = [t.op.name for t in model.outputs]

# Prints input and output nodes names, take notes of them.
print(input_names, output_names)

frozen_graph = freeze_graph(session.graph, session, [out.op.name for out in model.outputs], save_pb_dir=save_pb_dir)


# In[3]:


import tensorflow.contrib.tensorrt as trt

trt_graph = trt.create_inference_graph(
    input_graph_def=frozen_graph,
    outputs=output_names,
    max_batch_size=1,
    max_workspace_size_bytes=1 << 25,
    precision_mode='FP16',
    minimum_segment_size=50
)

graph_io.write_graph(trt_graph, "./Saved-Model/", "ResNet-50-trt-graph.pb", as_text=False)


  def testSinglePartitionedVariable(self):
    """Ensures partitioned variables fail cleanly with freeze graph."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # Create a graph with partitioned variables. When weights are partitioned
    # into a single partition, the weights variable is followed by an
    # identity -> identity chain (an additional identity node).
    partitioner = partitioned_variables.fixed_size_partitioner(1)
    with ops.Graph().as_default():
      with variable_scope.variable_scope("part", partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros(
            (batch_size, height, width, depth), name="input1")
        input2 = array_ops.zeros(
            (batch_size, height, width, depth), name="input2")

        num_nodes = depth
        filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
        filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
        conv = nn.conv2d(
            input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
        node = math_ops.add(conv, input2, name="test/add")
        node = nn.relu6(node, name="test/relu6")

      # Save graph and checkpoints.
      sess = session.Session()
      sess.run(variables.global_variables_initializer())

      saver = saver_lib.Saver()
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

      # Ensure this graph has partition variables.
      self.assertTrue([
          tensor.name.split(":")[0]
          for op in sess.graph.get_operations()
          for tensor in op.values()
          if re.search(r"/part_\d+/", tensor.name)
      ])

    # Test freezing graph doesn't make it crash.
    output_node_names = "save/restore_all"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    return_value = freeze_graph.freeze_graph_with_def_protos(
        input_graph_def=sess.graph_def,
        input_saver_def=None,
        input_checkpoint=checkpoint_path,
        output_node_names=output_node_names,
        restore_op_name="save/restore_all",  # default value
        filename_tensor_name="save/Const:0",  # default value
        output_graph=output_graph_path,
        clear_devices=False,
        initializer_nodes="")
    self.assertTrue(return_value)

# [optional] write graph definition in ascii

# In[ ]:

sess = K.get_session()

if args.graph_def:
    f = args.output_graphdef_file
    tf.train.write_graph(sess.graph.as_graph_def(), output_fld, f, as_text=True)
    print('saved the graph definition in ascii format at: ', str(Path(output_fld) / f))


# convert variables to constants and save

# In[ ]:

from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
if args.quantize:
    from tensorflow.tools.graph_transforms import TransformGraph
    transforms = ["quantize_weights", "quantize_nodes"]
    transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [], pred_node_names, transforms)
    constant_graph = graph_util.convert_variables_to_constants(sess, transformed_graph_def, pred_node_names)
else:
    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
graph_io.write_graph(constant_graph, output_fld, args.output_model_file, as_text=False)
print('saved the frozen graph (ready for inference) at: ', str(Path(output_fld) / args.output_model_file))


# `model_fname`, `save_pb_dir` and the `freeze_graph` helper are assumed to be
# defined in earlier cells; the imports below are the ones this cell needs.
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
from tensorflow.python.framework import graph_io
from tensorflow.keras.models import load_model

# This line must be executed before loading the Keras model.
tf.keras.backend.set_learning_phase(0)

model = load_model(model_fname)

session = tf.keras.backend.get_session()

input_names = [t.op.name for t in model.inputs]
output_names = [t.op.name for t in model.outputs]

# Prints input and output nodes names, take notes of them.
print(input_names, output_names)

frozen_graph = freeze_graph(session.graph,
                            session, [out.op.name for out in model.outputs],
                            save_pb_dir=save_pb_dir)

trt_graph = trt.create_inference_graph(input_graph_def=frozen_graph,
                                       outputs=output_names,
                                       max_batch_size=1,
                                       max_workspace_size_bytes=1 << 25,
                                       precision_mode='FP16',
                                       minimum_segment_size=50)

graph_io.write_graph(trt_graph,
                     "./trt_model/",
                     "trt_mixed_resnet50.pb",
                     as_text=False)
Example #39
    def testStripUnused(self):
        input_graph_name = "input_graph.pb"
        output_graph_name = "output_graph.pb"

        # We'll create an input graph that has a single constant containing 1.0,
        # and that then multiplies it by 2.
        with ops.Graph().as_default():
            constant_node = constant_op.constant(1.0, name="constant_node")
            wanted_input_node = math_ops.subtract(constant_node,
                                                  3.0,
                                                  name="wanted_input_node")
            output_node = math_ops.multiply(wanted_input_node,
                                            2.0,
                                            name="output_node")
            math_ops.add(output_node, 2.0, name="later_node")
            sess = session.Session()
            output = self.evaluate(output_node)
            self.assertNear(-4.0, output, 0.00001)
            graph_io.write_graph(sess.graph, self.get_temp_dir(),
                                 input_graph_name)

        # We save out the graph to disk, and then call the const conversion
        # routine.
        input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
        input_binary = False
        output_binary = True
        output_node_names = "output_node"
        output_graph_path = os.path.join(self.get_temp_dir(),
                                         output_graph_name)

        def strip(input_node_names):
            strip_unused_lib.strip_unused_from_files(
                input_graph_path, input_binary, output_graph_path,
                output_binary, input_node_names, output_node_names,
                dtypes.float32.as_datatype_enum)

        with self.assertRaises(KeyError):
            strip("does_not_exist")

        with self.assertRaises(ValueError):
            strip("wanted_input_node:0")

        input_node_names = "wanted_input_node"
        strip(input_node_names)

        # Now we make sure the unused nodes were stripped and that the graph
        # still produces the expected result.
        with ops.Graph().as_default():
            output_graph_def = graph_pb2.GraphDef()
            with open(output_graph_path, "rb") as f:
                output_graph_def.ParseFromString(f.read())
                _ = importer.import_graph_def(output_graph_def, name="")

            self.assertEqual(3, len(output_graph_def.node))
            for node in output_graph_def.node:
                self.assertNotEqual("Add", node.op)
                self.assertNotEqual("Sub", node.op)
                if node.name == input_node_names:
                    self.assertTrue("shape" in node.attr)

            with session.Session() as sess:
                input_node = sess.graph.get_tensor_by_name(
                    "wanted_input_node:0")
                output_node = sess.graph.get_tensor_by_name("output_node:0")
                output = sess.run(output_node, feed_dict={input_node: [10.0]})
                self.assertNear(20.0, output, 0.00001)
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io

meta_path = 'model.ckpt-62000.meta'  # Your .meta file
output_node_names = ['stack']  # Output nodes

with tf.Session() as sess:

    # Restore the graph
    saver = tf.train.import_meta_graph(meta_path)

    # Load weights
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    #with tf.variable_scope('Openpose'):
    #        (self.feed('Mconv7_stage6_L2',
    #                   'Mconv7_stage6_L1')
    #             .concat(3, name='concat_stage8'))
    for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
        #if 'concat' in ts:
        print(ts)
    # Freeze the graph
    frozen_graph_def = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, output_node_names)

    # Save the frozen graph
    with open('output_graph.pb', 'wb') as f:
        f.write(frozen_graph_def.SerializeToString())

    constant_graph = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), output_node_names)
    graph_io.write_graph(constant_graph, ".", "model1.pb", as_text=True)
Example #41
    def testStripUnusedMultipleInputs(self):
        input_graph_name = "input_graph.pb"
        output_graph_name = "output_graph.pb"

        # We'll create an input graph that multiplies two input nodes.
        with ops.Graph().as_default():
            constant_node1 = constant_op.constant(1.0, name="constant_node1")
            constant_node2 = constant_op.constant(2.0, name="constant_node2")
            input_node1 = math_ops.subtract(constant_node1,
                                            3.0,
                                            name="input_node1")
            input_node2 = math_ops.subtract(constant_node2,
                                            5.0,
                                            name="input_node2")
            output_node = math_ops.multiply(input_node1,
                                            input_node2,
                                            name="output_node")
            math_ops.add(output_node, 2.0, name="later_node")
            sess = session.Session()
            output = self.evaluate(output_node)
            self.assertNear(6.0, output, 0.00001)
            graph_io.write_graph(sess.graph, self.get_temp_dir(),
                                 input_graph_name)

        # We save out the graph to disk, and then call the const conversion
        # routine.
        input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
        input_binary = False
        input_node_names = "input_node1,input_node2"
        input_node_types = [
            dtypes.float32.as_datatype_enum, dtypes.float32.as_datatype_enum
        ]
        output_binary = True
        output_node_names = "output_node"
        output_graph_path = os.path.join(self.get_temp_dir(),
                                         output_graph_name)

        strip_unused_lib.strip_unused_from_files(
            input_graph_path, input_binary, output_graph_path, output_binary,
            input_node_names, output_node_names, input_node_types)

        # Now we make sure the unused nodes were stripped and that the graph
        # still produces the expected result.
        with ops.Graph().as_default():
            output_graph_def = graph_pb2.GraphDef()
            with open(output_graph_path, "rb") as f:
                output_graph_def.ParseFromString(f.read())
                _ = importer.import_graph_def(output_graph_def, name="")

            self.assertEqual(3, len(output_graph_def.node))
            for node in output_graph_def.node:
                self.assertNotEqual("Add", node.op)
                self.assertNotEqual("Sub", node.op)
                if node.name == input_node_names:
                    self.assertTrue("shape" in node.attr)

            with session.Session() as sess:
                input_node1 = sess.graph.get_tensor_by_name("input_node1:0")
                input_node2 = sess.graph.get_tensor_by_name("input_node2:0")
                output_node = sess.graph.get_tensor_by_name("output_node:0")
                output = sess.run(output_node,
                                  feed_dict={
                                      input_node1: [10.0],
                                      input_node2: [-5.0]
                                  })
                self.assertNear(-50.0, output, 0.00001)
Example #42
def convert_to_pb(weight_file,input_fld='',output_fld='', model_type = None):

    import os
    import os.path as osp
    import tensorflow as tf
    from keras import backend as K
    from tensorflow.python.framework import graph_util
    from tensorflow.python.framework import graph_io
    from keras.models import load_model

    # weight_file is a .h5 keras model file
    output_node_names_of_input_network = ["pred0"]
    output_node_names_of_final_network = 'output_node'

    # change filename to a .pb tensorflow file
    output_graph_name = weight_file[:-2]+'pb'
    weight_file_path = osp.join(input_fld, weight_file)
    '''
    if(model_type=='AE'):
      input_img = Input(shape=(32, 32, 3))
      x = Conv2D(64, (3, 3), padding='same')(input_img)
      x = BatchNormalization()(x)
      x = Activation('relu')(x)
      x = MaxPooling2D((2, 2), padding='same')(x)
      x = Conv2D(32, (3, 3), padding='same')(x)
      x = BatchNormalization()(x)
      x = Activation('relu')(x)
      x = MaxPooling2D((2, 2), padding='same')(x)
      x = Conv2D(16, (3, 3), padding='same')(x)
      x = BatchNormalization()(x)
      x = Activation('relu')(x)
      encoded = MaxPooling2D((2, 2), padding='same')(x)

      x = Conv2D(16, (3, 3), padding='same')(encoded)
      x = BatchNormalization()(x)
      x = Activation('relu')(x)
      x = UpSampling2D((2, 2))(x)
      x = Conv2D(32, (3, 3), padding='same')(x)
      x = BatchNormalization()(x)
      x = Activation('relu')(x)
      x = UpSampling2D((2, 2))(x)
      x = Conv2D(64, (3, 3), padding='same')(x)
      x = BatchNormalization()(x)
      x = Activation('relu')(x)
      x = UpSampling2D((2, 2))(x)
      x = Conv2D(3, (3, 3), padding='same')(x)
      x = BatchNormalization()(x)
      decoded = Activation('sigmoid')(x)

      net_model = Model(input_img, decoded)
      net_model.load_weights(weight_file_path)

    if(model_type =='Classifier'):
      num_classes = 10
      net_model = Sequential()
      net_model.add(Conv2D(32, (3, 3), padding='same',
                       input_shape=(32,32,3)))
      net_model.add(Activation('relu'))
      net_model.add(Conv2D(32, (3, 3)))
      net_model.add(Activation('relu'))
      net_model.add(MaxPooling2D(pool_size=(2, 2)))
      net_model.add(Dropout(0.25))

      net_model.add(Conv2D(64, (3, 3), padding='same'))
      net_model.add(Activation('relu'))
      net_model.add(Conv2D(64, (3, 3)))
      net_model.add(Activation('relu'))
      net_model.add(MaxPooling2D(pool_size=(2, 2)))
      net_model.add(Dropout(0.25))

      net_model.add(Flatten())
      net_model.add(Dense(512))
      net_model.add(Activation('relu'))
      net_model.add(Dropout(0.5))
      net_model.add(Dense(num_classes))
      net_model.add(Activation('softmax'))

      opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
      net_model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
      net_model.load_weights(weight_file_path)
    '''
    net_model = load_model(weight_file_path)
    #print("model.outputs: ", net_model.outputs)
    #print("model.inputs: ", net_model.inputs)
    num_output = len(output_node_names_of_input_network)
    pred = [None]*num_output
    pred_node_names = [None]*num_output

    for i in range(num_output):
        pred_node_names[i] = output_node_names_of_final_network+str(i)
        pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])

    sess = K.get_session()

    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
    graph_io.write_graph(constant_graph, output_fld, output_graph_name, as_text=False)
    print('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))

    return osp.join(output_fld, output_graph_name)
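# A hedged usage example (file names illustrative); the returned value is the
# path of the written .pb file:
#
#   pb_path = convert_to_pb('classifier.h5', input_fld='models', output_fld='models')
#   # -> models/classifier.pb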
Example #43
def export_scoped_meta_graph(filename=None,
                             graph_def=None,
                             graph=None,
                             export_scope=None,
                             as_text=False,
                             unbound_inputs_col_name="unbound_inputs",
                             clear_devices=False,
                             saver_def=None,
                             clear_extraneous_savers=False,
                             strip_default_attrs=False,
                             **kwargs):
  """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    graph: The `Graph` to export. If `None`, use the default graph.
    export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
      for easy import later into new name scopes. If `None`, the whole graph
      is exported.
    as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
    unbound_inputs_col_name: Optional `string`. If provided, a string collection
      with the given name will be added to the returned `MetaGraphDef`,
      containing the names of tensors that must be remapped when importing the
      `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      before exporting the graph.
    saver_def: `SaverDef` protocol buffer.
    clear_extraneous_savers: Remove any Saver-related information from the
        graph (both Save/Restore ops and SaverDefs) that are not associated
        with the provided SaverDef.
    strip_default_attrs: Set to true if default valued attributes must be
        removed while exporting the GraphDef.
    **kwargs: Optional keyed arguments, including meta_info_def and
        collection_list.

  Returns:
    A `MetaGraphDef` proto and dictionary of `Variables` in the exported
    name scope.

  Raises:
    ValueError: When the `GraphDef` is larger than 2GB.
  """
  if context.executing_eagerly():
    raise ValueError("Exporting/importing meta graphs is not supported when "
                     "Eager Execution is enabled.")
  graph = graph or ops.get_default_graph()

  exclude_nodes = None
  unbound_inputs = []
  if export_scope or clear_extraneous_savers or clear_devices:
    if graph_def:
      new_graph_def = graph_pb2.GraphDef()
      new_graph_def.versions.CopyFrom(graph_def.versions)
      new_graph_def.library.CopyFrom(graph_def.library)

      if clear_extraneous_savers:
        exclude_nodes = _find_extraneous_saver_nodes(graph_def, saver_def)

      for node_def in graph_def.node:
        if _should_include_node(node_def.name, export_scope, exclude_nodes):
          new_node_def = _node_def(node_def, export_scope, unbound_inputs,
                                   clear_devices=clear_devices)
          new_graph_def.node.extend([new_node_def])
      graph_def = new_graph_def
    else:
      # Only do this complicated work if we want to remove a name scope.
      graph_def = graph_pb2.GraphDef()
      # pylint: disable=protected-access
      graph_def.versions.CopyFrom(graph.graph_def_versions)
      bytesize = 0

      if clear_extraneous_savers:
        exclude_nodes = _find_extraneous_saver_nodes(graph.as_graph_def(),
                                                     saver_def)

      for key in sorted(graph._nodes_by_id):
        if _should_include_node(graph._nodes_by_id[key].name,
                                export_scope,
                                exclude_nodes):
          value = graph._nodes_by_id[key]
          # pylint: enable=protected-access
          node_def = _node_def(value.node_def, export_scope, unbound_inputs,
                               clear_devices=clear_devices)
          graph_def.node.extend([node_def])
          if value.outputs:
            assert "_output_shapes" not in graph_def.node[-1].attr
            graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
                output.get_shape().as_proto() for output in value.outputs])
          bytesize += value.node_def.ByteSize()
          if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")

      graph._copy_functions_to_graph_def(graph_def, bytesize)  # pylint: disable=protected-access

    # It's possible that not all the inputs are in the export_scope.
    # If we would like such information included in the exported meta_graph,
    # add them to a special unbound_inputs collection.
    if unbound_inputs_col_name:
      # Clears the unbound_inputs collections.
      graph.clear_collection(unbound_inputs_col_name)
      for k in unbound_inputs:
        graph.add_to_collection(unbound_inputs_col_name, k)

  var_list = {}
  variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                   scope=export_scope)
  for v in variables:
    if _should_include_node(v, export_scope, exclude_nodes):
      var_list[ops.strip_name_scope(v.name, export_scope)] = v

  scoped_meta_graph_def = create_meta_graph_def(
      graph_def=graph_def,
      graph=graph,
      export_scope=export_scope,
      exclude_nodes=exclude_nodes,
      clear_extraneous_savers=clear_extraneous_savers,
      saver_def=saver_def,
      strip_default_attrs=strip_default_attrs,
      **kwargs)

  if filename:
    graph_io.write_graph(
        scoped_meta_graph_def,
        os.path.dirname(filename),
        os.path.basename(filename),
        as_text=as_text)

  return scoped_meta_graph_def, var_list
Example #44
K.set_learning_phase(0)
net_model = load_model(weight_file_path)

pred = [None] * num_output
pred_node_names = [None] * num_output
for i in range(num_output):
    pred_node_names[i] = prefix_output_node_names_of_final_network + str(i)
    pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])
print('output node names are: ', pred_node_names)

sess = K.get_session()

if write_graph_def_ascii_flag:
    f = 'only_the_graph_def.pb.ascii'
    tf.train.write_graph(sess.graph.as_graph_def(),
                         output_fld,
                         f,
                         as_text=True)
    print('saved the graph definition in ascii format at: ',
          osp.join(output_fld, f))

constant_graph = graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(), pred_node_names)
graph_io.write_graph(constant_graph,
                     output_fld,
                     output_graph_name,
                     as_text=False)
print('saved the constant graph (ready for inference) at: ',
      osp.join(output_fld, output_graph_name))
  def _testFreezeGraph(self, saver_write_version):

    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    with ops.Graph().as_default():
      variable_node = variables.VariableV1(1.0, name="variable_node")
      output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
      sess = session.Session()
      init = variables.global_variables_initializer()
      sess.run(init)
      output = sess.run(output_node)
      self.assertNear(2.0, output, 0.00001)
      saver = saver_lib.Saver(write_version=saver_write_version)
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(
        input_graph_path,
        input_saver_def_path,
        input_binary,
        checkpoint_path,
        output_node_names,
        restore_op_name,
        filename_tensor_name,
        output_graph_path,
        clear_devices,
        "",
        "",
        "",
        checkpoint_version=saver_write_version)

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      self.assertEqual(4, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("VariableV2", node.op)
        self.assertNotEqual("Variable", node.op)

      with session.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
Example #46
    def save(self, path=None, name=None, overwrite=True):
        path = "Models" if path is None else path
        name = "Cache" if name is None else name
        folder = os.path.join(path, name)
        if not os.path.exists(folder):
            os.makedirs(folder)
        _dir = os.path.join(folder, "Model")
        if os.path.isfile(_dir):
            if not overwrite:
                _count = 1
                _new_dir = _dir + "({})".format(_count)
                while os.path.isfile(_new_dir):
                    _count += 1
                    _new_dir = _dir + "({})".format(_count)
                _dir = _new_dir
            else:
                os.remove(_dir)

        print()
        print("=" * 60)
        print("Saving Model to {}...".format(folder))
        print("-" * 60)

        with open(_dir + ".nn", "wb") as file:
            # We don't need w_stds & b_inits when we load a model
            _dic = {
                "structures": {
                    "_lr": self._lr,
                    "_layer_names": self.layer_names,
                    "_layer_params": self._layer_params,
                    "_next_dimension": self._current_dimension
                },
                "params": {
                    "_logs": self._logs,
                    "_metric_names": self._metric_names,
                    "_optimizer": self._optimizer.name,
                    "layer_special_params": self.layer_special_params
                }
            }
            pickle.dump(_dic, file)
        saver = tf.train.Saver()
        saver.save(self._sess, _dir)
        graph_io.write_graph(self._sess.graph, os.path.join(path, name), "Model.pb", False)
        with tf.name_scope("OutputFlow"):
            self.get_rs(self._tfx)
        _output = ""
        for op in self._sess.graph.get_operations()[::-1]:
            if "OutputFlow" in op.name:
                _output = op.name
                break
        with open(os.path.join(path, name, "IO.txt"), "w") as file:
            file.write("\n".join([
                "Input  : Entry/Placeholder:0",
                "Output : {}:0".format(_output)
            ]))
        graph_io.write_graph(self._sess.graph, os.path.join(path, name), "Cache.pb", False)
        freeze_graph.freeze_graph(
            os.path.join(path, name, "Cache.pb"),
            "", True, os.path.join(path, name, "Model"),
            _output, "save/restore_all", "save/Const:0",
            os.path.join(path, name, "Frozen.pb"), True, ""
        )
        os.remove(os.path.join(path, name, "Cache.pb"))

        print("Done")
        print("=" * 60)