Example #1
def freeze_graph(ckpt_dir, output_file, input_nodes, output_nodes):
    # load checkpoint
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    ckpt_path = ckpt.model_checkpoint_path

    saver = tf.train.import_meta_graph(ckpt_path + ".meta", clear_devices=True)

    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    with tf.Session() as sess:
        saver.restore(sess, ckpt_path)

        # freeze graph
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, input_graph_def, output_nodes.split(","))

        # print nodes info
        # display_nodes(output_graph_def.node)

        # optimize graph - keep only the ops needed to go from the inputs to the outputs
        output_graph_def = optimize_for_inference_lib.optimize_for_inference(
            output_graph_def,
            input_nodes.split(","),
            output_nodes.split(","),
            tf.uint8.as_datatype_enum,
        )

        # save graph to file
        with tf.gfile.GFile(output_file, "wb") as f:
            f.write(output_graph_def.SerializeToString())

        print("Saving graph done.")
        print("{} operations in the graph.".format(len(output_graph_def.node)))
Example #2
def export_model(saver, input_node_names, output_node_name):
    # create the 'out' folder and write the output files there
    if not path.exists('out'):
        os.mkdir('out')

    # an arbitrary name for our graph
    GRAPH_NAME = 'grafopp'

    # save the graph definition in text (.pbtxt) format
    tf.train.write_graph(K.get_session().graph_def, 'out',
                         GRAPH_NAME + '_graph.pbtxt')

    # save the weights as a checkpoint
    saver.save(K.get_session(), 'out/' + GRAPH_NAME + '.chkp')

    # freeze the graph: take the graph written above and write it as a .bytes file, the format Unity reads
    freeze_graph.freeze_graph('out/' + GRAPH_NAME + '_graph.pbtxt', None,
                              False, 'out/' + GRAPH_NAME + '.chkp',
                              output_node_name, "save/restore_all",
                              "save/Const:0",
                              'out/frozen_' + GRAPH_NAME + '.bytes', True, "")

    # optimize the frozen .bytes graph
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + GRAPH_NAME + '.bytes', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + GRAPH_NAME + '.bytes', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("grafo guardado")
def save_model():
    input_graph_path = MODEL_DIRECTORY + '/' + MODEL_NAME + '.pbtxt'
    checkpoint_path = MODEL_DIRECTORY + '/' + MODEL_NAME + '.ckpt'
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "prediction"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_frozen_graph_name = MODEL_DIRECTORY + '/frozen.pb'
    output_optimized_graph_name = MODEL_DIRECTORY + '/optimized.pb'
    clear_devices = True

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_frozen_graph_name, clear_devices, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
        data = f.read()
        input_graph_def.ParseFromString(data)

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        ["input"],  # an array of the input node(s)
        ["prediction"],  # an array of output nodes
        tf.float32.as_datatype_enum)

    # Save the optimized graph

    with tf.gfile.FastGFile(output_optimized_graph_name, "wb") as f:
        f.write(output_graph_def.SerializeToString())
def export_model_for_mobile(model_name, input_node_name, output_node_name,
                            sess):
    # dump graph as pbtxt
    tf.train.write_graph(sess.graph_def, 'out', model_name + '_graph.pbtxt')
    # save checkpoint
    tf.train.Saver().save(sess, 'out/' + model_name + '.chkp')
    # freeze the graph
    # param1: path for pbtxt, param2: path for checkpoint, param3: name for last layer
    freeze_graph.freeze_graph('out/' + model_name + '_graph.pbtxt', None,
                              False, 'out/' + model_name + '.chkp',
                              output_node_name, "save/restore_all",
                              "save/Const:0",
                              "out" + "/frozen_" + model_name + '.pb', True,
                              "")
    # parse graph to optimize
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open("out" + "/frozen_" + model_name + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())
    # optimize
    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, [input_node_name], [output_node_name],
        tf.float32.as_datatype_enum)
    # output lite model
    with tf.gfile.FastGFile("out" + '/tensorflow_lite_' + model_name + '.pb',
                            "wb") as f:
        f.write(output_graph_def.SerializeToString())
Example #5
def save(sess):
    print(tf.all_variables())
    input_graph_def = sess.graph.as_graph_def()

    #for op in input_graph_def.node:
    #     print(op.name)

    output_nodes_names = ["init_26"]
    output_graph_def = graph_util.convert_variables_to_constants(
            sess,               # the session
            input_graph_def,    # graph definition holding the nodes
            output_nodes_names)

    output_graph_name = "freeze.pb"
    with tf.gfile.GFile(output_graph_name, "wb") as f:
        f.write(output_graph_def.SerializeToString())

    inp_node = ['Placeholder']
    optimize_graph_def = optimize_for_inference_lib.optimize_for_inference(
            output_graph_def, [], output_nodes_names,
            tf.float32.as_datatype_enum)
    optimize_graph_def = TransformGraph(optimize_graph_def, inp_node,
                                        output_nodes_names,
                                        ["sort_by_execution_order"])

    output_graph_name = "optimize.pb"
    with tf.gfile.GFile(output_graph_name, "wb") as f:
        f.write(optimize_graph_def.SerializeToString())
Example #6
def export_model(saver: tf.train.Saver, input_names: list, output_name: str, model_name: str):
    """
    You can find node names by using the debugger: attach it right after the model is created and look for the node names in the inspector.
    :param saver:
    :param input_names:
    :param output_name:
    :param model_name:
    :return:
    """
    os.makedirs("./out", exist_ok=True)
    tf.train.write_graph(K.get_session().graph_def, 'out',
                         model_name + '_graph.pbtxt')

    saver.save(K.get_session(), 'out/' + model_name + '.chkp')

    # pbtxt is human readable representation of the graph
    freeze_graph.freeze_graph('out/' + model_name + '_graph.pbtxt', None,
                              False, 'out/' + model_name + '.chkp', output_name,
                              "save/restore_all", "save/Const:0",
                              'out/frozen_' + model_name + '.pb', True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + model_name + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    # optimization of the graph so we can use it in the android app
    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_names, [output_name],
        tf.float32.as_datatype_enum)

    # This is archived optimal graph in the protobuf format we'll use in our android App.
    with tf.gfile.FastGFile('out/opt_' + model_name + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")
def create_graph(graph_file, bert_config_file, init_checkpoint, max_seq_len, select_layers, output_dir = '../bert/tmp'):
    #tf.reset_default_graph()
    #from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference
    tf.gfile.MakeDirs(output_dir)

    bert_config = modeling.BertConfig.from_json_file(bert_config_file)

    input_ids = tf.placeholder(tf.int32, (None, max_seq_len), 'input_ids')
    input_mask = tf.placeholder(tf.int32, (None,max_seq_len), 'input_mask')
    input_type_ids = tf.placeholder(tf.int32, (None, max_seq_len), 'input_type_ids')

    input_tensors = [input_ids, input_mask, input_type_ids]

    
    model = modeling.BertModel(
            config=bert_config,
            is_training=False,
            input_ids=input_ids,
            input_mask=input_mask,
            token_type_ids=input_type_ids,
            use_one_hot_embeddings=False)

    tvars = tf.trainable_variables()
    (assignment_map, initialized_variable_names
         ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)

    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        
    all_layers = []
    if len(select_layers) == 1:
        encoder_layer = model.all_encoder_layers[select_layers[0]]
    else:
        for layer in select_layers:
            all_layers.append(model.all_encoder_layers[layer])
        encoder_layer = tf.concat(all_layers, -1)


    #output_tensors = [encoder_layer]
    pooled = tf.identity(encoder_layer, 'final_encodes')
    output_tensors = [pooled]
        
    tmp_g = tf.get_default_graph().as_graph_def()

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        tmp_g = tf.graph_util.convert_variables_to_constants(sess, tmp_g, [n.name[:-2] for n in output_tensors])
        #[print(n.name) for n in output_tensors]
        dtypes = [n.dtype for n in input_tensors]
        #[print(n.name) for n in input_tensors]
        tmp_g = optimize_for_inference(
            tmp_g,
            [n.name[:-2] for n in input_tensors],
            [n.name[:-2] for n in output_tensors],
            [dtype.as_datatype_enum for dtype in dtypes],
            False)
    tmp_file = graph_file
    with tf.gfile.GFile(tmp_file, 'wb') as f:
        f.write(tmp_g.SerializeToString())
    return tmp_file
Example #8
    def _load_frozen_model(self, model_name, model_descriptor):
        infer_config = tf.compat.v1.ConfigProto()
        infer_config.intra_op_parallelism_threads = self._intra_threads
        infer_config.inter_op_parallelism_threads = self._inter_threads

        with tf.io.gfile.GFile(model_name, "rb") as graph_file:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(graph_file.read())

        self._outputs = [
            output + ":0" for output in model_descriptor["output"].split(",")
        ]

        # by default Graph is optimized for the inference
        if not self._unoptimized:
            graph_def = optimize_for_inference(
                graph_def,
                [item.split(":")[0] for item in self._inputs],
                [item.split(":")[0] for item in self._outputs],
                dtypes.float32.as_datatype_enum,
                False,
            )

        graph = tf.compat.v1.Graph()
        with graph.as_default():
            tf.compat.v1.import_graph_def(graph_def, name="")
        self._session = tf.compat.v1.Session(graph=graph, config=infer_config)
def convert_to_pb(model,
                  path,
                  input_layer_name,
                  output_layer_name,
                  pbfilename,
                  verbose=False):

    model.load(path, weights_only=True)
    print("[INFO] Loaded CNN network weights from " + path + " ...")

    print("[INFO] Re-export model ...")
    del tf.get_collection_ref(tf.GraphKeys.TRAIN_OPS)[:]
    model.save("model-tmp.tfl")

    # taken from: https://stackoverflow.com/questions/34343259/is-there-an-example-on-how-to-generate-protobuf-files-holding-trained-tensorflow

    print("[INFO] Re-import model ...")

    input_checkpoint = "model-tmp.tfl"
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', True)
    sess = tf.Session()
    saver.restore(sess, input_checkpoint)

    # print out all layers to find name of output

    if verbose:
        for op in sess.graph.get_operations():
            print(op.values())

    print("[INFO] Freeze model to " + pbfilename + " ...")

    # freeze and removes nodes which are not related to feedforward prediction

    minimal_graph = convert_variables_to_constants(sess,
                                                   sess.graph.as_graph_def(),
                                                   [output_layer_name])

    graph_def = optimize_for_inference_lib.optimize_for_inference(
        minimal_graph, [input_layer_name], [output_layer_name],
        tf.float32.as_datatype_enum)
    graph_def = TransformGraph(graph_def, [input_layer_name],
                               [output_layer_name],
                               ["sort_by_execution_order"])

    with tf.gfile.GFile(pbfilename, 'wb') as f:
        f.write(graph_def.SerializeToString())

    # write model to logs dir so we can visualize it as:
    # tensorboard --logdir="logs"

    if (verbose):
        writer = tf.summary.FileWriter('logs', graph_def)
        writer.close()

    # tidy up tmp files

    for f in glob.glob("model-tmp.tfl*"):
        os.remove(f)

    os.remove('checkpoint')
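A hedged usage sketch for convert_to_pb, assuming model is a TFLearn DNN built elsewhere; the layer names and file paths below are placeholders:

# Sketch only: "model" is assumed to be a tflearn.DNN instance whose graph
# contains layers with the (placeholder) names used below.
convert_to_pb(model,
              path="checkpoints/model.tfl",
              input_layer_name="InputData/X",
              output_layer_name="FullyConnected/Softmax",
              pbfilename="model.pb",
              verbose=True)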
Example #10
def optimize_graph(frozen_graph_filename, suffix='optimized'):
    """Optimize a TensorFlow graph for inference.

    Optimized graphs are saved to the same directory as the input frozen graph.

    Args:
        frozen_graph_filename (str): the filename of a frozen graph.
        suffix (optional, str): a suffix to append to the optimized graph file.
    
    Returns:
        optimized_graph_filename (str): a path to the saved optimized graph.
    """
    output_dir, basename = os.path.split(frozen_graph_filename)
    graph_def = load_graph_def(frozen_graph_filename)

    optimized_graph = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def=graph_def,
        input_node_names=['input_1'],
        placeholder_type_enum=dtypes.float32.as_datatype_enum,
        output_node_names=['deprocess_stylized_image_1/mul'],
        toco_compatible=True
    )

    optimized_graph_filename = os.path.basename(
        frozen_graph_filename).replace('frozen', suffix)
    tf.train.write_graph(
        optimized_graph, output_dir, optimized_graph_filename, as_text=False
    )
    logger.info('Saved optimized graph to: %s' %
                os.path.join(output_dir, optimized_graph_filename))
    return optimized_graph_filename
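The example above calls a load_graph_def helper that is not shown; a minimal sketch of what it presumably does (parse a binary frozen graph into a GraphDef) is:

def load_graph_def(frozen_graph_filename):
    # Assumed helper: read a binary frozen graph file into a GraphDef.
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def.ParseFromString(f.read())
    return graph_def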
Example #11
def export_inference_model(ckpt_dir, model_dir, model_name, toco_compatible):

    graph = tf.get_default_graph()

    with tf.Session(graph=graph) as sess:

        saver = tf.train.Saver()

        saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))

        frozen_graph_def = tf.graph_util.convert_variables_to_constants(
            sess, graph.as_graph_def(), [OUTPUT_OP_NAME])

        inference_graph_def = optimize_for_inference(
            frozen_graph_def,
            input_node_names=[INPUT_OP_NAME],
            output_node_names=[OUTPUT_OP_NAME],
            placeholder_type_enum=tf.float32.as_datatype_enum,
            toco_compatible=toco_compatible  # if True, keep the optimized graph compatible with the TFLite (TOCO) converter
        )

        inference_model_file_name = join(model_dir, model_name)

        with tf.gfile.GFile(inference_model_file_name, "wb") as f:
            f.write(inference_graph_def.SerializeToString())

    print('Inference graph can be found at ', inference_model_file_name)
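INPUT_OP_NAME and OUTPUT_OP_NAME are module-level constants in the original code; a hypothetical driver, assuming the network has already been constructed in the default graph, might be:

# Hypothetical values; the real op names depend on the model definition.
INPUT_OP_NAME = "input"
OUTPUT_OP_NAME = "predictions"

# The default graph must already contain the model's variables before this
# call, since the function restores them from the latest checkpoint.
export_inference_model(ckpt_dir="./checkpoints",
                       model_dir="./export",
                       model_name="model.pb",
                       toco_compatible=False)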
Example #12
def main(unused_args):
  if not gfile.Exists(FLAGS.input):
    print("Input graph file '" + FLAGS.input + "' does not exist!")
    return -1

  input_graph_def = graph_pb2.GraphDef()
  with gfile.Open(FLAGS.input, "rb") as f:
    data = f.read()
    if FLAGS.frozen_graph:
      input_graph_def.ParseFromString(data)
    else:
      text_format.Merge(data.decode("utf-8"), input_graph_def)

  output_graph_def = optimize_for_inference_lib.optimize_for_inference(
      input_graph_def,
      FLAGS.input_names.split(","),
      FLAGS.output_names.split(","),
      _parse_placeholder_types(FLAGS.placeholder_type_enum),
      FLAGS.toco_compatible)

  if FLAGS.frozen_graph:
    f = gfile.GFile(FLAGS.output, "wb")
    f.write(output_graph_def.SerializeToString())
  else:
    graph_io.write_graph(output_graph_def,
                         os.path.dirname(FLAGS.output),
                         os.path.basename(FLAGS.output))
  return 0
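This main comes from a command-line tool, so FLAGS and _parse_placeholder_types are defined elsewhere. A minimal sketch of the helper, assuming the flag holds a comma-separated list of DataType enum integers (the real tool may differ slightly):

def _parse_placeholder_types(values):
    # Assumed helper: "1,1" -> [1, 1]; a single entry collapses to an int.
    values = [int(value) for value in values.split(",")]
    return values if len(values) > 1 else values[0]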
Example #13
def export_model(input_node_names, output_node_name):
    print("exporting started...")

    freeze_graph.freeze_graph('out/' + MODEL_NAME + '.pbtxt',
                              "",
                              False,
                              'out/' + MODEL_NAME + '.chkp',
                              output_node_name,
                              "save/restore_all",
                              "save/Const:0",
                              'out/frozen_' + MODEL_NAME + '.pb',
                              True,
                              "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(input_graph_def,
                                                                         input_node_names,
                                                                         [output_node_name],
                                                                         tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")
Example #14
def OutputModel(saver, model, input_node_names, output_node_name):
    tf.train.write_graph(K.get_session().graph_def, 'out', \
                         MODEL_NAME + '_graph.pbtxt')

    saver.save(K.get_session(), 'out/' + MODEL_NAME + '.chkp')

    freeze_graph.freeze_graph('out/' + MODEL_NAME + '_graph.pbtxt', None, \
                              False, 'out/' + MODEL_NAME + '.chkp', output_node_name, \
                              "save/restore_all", "save/Const:0", \
                              'out/frozen_' + MODEL_NAME + '.pb', True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")

    return
def freeze_graph(model,
                 checkpoint_path,
                 tensor_shape,
                 moving_average_decay=0.9999):
    """Converts model ckpts."""
    logging.info('Processing ckpt=%s, tensor_shape=%s', checkpoint_path,
                 tensor_shape)
    out_node = 'InceptionV3/Predictions/Reshape_1'
    in_node = 'input'

    inp = tf.compat.v1.placeholder(shape=[1] + tensor_shape,
                                   dtype=tf.float32,
                                   name=in_node)
    _ = model.create(inp, num_classes=3, is_training=False)

    ema = tf.train.ExponentialMovingAverage(moving_average_decay)
    variables_to_restore = ema.variables_to_restore()

    load_ema = slim.assign_from_checkpoint_fn(checkpoint_path,
                                              variables_to_restore,
                                              ignore_missing_vars=True)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        load_ema(sess)

        graph_def = sess.graph.as_graph_def()
        graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
            sess, graph_def, [out_node])
        graph_def = optimize_for_inference_lib.optimize_for_inference(
            graph_def, [in_node], [out_node], tf.float32.as_datatype_enum)

        with tf.io.gfile.GFile('model.pb', 'wb') as f:
            f.write(graph_def.SerializeToString())
Example #16
def initialize_graph(model_details, disable_optimize_for_inference):
    graph = tf_v1.Graph()
    with graph.as_default():

        od_graph_def = tf_v1.GraphDef()
        with tf_v1.gfile.GFile(os.path.join(os.getcwd(), model_details['model_path']), 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            od_graph_def = delete_assign(od_graph_def)

        # optimize for inference
        if not disable_optimize_for_inference:
            # optimize graph for inference
            input_list = [in_name for in_name,
                          val in model_details['input'].items()]
            output_list = [
                out_name for out_name in model_details['output']]
            input_data_type = [tf_v1.convert_to_tensor(item).dtype.as_datatype_enum
                               for item in model_details['input'].values()]

            od_graph_def = optimize_for_inference_lib.optimize_for_inference(
                od_graph_def,  # inputGraph,
                input_list,  # an array of the input nodes
                output_list,  # an array of output nodes
                input_data_type)

        tf_v1.import_graph_def(od_graph_def, name='')

    return graph
Example #17
def prepare_for_dnn(sess,
                    graph_def,
                    in_node,
                    out_node,
                    out_graph,
                    dtype,
                    optimize=True,
                    quantize=False):
    # Freeze graph. Replaces variables to constants.
    graph_def = tf.graph_util.convert_variables_to_constants(
        sess, graph_def, [out_node])
    if optimize:
        # Optimize graph. Removes training-only ops, unused nodes.
        graph_def = optimize_for_inference_lib.optimize_for_inference(
            graph_def, [in_node], [out_node], dtype.as_datatype_enum)
        # Fuse constant operations.
        transforms = ["fold_constants(ignore_errors=True)"]
        if quantize:
            transforms += ["quantize_weights(minimum_size=0)"]
        transforms += ["sort_by_execution_order"]
        graph_def = TransformGraph(graph_def, [in_node], [out_node],
                                   transforms)
    # Serialize
    with tf.gfile.FastGFile(out_graph, 'wb') as f:
        f.write(graph_def.SerializeToString())
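A hedged usage sketch for prepare_for_dnn, assuming the network has already been defined in the default graph and that the node names are placeholders:

# Sketch only: build the model first, then freeze/optimize it for OpenCV DNN.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    prepare_for_dnn(sess, sess.graph.as_graph_def(),
                    in_node="input", out_node="output",
                    out_graph="model_for_dnn.pb",
                    dtype=tf.float32, optimize=True, quantize=False)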
  def tfliteInvoke(self, graph, test_inputs, outputs):
    tf.reset_default_graph()
    # Turn the input into placeholder of shape 1
    tflite_input = tf.placeholder(
        "float", [1, self.time_steps, self.n_input], name="INPUT_IMAGE_LITE")
    tf.import_graph_def(graph, name="", input_map={"INPUT_IMAGE": tflite_input})
    with tf.Session() as sess:
      curr = sess.graph_def
      curr = convert_op_hints_to_stubs(graph_def=curr)

    curr = optimize_for_inference_lib.optimize_for_inference(
        curr, ["INPUT_IMAGE_LITE"], ["OUTPUT_CLASS"],
        [tf.float32.as_datatype_enum])

    tflite = tf.lite.toco_convert(
        curr, [tflite_input], [outputs], allow_custom_ops=False)

    interpreter = tf.lite.Interpreter(model_content=tflite)

    try:
      interpreter.allocate_tensors()
    except ValueError:
      assert False

    input_index = (interpreter.get_input_details()[0]["index"])
    interpreter.set_tensor(input_index, test_inputs)
    interpreter.invoke()
    output_index = (interpreter.get_output_details()[0]["index"])
    result = interpreter.get_tensor(output_index)
    # Reset all variables so it will not pollute other inferences.
    interpreter.reset_all_variables()
    return result
Example #19
def export_model(model_output_dir, input_node_names, output_node_name):

    name_base = os.path.join(model_output_dir, MODEL_NAME)
    input_graph_path = os.path.join(model_output_dir, MODEL_NAME + '.pbtxt')
    checkpoint_path = os.path.join(model_output_dir,
                                   './' + MODEL_NAME + '.chkp')
    input_saver_def_path = ""
    input_binary = False
    restore_op_name = 'save/restore_all'
    filename_tensor_name = 'save/Const:0'
    clear_devices = True
    frozen_graph_file = os.path.join(model_output_dir,
                                     'frozen_' + MODEL_NAME + '.pb')

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_name,
                              restore_op_name, filename_tensor_name,
                              frozen_graph_file, clear_devices, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(frozen_graph_file, "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    optimized_graph_file = os.path.join(model_output_dir,
                                        'optimized_' + MODEL_NAME + '.pb')
    with tf.gfile.GFile(optimized_graph_file, "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("Inference optimized graph saved at: " + optimized_graph_file)
def main(unused_args):
  if not gfile.Exists(FLAGS.input):
    print("Input graph file '" + FLAGS.input + "' does not exist!")
    return -1

  input_graph_def = graph_pb2.GraphDef()
  with gfile.Open(FLAGS.input, "rb") as f:
    data = f.read()
    if FLAGS.frozen_graph:
      input_graph_def.ParseFromString(data)
    else:
      text_format.Merge(data.decode("utf-8"), input_graph_def)

  output_graph_def = optimize_for_inference_lib.optimize_for_inference(
      input_graph_def,
      FLAGS.input_names.split(","),
      FLAGS.output_names.split(","),
      FLAGS.placeholder_type_enum,
      FLAGS.toco_compatible)

  if FLAGS.frozen_graph:
    f = gfile.FastGFile(FLAGS.output, "wb")
    f.write(output_graph_def.SerializeToString())
  else:
    graph_io.write_graph(output_graph_def,
                         os.path.dirname(FLAGS.output),
                         os.path.basename(FLAGS.output))
  return 0
Example #21
def model_export(model, model_name):
    # K.set_learning_phase(0)
    config = model.get_config()
    weights = model.get_weights()
    K.clear_session()
    K.set_learning_phase(0)
    #model = Sequential.from_config(config)
    model = Model.from_config(config)
    #model = model_from_config(config)
    # K.set_learning_phase(0)
    model.set_weights(weights)
    # saver = Saver()
    # saver.save(K.get_session(), "tf_checkpoint")
    graph_def = K.get_session().graph.as_graph_def()
    frozen_graph = convert_variables_to_constants(K.get_session(), graph_def,
                                                  [model.output.name[:-2]])
    opt_graph = optimize_for_inference(frozen_graph, [model.input.name[:-2]],
                                       [model.output.name[:-2]],
                                       tf.float32.as_datatype_enum)
    # opt_graph = frozen_graph
    tf.reset_default_graph()
    tf.import_graph_def(opt_graph, name="")
    # rewrite = GraphRewriter()
    write_graph(opt_graph, "./tfmodel/", model_name + '.pb', as_text=False)
    print([o.name for o in tf.get_default_graph().get_operations()])
def _optimize_checkpoint_for_inference(graph_path: str,
                                       input_names: List[str],
                                       output_names: List[str]):
    """
    Removes Horovod and training related information from the graph

    :param graph_path: (str) Path to the graph.pbtxt file
    :param input_names: (List[str]) Input node names
    :param output_names: (List[str]) Output node names
    """

    print('[*] Optimizing graph for inference ...')

    input_graph_def = graph_pb2.GraphDef()
    with gfile.Open(graph_path, "rb") as f:
        data = f.read()
        text_format.Merge(data.decode("utf-8"), input_graph_def)

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        input_names,
        output_names,
        _parse_placeholder_types(str(dtypes.float32.as_datatype_enum)),
        False)

    print('[*] Saving original graph in: {}'.format(graph_path + '.old'))
    shutil.move(graph_path, graph_path + '.old')

    print('[*] Writing down optimized graph ...')
    graph_io.write_graph(output_graph_def,
                         os.path.dirname(graph_path),
                         os.path.basename(graph_path))
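A hypothetical call for the function above (the graph path and node names are placeholders):

_optimize_checkpoint_for_inference("results/graph.pbtxt",
                                   input_names=["input"],
                                   output_names=["logits"])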
Example #23
def export_model(saver, model, input_node_names, output_node_name):
    # Save trained model in .h5py file
    print("\n[INFO] exporting model to .h5py file")
    model.save_weights('out/hhrc_nn.h5py')

    print("\n[INFO] exporting model to .pb file\n")
    tf.train.write_graph(K.get_session().graph_def, 'out', \
        MODEL_NAME + '_graph.pbtxt')

    saver.save(K.get_session(), 'out/' + MODEL_NAME + '.chkp')

    freeze_graph.freeze_graph('out/' + MODEL_NAME + '_graph.pbtxt', None, \
        False, 'out/' + MODEL_NAME + '.chkp', output_node_name, \
        "save/restore_all", "save/Const:0", \
        'out/frozen_' + MODEL_NAME + '.pb', True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("\n[INFO] model saved")
Example #24
def export_model(saver, model, model_name, input_node_names, output_node_name):
    if not path.exists('out'):
        os.mkdir('out')

    tf.train.write_graph(K.get_session().graph_def, 'out',
                         model_name + '_graph.pbtxt')

    saver.save(K.get_session(), 'out/' + model_name + '.chkp')

    freeze_graph.freeze_graph('out/' + model_name + '_graph.pbtxt', None,
                              False, 'out/' + model_name + '.chkp',
                              output_node_name, "save/restore_all",
                              "save/Const:0",
                              'out/frozen_' + model_name + '.bytes', True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + model_name + '.bytes', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + model_name + '.bytes', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")
Example #25
def export_keras(model_name, input_names, output_names, with_weights=True):
    out_name = ','.join(output_names)
    saver = tf.train.Saver()
    tf.train.write_graph(K.get_session().graph_def, 'out',
                         model_name + '_graph.pbtxt')

    saver.save(K.get_session(), 'out/' + model_name + '.chkp')

    output_graph = None
    if with_weights:
        freeze_graph.freeze_graph(
            input_graph='out/' + model_name + '_graph.pbtxt',
            input_saver=None,
            input_binary=False,
            input_checkpoint='out/' + model_name + '.chkp',
            output_node_names=out_name,
            restore_op_name="save/restore_all",
            filename_tensor_name="save/Const:0",
            output_graph='out/frozen_' + model_name + '.pb',
            clear_devices=True,
            initializer_nodes="")

        input_graph_def = tf.GraphDef()
        with tf.gfile.Open('out/frozen_' + model_name + '.pb', "rb") as f:
            input_graph_def.ParseFromString(f.read())

        output_graph = optimize_for_inference_lib.optimize_for_inference(
            input_graph_def, input_names, output_names,
            tf.float32.as_datatype_enum)
    else:
        output_graph = tf.train.write_graph(K.get_session().graph,
                             'out/' + model_name, model_name + 'wo_weights.pb')

    return output_graph
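A hypothetical call after training a Keras model; the node names are placeholders and depend on the actual architecture:

# Sketch only: assumes a trained Keras model lives in the current session.
export_keras("my_model",
             input_names=["input_1"],
             output_names=["dense_1/Softmax"],
             with_weights=True)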
    def __init__(self,
                 input,
                 output,
                 frozen_graph=True,
                 input_names="Mul",
                 output_names="softmax",
                 placeholder_type_enum=dtypes.float32.as_datatype_enum):
        if not gfile.Exists(input):
            raise IOError("Input graph file '" + input + "' does not exist!")

        input_graph_def = graph_pb2.GraphDef()
        with gfile.Open(input, "rb") as f:
            data = f.read()
            if frozen_graph:
                input_graph_def.ParseFromString(data)
            else:
                text_format.Merge(data.decode("utf-8"), input_graph_def)

        output_graph_def = optimize_for_inference_lib.optimize_for_inference(
            input_graph_def, input_names.split(","), output_names.split(","),
            placeholder_type_enum)

        if frozen_graph:
            f = gfile.FastGFile(output, "wb")
            f.write(output_graph_def.SerializeToString())
        else:
            graph_io.write_graph(output_graph_def, os.path.dirname(output),
                                 os.path.basename(output))
Example #27
def export_model(saver, model, input_node_names, output_node_name):
    print('Exporting...')
    tf.train.write_graph(K.get_session().graph_def, '.', GRAPH_PBTXT)

    saver.save(K.get_session(), CHKP)

    freeze_graph.freeze_graph(GRAPH_PBTXT, None, False, CHKP, output_node_name,
                              "save/restore_all", "save/Const:0", FROZEN_PB,
                              True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(FROZEN_PB, "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile(OPTIMIZED_PB, "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")
    print('input_names:')
    print(input_node_names)
    print('output_name:')
    print(output_node_name)
def save_model(model_directory,
               model_name,
               input_array,
               output_array,
               frozen_name='frozen.pb',
               optimized_name='optimized.pb'):
    input_graph_path = model_directory + '/' + model_name + '.pbtxt'
    checkpoint_path = model_directory + '/' + model_name + '.ckpt'
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "prediction"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_frozen_graph_name = model_directory + '/' + frozen_name
    output_optimized_graph_name = model_directory + '/' + optimized_name
    clear_devices = True

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_frozen_graph_name, clear_devices, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
        data = f.read()
        input_graph_def.ParseFromString(data)

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_array, output_array,
        tf.float32.as_datatype_enum)

    # Save the optimized graph
    with tf.gfile.FastGFile(output_optimized_graph_name, "wb") as f:
        f.write(output_graph_def.SerializeToString())
def export_model(model_output_dir, input_node_names, output_node_name):
    """Export the model so we can use it later.
    This will create two Protocol Buffer files in the model output directory.
    These files represent a serialized version of our model with all the
    learned weights and biases. One of the ProtoBuf files is a version
    optimized for inference-only usage.
    """

    name_base = os.path.join(model_output_dir, MODEL_NAME)
    frozen_graph_file = os.path.join(model_output_dir,
                                     'frozen_' + MODEL_NAME + '.pb')
    freeze_graph.freeze_graph(name_base + '.pbtxt', None, False,
                              name_base + '.chkp', output_node_name,
                              "save/restore_all", "save/Const:0",
                              frozen_graph_file, True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(frozen_graph_file, "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    optimized_graph_file = os.path.join(model_output_dir,
                                        'optimized_' + MODEL_NAME + '.pb')
    with tf.gfile.GFile(optimized_graph_file, "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("Inference optimized graph saved at: " + optimized_graph_file)
Example #30
def export():
    tf.logging.set_verbosity(tf.logging.INFO)
    inp = tf.placeholder(tf.float32, [None], name=INPUT_TENSOR_NAME)
    model_fn(dict(data=inp), None, tf.estimator.ModeKeys.PREDICT)
    sess = get_session()
    tf.train.Saver().save(sess, os.path.join(EXPORT_FOLDER, 'checkpoint.ckpt'))
    tf.train.write_graph(sess.graph_def, EXPORT_FOLDER, 'graph.pbtxt', True)
    sess.close()
    print("Freezing graph")
    lp = get_latest_export()
    ckpt = tf.train.get_checkpoint_state(EXPORT_FOLDER)
    freeze_graph(
        os.path.join(EXPORT_FOLDER, 'graph.pbtxt'),
        None,
        False,
        ckpt.model_checkpoint_path,
        OUTPUT_TENSOR_NAME,
        'save/restore_all',
        'save/Const:0',
        os.path.join(EXPORT_FOLDER, 'frozen.pb'),
        True,
        ''
    )
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(os.path.join(EXPORT_FOLDER, 'frozen.pb'), "rb") as f:
        input_graph_def.ParseFromString(f.read())
    output_graph = optimize_for_inference(
        input_graph_def,
        [INPUT_TENSOR_NAME],
        [OUTPUT_TENSOR_NAME],
        tf.float32.as_datatype_enum
    )
    with tf.gfile.FastGFile(EXPORTED_MODEL_NAME, 'wb') as f:
        f.write(output_graph.SerializeToString())
Example #31
    def tfliteInvoke(self, graph, test_inputs, outputs):
        tf.reset_default_graph()
        # Turn the input into placeholder of shape 1
        tflite_input = tf.placeholder("float",
                                      [1, self.time_steps, self.n_input],
                                      name="INPUT_IMAGE_LITE")
        tf.import_graph_def(graph,
                            name="",
                            input_map={"INPUT_IMAGE": tflite_input})
        with tf.Session() as sess:
            curr = sess.graph_def
            curr = convert_op_hints_to_stubs(graph_def=curr)

        curr = optimize_for_inference_lib.optimize_for_inference(
            curr, ["INPUT_IMAGE_LITE"], ["OUTPUT_CLASS"],
            [tf.float32.as_datatype_enum])

        converter = tf.lite.TFLiteConverter(curr, [tflite_input], [outputs])
        tflite = converter.convert()

        interpreter = tf.lite.Interpreter(model_content=tflite)

        try:
            interpreter.allocate_tensors()
        except ValueError:
            assert False

        input_index = (interpreter.get_input_details()[0]["index"])
        interpreter.set_tensor(input_index, test_inputs)
        interpreter.invoke()
        output_index = (interpreter.get_output_details()[0]["index"])
        result = interpreter.get_tensor(output_index)
        # Reset all variables so it will not pollute other inferences.
        interpreter.reset_all_variables()
        return result
Example #32
def export_model(model_output_dir, input_node_names, output_node_name):
    """Export the model so we can use it later.
    """

    name_base = os.path.join(model_output_dir, MODEL_NAME)
    frozen_graph_file = os.path.join(model_output_dir,
                                     'frozen_' + MODEL_NAME + '.pb')
    freeze_graph.freeze_graph(name_base + '.pbtxt', None, False,
                              name_base + '.chkp', output_node_name,
                              "save/restore_all", "save/Const:0",
                              frozen_graph_file, True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(frozen_graph_file, "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    optimized_graph_file = os.path.join(model_output_dir,
                                        'optimized_' + MODEL_NAME + '.pb')
    with tf.gfile.GFile(optimized_graph_file, "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("Inference optimized graph saved at: " + optimized_graph_file)
Example #33
    def _optimize_for_inference(self):
        graph_def = self.getTFInputGraph().graph_def
        # Get data types of input placeholders
        placeholder_types = self._get_placeholder_types(graph_def)
        # Strip away graph nodes not used in computing the tensors with the specified output names
        input_names = [tfx.op_name(tnsr_name) for _, tnsr_name in self.getInputMapping()]
        output_names = [tfx.op_name(tnsr_name) for tnsr_name, _ in self.getOutputMapping()]
        return infr_opt.optimize_for_inference(graph_def,
                                               input_names,
                                               output_names,
                                               placeholder_types)
Example #34
def prepare_for_dnn(sess, graph_def, in_node, out_node, out_graph, dtype, optimize=True, quantize=False):
    # Freeze graph. Replaces variables to constants.
    graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, [out_node])
    if optimize:
        # Optimize graph. Removes training-only ops, unused nodes.
        graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, [in_node], [out_node], dtype.as_datatype_enum)
        # Fuse constant operations.
        transforms = ["fold_constants(ignore_errors=True)"]
        if quantize:
            transforms += ["quantize_weights(minimum_size=0)"]
        transforms += ["sort_by_execution_order"]
        graph_def = TransformGraph(graph_def, [in_node], [out_node], transforms)
    # Serialize
    with tf.gfile.FastGFile(out_graph, 'wb') as f:
        f.write(graph_def.SerializeToString())
def export_model(input_node_names, output_node_name):
    freeze_graph.freeze_graph('out/' + MODEL_NAME + '.pbtxt', None, False,
        'out/' + MODEL_NAME + '.chkp', output_node_name, "save/restore_all",
        "save/Const:0", 'out/frozen_' + MODEL_NAME + '.pb', True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
            input_graph_def, input_node_names, [output_node_name],
            tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")
def main(unused_args):
  if not tf.gfile.Exists(FLAGS.input):
    print("Input graph file '" + FLAGS.input + "' does not exist!")
    return -1

  input_graph_def = tf.GraphDef()
  with tf.gfile.Open(FLAGS.input, "rb") as f:
    data = f.read()
    input_graph_def.ParseFromString(data)

  output_graph_def = optimize_for_inference_lib.optimize_for_inference(
      input_graph_def, FLAGS.input_names.split(","),
      FLAGS.output_names.split(","), FLAGS.placeholder_type_enum)

  f = tf.gfile.FastGFile(FLAGS.output, "wb")
  f.write(output_graph_def.SerializeToString())

  return 0
    cvNet.setInput(inputData)
    cvNet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    outDNN = cvNet.forward(out_nodes)

    outTF = sess.run([mbox_loc, mbox_conf_flatten], feed_dict={inp: inputData.transpose(0, 2, 3, 1)})
    print('Max diff @ locations:  %e' % np.max(np.abs(outDNN[0] - outTF[0])))
    print('Max diff @ confidence: %e' % np.max(np.abs(outDNN[1] - outTF[1])))

    # Save a graph
    graph_def = sess.graph.as_graph_def()

    # Freeze graph. Replaces variables to constants.
    graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, out_nodes)
    # Optimize graph. Removes training-only ops, unused nodes.
    graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, inp_nodes, out_nodes, dtype.as_datatype_enum)
    # Fuse constant operations.
    transforms = ["fold_constants(ignore_errors=True)"]
    if args.quantize:
        transforms += ["quantize_weights(minimum_size=0)"]
    transforms += ["sort_by_execution_order"]
    graph_def = TransformGraph(graph_def, inp_nodes, out_nodes, transforms)

    # By default, float16 weights are stored in the tensor's repeated field called
    # `half_val`. It has type int32, with leading zeros for unused bytes.
    # This field is varint-encoded, meaning only 7 bits of each byte carry the value
    # and the last bit marks the end of the encoding. This way a float16 value may
    # take 1, 2 or 3 bytes depending on its value. To improve compression,
    # we move all `half_val` values into `tensor_content`, using exactly 2 bytes each.
    for node in graph_def.node:
        if 'value' in node.attr:
  def testOptimizeForInference(self):
    self.maxDiff = 1000
    unused_constant_name = "unused_constant"
    unconnected_add_name = "unconnected_add"
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    a_check_name = "a_check"
    b_check_name = "b_check"
    a_identity_name = "a_identity"
    b_identity_name = "b_identity"
    add_name = "add"
    unused_output_add_name = "unused_output_add"
    graph_def = graph_pb2.GraphDef()
    unused_constant = self.create_constant_node_def(
        unused_constant_name, value=0, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([unused_constant])
    unconnected_add_node = self.create_node_def(
        "Add", unconnected_add_name,
        [unused_constant_name, unused_constant_name])
    self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32)
    graph_def.node.extend([unconnected_add_node])
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant])
    a_check_node = self.create_node_def("CheckNumerics", a_check_name,
                                        [a_constant_name])
    graph_def.node.extend([a_check_node])
    a_identity_node = self.create_node_def(
        "Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
    graph_def.node.extend([a_identity_node])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant])
    b_check_node = self.create_node_def("CheckNumerics", b_check_name,
                                        [b_constant_name])
    graph_def.node.extend([b_check_node])
    b_identity_node = self.create_node_def(
        "Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
    graph_def.node.extend([b_identity_node])
    add_node = self.create_node_def("Add", add_name,
                                    [a_identity_name, b_identity_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    graph_def.node.extend([add_node])
    unused_output_add_node = self.create_node_def("Add", unused_output_add_name,
                                                  [add_name, b_constant_name])
    self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32)
    graph_def.node.extend([unused_output_add_node])

    expected_output = graph_pb2.GraphDef()
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant])
    add_node = self.create_node_def("Add", add_name,
                                    [a_constant_name, b_constant_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    expected_output.node.extend([add_node])

    output = optimize_for_inference_lib.optimize_for_inference(
        graph_def, [], [add_name], dtypes.float32.as_datatype_enum)
    self.assertProtoEquals(expected_output, output)
Example #39
                output_node_names=output_node_name,
                restore_op_name='save/restore_all',
                filename_tensor_name='save/Const:0',
                output_graph=frozen_graph_path, clear_devices=True,
                initializer_nodes='')

            print('Frozen graph exported to {}'.format(frozen_graph_path))

            graph_path = frozen_graph_path

        if do_optimize:
            print('Optimizing graph...')
            input_graph_def = tf.GraphDef()

            with tf.gfile.Open(graph_path, 'rb') as f:
                data = f.read()
                input_graph_def.ParseFromString(data)

                output_graph_def =\
                    optimize_for_inference_lib.optimize_for_inference(
                        input_graph_def,
                        [input_node_name],
                        [output_node_name],
                        tf.float32.as_datatype_enum)

                with tf.gfile.FastGFile(optimized_graph_path, 'wb') as f:
                    f.write(output_graph_def.SerializeToString())

                print('Optimized graph exported to {}'
                      .format(optimized_graph_path))
Example #40
    def build(self, input_nodes=None, output_nodes=None):
        if input_nodes is None:
            input_nodes = self.gan.input_nodes()
        if output_nodes is None:
            output_nodes = self.gan.output_nodes()
        save_file_text = self.name+".pbtxt"
        build_file = os.path.expanduser("builds/"+save_file_text)
        def create_path(filename):
            return os.makedirs(os.path.expanduser(os.path.dirname(filename)), exist_ok=True)
        create_path(build_file)
        tf.train.write_graph(self.gan.session.graph, 'builds', save_file_text)
        inputs = [x.name.split(":")[0] for x in input_nodes]
        outputs = [x.name.split(":")[0] for x in output_nodes]

        with self.gan.session as sess:
            converter = tf.lite.TFLiteConverter.from_session(sess, self.gan.input_nodes(), self.gan.output_nodes())
            tflite_model = converter.convert()
            f = open("builds/"+ self.gan.name+".tflite", "wb")
            f.write(tflite_model)
            f.close()
        tf.reset_default_graph()
        self.gan.session.close()
        [print("Input: ", x) for x in self.gan.input_nodes()]
        [print("Output: ", y) for y in self.gan.output_nodes()]
        print("Written to builds/"+self.gan.name+".tflite")

        pbtxt_path = "builds/"+self.name +'.pbtxt'
        checkpoint_path = "saves/"+self.name +'/model.ckpt'
        input_saver_def_path = ""
        input_binary = False
        output_node_names = ",".join(outputs)
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        output_frozen_graph_name = 'builds/frozen_'+self.name +'.pb'
        output_optimized_graph_name = 'builds/optimized_'+self.name+'.pb'
        clear_devices = True

        freeze_graph.freeze_graph(pbtxt_path, input_saver_def_path,
          input_binary, checkpoint_path, output_node_names,
          restore_op_name, filename_tensor_name,
          output_frozen_graph_name, clear_devices, "")

        input_graph_def = tf.GraphDef()
        with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
            data = f.read()
            input_graph_def.ParseFromString(data)

        output_graph_def = optimize_for_inference_lib.optimize_for_inference(
                input_graph_def,
                inputs, # an array of the input node(s)
                outputs, # an array of output nodes
                tf.float32.as_datatype_enum)

        # Save the optimized graph

        f = tf.gfile.FastGFile(output_optimized_graph_name, "wb")
        f.write(output_graph_def.SerializeToString())
        f.flush()
        f.close()



        print("Saved generator to ", output_optimized_graph_name)

        print("Testing loading ", output_optimized_graph_name)
        with tf.gfile.FastGFile(output_optimized_graph_name, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
            #tflite_model = tf.lite.TFLiteConverter(graph_def, self.gan.input_nodes(), self.gan.output_nodes()).convert()
            #f = open("builds/"+ self.gan.name+".tflite", "wb")
            #f.write(tflite_model)
            #f.close()

        with tf.Session() as sess:
            for input in inputs:
                print("Input: ", input, sess.graph.get_tensor_by_name(input+":0"))
            for output in outputs:
                print("Output: ", output, sess.graph.get_tensor_by_name(output+":0"))
clear_devices = True


freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                          input_binary, checkpoint_path, output_node_names,
                          restore_op_name, filename_tensor_name,
                          output_frozen_graph_name, clear_devices, "")



# Optimize for inference

input_graph_def = tf.GraphDef()
with tf.gfile.Open(output_frozen_graph_name, "r") as f:
    data = f.read()
    input_graph_def.ParseFromString(data)

output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        ["I"], # an array of the input node(s)
        ["O"], # an array of output nodes
        tf.float32.as_datatype_enum)


# Save the optimized graph

with tf.gfile.FastGFile(output_optimized_graph_name, "wb") as f:
    f.write(output_graph_def.SerializeToString())

# tf.train.write_graph(output_graph_def, './', output_optimized_graph_name)