def freeze_session(filename, sess, output_node_names, whitelist=None, blacklist=None):
    """Freeze a session's graph and serialize it to a binary file.

    Arguments:
        filename            Output path.
        sess                Session with variables and graph.
        output_node_names   List of names for the result nodes of the graph.
        whitelist           The set of variable names to convert; by default,
                            all variables are converted.
        blacklist           The set of variable names to omit converting to
                            constants.
    """
    from tensorflow import graph_util

    # Fold the session's variable values into constants.
    graph_def = sess.graph.as_graph_def()
    frozen = graph_util.convert_variables_to_constants(
        sess,
        graph_def,
        output_node_names,
        variable_names_whitelist=whitelist,
        variable_names_blacklist=blacklist,
    )

    # Serialize the frozen GraphDef to disk.
    with tf.gfile.GFile(filename, "wb") as f:
        f.write(frozen.SerializeToString())
def _export_one_part(self, sess, export_dir, XtoY, model_name):
    """Freeze one translation direction of the CycleGAN and write it out.

    Builds an inference sub-graph (named 'input'/'output'/'d_input'/'d_output'
    nodes), folds variables into constants and writes the result under
    `export_dir` as `model_name`.
    """
    with self.graph.as_default():
        # Pick the forward/backward networks and feeds for this direction.
        if XtoY:
            fwd, bwd = self.XtoY, self.YtoX
            normer = self.X_feed.normalize
            denormer = self.Y_feed.denormalize
            in_shape = self.XtoY.in_shape
        else:
            fwd, bwd = self.YtoX, self.XtoY
            normer = self.Y_feed.normalize
            denormer = self.X_feed.denormalize
            in_shape = self.YtoX.out_shape

        # Single-image input: add a batch dimension of 1 after normalizing.
        placeholder = tf.placeholder(tf.float32, shape=in_shape, name='input')
        data_in = tf.expand_dims(normer(placeholder), 0)

        out = fwd.gen(data_in)
        d_in = fwd.dis(data_in)
        d_out = bwd.dis(out)

        # Expose named nodes that the exported graph will be queried by.
        denormer(tf.squeeze(out, axis=0), name='output')
        tf.reduce_mean(d_in, name='d_input')
        tf.reduce_mean(d_out, name='d_output')

        # Freeze, strip training-only nodes, then keep only what SAVE_NODES needs.
        frozen = gu.convert_variables_to_constants(
            sess, self.graph.as_graph_def(), CycleGAN.SAVE_NODES)
        frozen = gu.remove_training_nodes(frozen, CycleGAN.SAVE_NODES)
        frozen = gu.extract_sub_graph(frozen, CycleGAN.SAVE_NODES)

        tf.train.write_graph(frozen, export_dir, model_name, as_text=False)
def freeze_graph(input_checkpoint, output_graph):
    """Freeze the WCT style-transfer checkpoint into a single 'wct.pb'.

    :param input_checkpoint: checkpoint path prefix (without '.meta')
    :param output_graph: directory in which the frozen model is written
    """
    # Output node names — these must exist in the original graph.
    output_nodes = [
        "encoder_decoder_relu5_1/decoder_relu5_1/decoder_model_relu5_1/relu5_1_16/relu5_1_16/BiasAdd",
        "style_encoder/model_2/relu5_1/Relu",
    ]

    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)

    # Debug aid: dump every node name of the imported graph.
    for n in tf.get_default_graph().as_graph_def().node:
        print("---" * 3, n.name)

    with tf.Session() as sess:
        # Debug aid: dump node names as seen from the session.
        for n in sess.graph_def.node:
            print('**', n.name)

        # Restore variable values, then fold them into constants.
        saver.restore(sess, input_checkpoint)
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=output_nodes)

        # Serialize the frozen model.
        output_file = os.path.join(output_graph, 'wct.pb')
        with tf.gfile.GFile(output_file, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))
def freeze_graph(input_checkpoint, output_graph):
    """Freeze a trained classifier checkpoint into a binary .pb file.

    :param input_checkpoint: init checkpoint save path (prefix, no '.meta')
    :param output_graph: pb model save path
    :return: None
    """
    # The output node name must exist in the original graph.
    node_names = "loss/Softmax"

    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
    serialized_graph = tf.get_default_graph().as_graph_def()

    with tf.Session() as sess:
        # NOTE: the BERT model requires the `create_model` function to have
        # been loaded for this session.
        saver.restore(sess, input_checkpoint)

        # Fold restored variable values into graph constants.
        frozen = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=serialized_graph,
            output_node_names=node_names.split(","))

        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(frozen.SerializeToString())
        print("%d ops in the final graph." % len(frozen.node))
        print(frozen.node)
def save(self, sess, prefix):
    """Freeze the default graph and write it to '<prefix>.model'."""
    # Fold variables into constants, keeping only the registered output nodes.
    node_names = list(self.output_nodes.values())
    frozen = convert_variables_to_constants(
        sess, tf.get_default_graph().as_graph_def(), node_names)

    path = '%s.model' % prefix
    with tf.gfile.GFile(path, "wb") as f:
        f.write(frozen.SerializeToString())
def freeze():
    """Freeze the HAR CNN checkpoint into 'checkpoints-cnn/har-model.pb'.

    Imports the meta graph, restores the latest weights, converts all
    variables to constants and serializes the resulting GraphDef.
    """
    saver = tf.train.import_meta_graph('checkpoints-cnn/har.meta', clear_devices=True)
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()
    output_node_names = "y_pred"
    output_graph = "checkpoints-cnn/har-model.pb"

    # Bug fix: the original created the session without a context manager, so
    # it leaked if restore/convert raised before the final sess.close().
    with tf.Session() as sess:
        saver.restore(sess, "checkpoints-cnn/har")
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, input_graph_def, output_node_names.split(","))
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
def _export_cp_one_part(gen: BaseNet, out_dis: BaseNet, in_dis: BaseNet,
                        normer, denormer, in_shape, cp_dir, export_dir,
                        model_name):
    # Freeze one CycleGAN direction from the latest checkpoint in `cp_dir`
    # and write it under `export_dir/<step>/<model_name>`; returns the step
    # string parsed from the checkpoint's meta path.
    graph = tf.Graph()
    with graph.as_default():
        # data_in = tf.expand_dims(normer(tf.placeholder(tf.float32,
        #                                                shape=in_shape,
        #                                                name='input')), 0)
        # above is for one image per batch, this is for any size batch
        data_in = normer(
            tf.placeholder(tf.float32,
                           shape=[None] + list(in_shape),
                           name='input'))
        out = gen.build(data_in)
        d_in = in_dis.build(data_in)
        d_out = out_dis.build(out)
        # denormer(tf.squeeze(out, axis=0), name='output')
        # above is for one image per batch, this is for any size batch
        denormer(out, name='output')
        # Named scalar nodes so the exported graph exposes discriminator scores.
        tf.reduce_mean(d_in, name='d_input')
        tf.reduce_mean(d_out, name='d_output')
        # Saver must be constructed inside the graph so it sees its variables.
        restore = tf.train.Saver()
    # no need to use GPU for export
    config = tf.ConfigProto(device_count={'GPU': 0})
    config.gpu_options.allow_growth = True
    with tf.Session(graph=graph, config=config) as sess:
        # Initialize first, then overwrite with checkpoint values.
        sess.run(tf.global_variables_initializer())
        cp = tf.train.latest_checkpoint(cp_dir)
        restore.restore(sess, cp)
        # Freeze, strip training nodes, then cut the graph down to SAVE_NODES.
        output_graph_def = gu.extract_sub_graph(
            gu.remove_training_nodes(
                gu.convert_variables_to_constants(sess, graph.as_graph_def(),
                                                  CycleGAN.SAVE_NODES),
                CycleGAN.SAVE_NODES), CycleGAN.SAVE_NODES)
        # just getting the current step
        meta_graph_path = cp + '.meta'
        step = str(CycleGAN.meta_path_to_step(meta_graph_path))
        tf.train.write_graph(output_graph_def,
                             osp.join(export_dir, step),
                             model_name,
                             as_text=False)
    return step
def save_model(checkpoint_dir, output):
    """Freeze the latest checkpoint in `checkpoint_dir` into a .pb at `output`.

    Finds any .meta file in the directory, restores the latest weights,
    converts variables to constants (keeping the 'add_39' output node) and
    serializes the frozen GraphDef.
    """
    meta_graph = [meta for meta in os.listdir(checkpoint_dir) if '.meta' in meta]
    assert (len(meta_graph) > 0)

    # Bug fix: the original created the session without a context manager, so
    # it leaked if restore/convert raised before the final sess.close().
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(
            os.path.join(checkpoint_dir, meta_graph[0]))
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
        graph = tf.get_default_graph()
        input_graph_def = graph.as_graph_def()
        output_node_names = 'add_39'
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, input_graph_def, output_node_names.split(","))
        with tf.gfile.GFile(output, "wb") as f:
            f.write(output_graph_def.SerializeToString())
def save(model, fname, sess):
    """Save a trained Keras model both as a .h5 file and a frozen .pb graph.

    Args
        model (Sequential): The compiled and trained model to save
        fname (str): filename to save file as (any extension is stripped)
        sess: active session holding the model's variables
    """
    # print a summary
    print(model.summary())

    # make sure there is not a filetype included
    fname = fname.split('.', 1)[0]

    # Keras format, optimizer state included.
    h5_path = MOD_DIR + fname + '.h5'
    model.save(h5_path, include_optimizer=True)
    print('Keras model saved as ' + h5_path)

    # Frozen GraphDef format.
    frozen = graph_util.convert_variables_to_constants(
        sess, sess.graph_def, ["output/Softmax"])
    pb_path = MOD_DIR + fname + '.pb'
    write_graph(frozen, './', pb_path, as_text=False)
    print('Frozen model saved as ' + pb_path)
def _export_cp_one_part(gen, out_dis, in_dis, normer, denormer, in_shape,
                        cp_dir, export_dir, model_name):
    # Freeze one CycleGAN direction from the latest checkpoint in `cp_dir`
    # and write it under `export_dir/<step>/<model_name>`; returns the step
    # string parsed from the checkpoint filename.
    graph = tf.Graph()
    with graph.as_default():
        # Single-image input: normalize then add a batch dimension of 1.
        data_in = tf.expand_dims(
            normer(tf.placeholder(tf.float32, shape=in_shape, name='input')),
            0)
        out = gen(data_in)
        d_in = in_dis(data_in)
        d_out = out_dis(out)
        # Named nodes the exported graph is queried by.
        denormer(tf.squeeze(out, axis=0), name='output')
        tf.reduce_mean(d_in, name='d_input')
        tf.reduce_mean(d_out, name='d_output')
        # Saver must be built inside the graph so it sees its variables.
        restore = tf.train.Saver()
    # no need to use GPU for export
    config = tf.ConfigProto(device_count={'GPU': 0})
    config.gpu_options.allow_growth = True
    with tf.Session(graph=graph, config=config) as sess:
        # Initialize first, then overwrite with checkpoint values.
        sess.run(tf.global_variables_initializer())
        cp = tf.train.latest_checkpoint(cp_dir)
        restore.restore(sess, cp)
        # Freeze, strip training nodes, then cut the graph down to SAVE_NODES.
        output_graph_def = gu.extract_sub_graph(
            gu.remove_training_nodes(
                gu.convert_variables_to_constants(sess, graph.as_graph_def(),
                                                  CycleGAN.SAVE_NODES),
                CycleGAN.SAVE_NODES), CycleGAN.SAVE_NODES)
        # just getting the current step
        # assumes checkpoint names look like '<name>-<step>.meta' — TODO confirm
        meta_graph_path = cp + '.meta'
        step = str(
            osp.basename(meta_graph_path).split('-')[1].split('.')[0])
        tf.train.write_graph(output_graph_def, osp.join(export_dir, step),
                             model_name, as_text=False)
    return step
import os
import tensorflow as tf
from tensorflow import graph_util
from models import ShuffleNet_model as model

# Build a ShuffleNet inference graph and serialize it (with freshly
# initialized — NOT trained — weights) to data/Shufflenet64_g3.pb.
# NOTE(review): variables are only run through global_variables_initializer;
# no checkpoint is restored, so the exported weights are random — confirm
# this is intentional (e.g. a graph-structure export).

# Placeholder for 64x64 single-channel input batches.
input_image_shape = tf.placeholder(dtype=tf.float32,
                                   shape=[None, 64, 64, 1],
                                   name='inputs')
logits = model(num_classes=7, num_groups=3)(input_image_shape)
# Identity op gives the logits a stable, well-known node name.
outputs = tf.identity(logits, 'outputs')
# predicted_classes = tf.argmax(logits, axis=1, name='outputs')
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer())

graph = tf.get_default_graph()
input_graph_def = graph.as_graph_def()
output_graph_def = graph_util.convert_variables_to_constants(
    sess,
    input_graph_def,
    # We split on comma for convenience
    ["outputs"])

# # Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(os.path.join('data', 'Shufflenet64_g3.pb'), "wb") as f:
    f.write(output_graph_def.SerializeToString())
def Model2Kinetica(self, pbfile=None, sess=None, graph=None,
                   output_node_names=None, ModelName="Model", Loss=0.99,
                   COLLECTION="Network"):
    """Store a frozen TF model as a record in the Kinetica 'TFmodel' table.

    pbfile can be a path to a local file or a pickle bytes. When pbfile is
    None, the model is frozen from `sess`/`graph` using `output_node_names`
    (comma-separated, e.g. "input,output,output2").

    Returns the generated model_id (uuid1 string).
    """
    import uuid
    from time import gmtime, strftime
    datetime = strftime("%Y-%m-%d %H:%M:%S", gmtime())
    from tensorflow import graph_util

    h_db = self.h_db

    # Create the model table if it does not exist yet.
    table = 'TFmodel'
    type_def = self.file_type
    if not h_db.has_table(table_name=table)['table_exists']:
        response = h_db.create_type(type_definition=type_def,
                                    label=table,
                                    properties=self.type_properties)
        h_db.create_table(table_name=table,
                          type_id=response['type_id'],
                          options={"collection_name": COLLECTION})

    # Obtain the model's binary serialization.
    if pbfile is not None:  # fixed: identity test instead of `!= None`
        # Heuristic: short values are treated as file paths, long ones as the
        # serialized bytes themselves. NOTE(review): a path longer than 255
        # chars would be misclassified — confirm acceptable for callers.
        if len(pbfile) < 256:
            # Fixed: the original leaked the file handle (open().read()).
            with open(pbfile, 'rb') as fh:
                model = fh.read()
        else:
            model = pbfile
    else:
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            graph.as_graph_def(),
            output_node_names.split(
                ","
            )  # The output node names are used to select the usefull nodes
        )
        model = output_graph_def.SerializeToString()

    # Insert the model record into Kinetica.
    encoded_obj_list = []
    ID = str(uuid.uuid1())
    datum = collections.OrderedDict()
    datum["model_binary"] = model
    datum["model"] = ModelName
    datum["model_id"] = ID
    datum["Accuracy"] = Loss
    datum["Data_Time_created"] = datetime
    encoded_obj_list.append(h_db.encode_datum(self.file_type, datum))
    options = {'update_on_existing_pk': 'true'}
    response = h_db.insert_records(table_name=table,
                                   data=encoded_obj_list,
                                   list_encoding='binary',
                                   options=options)
    return ID
import os
import argparse

import tensorflow as tf
from tensorflow import graph_util

# Freeze the latest checkpoint in --checkpoint_dir into a single binary
# GraphDef at --output, keeping only the 'add_39' output node.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_dir', dest='checkpoint_dir', required=True)
parser.add_argument('--output', dest='output', required=True)
args = parser.parse_args()

meta_graph = [
    meta for meta in os.listdir(args.checkpoint_dir) if '.meta' in meta
]
assert (len(meta_graph) > 0)

# Bug fix: the original opened the session without a context manager, so it
# leaked if restore/convert raised before the final sess.close().
with tf.Session() as sess:
    saver = tf.train.import_meta_graph(
        os.path.join(args.checkpoint_dir, meta_graph[0]))
    saver.restore(sess, tf.train.latest_checkpoint(args.checkpoint_dir))
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()
    output_node_names = 'add_39'
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, input_graph_def, output_node_names.split(","))
    with tf.gfile.GFile(args.output, "wb") as f:
        f.write(output_graph_def.SerializeToString())
from model.datasets import Dataset, Dataloader, get_training_augmentation, get_validation_augmentation, visualize

H5_MODEL = "saved_models/config13/model_.112-0.386539.h5"
OUTPUT_PB_FILE = "lane_segmentation_384x384.pb"

# Reload the model and the best weights.
# NOTE(review): `tf`, `convert_variables_to_constants` and
# `remove_training_nodes` must be imported earlier in this file — this chunk
# does not show those imports.
tf.keras.backend.clear_session()
# Inference mode: disables dropout/batch-norm training behavior in the graph.
tf.keras.backend.set_learning_phase(False)
model = tf.keras.models.load_model(H5_MODEL, compile=False)
# model.layers.pop()
# input = model.input
# last_layer = 'final_conv'
# output = (model.get_layer(name=last_layer).output if isinstance(last_layer, str)
#           else model.get_layer(index=last_layer).output)
# model = tf.keras.Model(inputs=input, outputs=output)
model.summary()

# Freeze model and save
# First freeze the graph and remove training nodes.
sess = tf.keras.backend.get_session()
input_graph_def = sess.graph.as_graph_def()
# The frozen graph keeps only ops reachable from the model's output op.
output_names = model.output.op.name
print(output_names)
frozen_graph = convert_variables_to_constants(sess, input_graph_def,
                                              [output_names])
frozen_graph = remove_training_nodes(frozen_graph)

# Save the model
with tf.gfile.GFile(OUTPUT_PB_FILE, "wb") as ofile:
    ofile.write(frozen_graph.SerializeToString())
print("saved model to {}".format(OUTPUT_PB_FILE))