def export_model(sess, export_path, export_version, x, y):
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=x,
                                                  scores_tensor=y)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
    print("Export Finished!")
def export_model(sess, x, y):
    print('Exporting trained model to %s' % DATA_PATH)
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=x,
                                                  scores_tensor=y)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(DATA_PATH, tf.constant(FLAGS.export_version), sess)
    print('Done exporting!')
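# A minimal usage sketch for the export_model() variants above. The tiny
# MNIST-style graph, the export path, and the version number are placeholders;
# the exporter import path follows the snippets below (the session_bundle
# exporter moved between tensorflow_serving.session_bundle and
# tensorflow.contrib.session_bundle across releases).
import tensorflow as tf
from tensorflow_serving.session_bundle import exporter

sess = tf.InteractiveSession()
x = tf.placeholder('float', shape=[None, 784])
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, w) + b)
sess.run(tf.initialize_all_variables())

# Writes /tmp/mnist_model/00000001/ with the serialized graph and variables.
export_model(sess, '/tmp/mnist_model', 1, x, y)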
def main(_):
    if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
        print('Usage: mnist_export.py [--training_iteration=x] '
              '[--export_version=y] export_dir')
        sys.exit(-1)
    if FLAGS.training_iteration <= 0:
        print('Please specify a positive value for training iteration.')
        sys.exit(-1)
    if FLAGS.export_version <= 0:
        print('Please specify a positive value for version number.')
        sys.exit(-1)

    # Train model
    print('Training model...')
    mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
    sess = tf.InteractiveSession()
    x = tf.placeholder('float', shape=[None, 784])
    y_ = tf.placeholder('float', shape=[None, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tf.initialize_all_variables())
    y = tf.nn.softmax(tf.matmul(x, w) + b)
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
        cross_entropy)
    for _ in range(FLAGS.training_iteration):
        batch = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    print('training accuracy %g' % sess.run(
        accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    print('Done training!')

    # Export model
    # WARNING(break-tutorial-inline-code): The following code snippet is
    # in-lined in tutorials, please update tutorial documents accordingly
    # whenever code changes.
    export_path = sys.argv[-1]
    print('Exporting trained model to %s' % export_path)
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=x,
                                                  scores_tensor=y)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
    print('Done exporting!')
def export(sess, previous_model, export_path, export_version):
    K.set_learning_phase(0)  # all new operations will be in test mode from now on

    # Serialize the model and get its weights, for quick re-building.
    config = previous_model.get_config()
    weights = previous_model.get_weights()

    # Re-build a model where the learning phase is now hard-coded to 0.
    new_model = model_from_config(config)
    new_model.set_weights(weights)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input, scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
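# A hypothetical call site for the Keras export() above. K, model_from_config
# and the tiny Sequential model are assumptions standing in for the
# surrounding script's imports and actual trained model.
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential, model_from_config
from keras.layers import Dense

previous_model = Sequential()
previous_model.add(Dense(10, activation='softmax', input_shape=(784,)))
previous_model.compile(optimizer='sgd', loss='categorical_crossentropy')

sess = K.get_session()
export(sess, previous_model, './keras_export', 1)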
def export():
    # Create index->synset mapping
    synsets = []
    with open(SYNSET_FILE) as f:
        synsets = f.read().splitlines()
    # Create synset->metadata mapping
    texts = {}
    with open(METADATA_FILE) as f:
        for line in f.read().splitlines():
            parts = line.split('\t')
            assert len(parts) == 2
            texts[parts[0]] = parts[1]

    with tf.Graph().as_default():
        # Build inference model.
        # Please refer to Tensorflow inception model for details.

        # Input transformation.
        # TODO(b/27776734): Add batching support.
        jpegs = tf.placeholder(tf.string, shape=(1))
        image_buffer = tf.squeeze(jpegs, [0])
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)
        # After this point, all image pixels reside in [0,1)
        # until the very end, when they're rescaled to (-1, 1). The various
        # adjust_* ops all require this range for dtype float.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Crop the central region of the image with an area containing 87.5%
        # of the original image.
        image = tf.image.central_crop(image, central_fraction=0.875)
        # Resize the image to the original height and width.
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(image,
                                         [FLAGS.image_size, FLAGS.image_size],
                                         align_corners=False)
        image = tf.squeeze(image, [0])
        # Finally, rescale to [-1,1] instead of [0, 1)
        image = tf.sub(image, 0.5)
        image = tf.mul(image, 2.0)
        images = tf.expand_dims(image, 0)

        # Run inference.
        logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

        # Transform output to topK result.
        values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

        # Create a constant string Tensor where the i'th element is
        # the human readable class description for the i'th index.
        class_tensor = tf.constant([texts[s] for s in synsets])
        classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                    mapping=class_tensor)

        # Restore variables from training checkpoint.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception_model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            # Restore variables from training checkpoints.
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/imagenet_train/model.ckpt-0,
                # extract global_step from it.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                print('Successfully loaded model from %s at step=%s.' %
                      (ckpt.model_checkpoint_path, global_step))
            else:
                print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
                return

            # Export inference model.
            init_op = tf.group(tf.initialize_all_tables(), name='init_op')
            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(
                input_tensor=jpegs, classes_tensor=classes,
                scores_tensor=values)
            model_exporter.init(default_graph_signature=signature,
                                init_op=init_op)
            model_exporter.export(FLAGS.export_dir, tf.constant(global_step),
                                  sess)
            print('Successfully exported model to %s' % FLAGS.export_dir)
def export():
    # Create index->synset mapping
    synsets = []
    with open(SYNSET_FILE) as f:
        synsets = f.read().splitlines()
    # Create synset->metadata mapping
    texts = {}
    with open(METADATA_FILE) as f:
        for line in f.read().splitlines():
            parts = line.split('\t')
            assert len(parts) == 2
            texts[parts[0]] = parts[1]

    with tf.Graph().as_default():
        # Build inference model.
        # Please refer to Tensorflow inception model for details.

        # Input transformation.
        # TODO(b/27776734): Add batching support.
        jpegs = tf.placeholder(tf.string, shape=(1))
        image_buffer = tf.squeeze(jpegs, [0])
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)
        # After this point, all image pixels reside in [0,1)
        # until the very end, when they're rescaled to (-1, 1). The various
        # adjust_* ops all require this range for dtype float.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Crop the central region of the image with an area containing 87.5%
        # of the original image.
        image = tf.image.central_crop(image, central_fraction=0.875)
        # Resize the image to the original height and width.
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(image,
                                         [FLAGS.image_size, FLAGS.image_size],
                                         align_corners=False)
        image = tf.squeeze(image, [0])
        # Finally, rescale to [-1,1] instead of [0, 1)
        image = tf.sub(image, 0.5)
        image = tf.mul(image, 2.0)
        images = tf.expand_dims(image, 0)

        # Run inference.
        logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

        # Transform output to topK result.
        values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

        # Create a constant string Tensor where the i'th element is
        # the human readable class description for the i'th index.
        class_tensor = tf.constant([texts[s] for s in synsets])
        classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                    mapping=class_tensor)

        # Restore variables from training checkpoint.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception_model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            # Restore variables from training checkpoints.
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                full_path = os.path.join(FLAGS.checkpoint_dir,
                                         ckpt.model_checkpoint_path)
                saver.restore(sess, full_path)
                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/imagenet_train/model.ckpt-0,
                # extract global_step from it.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                print('Successfully loaded model from %s at step=%s.' %
                      (full_path, global_step))
            else:
                print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
                return

            # Export inference model.
            init_op = tf.group(tf.initialize_all_tables(), name='init_op')
            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(
                input_tensor=jpegs, classes_tensor=classes,
                scores_tensor=values)
            model_exporter.init(default_graph_signature=signature,
                                init_op=init_op)
            model_exporter.export(FLAGS.export_dir, tf.constant(global_step),
                                  sess)
            print('Successfully exported model to %s' % FLAGS.export_dir)
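# A minimal sketch of the flags the Inception export() above reads. The flag
# names come from the snippet itself; the default values here are
# placeholders, not the original configuration.
import tensorflow as tf

tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/imagenet_train',
                           'Directory containing training checkpoints.')
tf.app.flags.DEFINE_string('export_dir', '/tmp/inception_export',
                           'Directory where the inference model is written.')
tf.app.flags.DEFINE_integer('image_size', 299,
                            'Spatial size the input image is resized to.')
FLAGS = tf.app.flags.FLAGS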
def main(argv=None):
    with tf.name_scope('batch_inputs'):
        # Read inventory of training images and labels
        train_file = "./train.txt"
        valid_file = "./valid.txt"
        image_size = IMAGE_SIZE // IMAGE_RESIZE_FACTOR
        train_image_batch, train_label_batch = inputs(
            train_file, batch_size=TRAIN_BATCH_SIZE,
            input_name="training", num_epochs=TRAIN_EPOCHS)
        valid_image_batch, valid_label_batch = inputs(
            valid_file, batch_size=VALID_BATCH_SIZE,
            input_name="validation", num_epochs=VALID_EPOCHS)

    x_ = tf.placeholder("float32",
                        shape=[None, image_size, image_size, IMAGE_CHANNELS],
                        name="image_batch_placeholder")
    y_ = tf.placeholder("float32", shape=[None, NUM_CLASSES],
                        name="label_batch_placeholder")

    # Store layers weight & bias
    with tf.name_scope('weights'):
        weights = {
            # 5x5 conv, 1 input, 32 outputs
            'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
            # 5x5 conv, 32 inputs, 64 outputs
            'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
            # 5x5 conv, 64 inputs, 128 outputs
            'wc3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
            # 5x5 conv, 128 inputs, 256 outputs
            'wc4': tf.Variable(tf.random_normal([5, 5, 128, 256])),
            # fully connected, 10*10*256 inputs, 1024 outputs
            'wd1': tf.Variable(tf.random_normal([10 * 10 * 256, 1024])),
            # 1024 inputs, NUM_CLASSES class labels (prediction)
            'out': tf.Variable(tf.random_normal([1024, NUM_CLASSES]))
        }
    with tf.name_scope('biases'):
        biases = {
            'bc1': tf.Variable(tf.random_normal([32])),
            'bc2': tf.Variable(tf.random_normal([64])),
            'bc3': tf.Variable(tf.random_normal([128])),
            'bc4': tf.Variable(tf.random_normal([256])),
            'bd1': tf.Variable(tf.random_normal([1024])),
            'out': tf.Variable(tf.random_normal([NUM_CLASSES]))
        }

    # Create the graph, etc.
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    pred = conv_net(x_, weights, biases, image_size, keep_prob)
    with tf.name_scope('serving'):
        softmax_pred = tf.nn.softmax(
            conv_net(x_, weights, biases, image_size, 1.0))

    # Calculate loss
    with tf.name_scope('cross_entropy'):
        # Define loss and optimizer
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y_))
        cost_summary = tf.scalar_summary("cost_summary", cost)

    # Optimiser
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(
            learning_rate=LEARNING_RATE).minimize(cost)

    # Evaluate model
    with tf.name_scope('predict'):
        correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        accuracy_summary = tf.scalar_summary("accuracy_summary", accuracy)

    sess = tf.Session()
    writer = tf.train.SummaryWriter("./logs", sess.graph)
    merged = tf.merge_all_summaries()
    init_op = tf.initialize_all_variables()
    step = 0

    with sess.as_default():
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            while not coord.should_stop():
                step += 1
                x, y = sess.run([train_image_batch, train_label_batch])
                if step % TRAIN_BATCH_SIZE == 0:
                    result = sess.run([merged, accuracy],
                                      feed_dict={keep_prob: 1.0, x_: x, y_: y})
                    summary_str = result[0]
                    acc = result[1]
                    writer.add_summary(summary_str, step)
                    print("Accuracy at step %s: %s" % (step, acc))
                train_step.run(feed_dict={keep_prob: 0.75, x_: x, y_: y})
        except tf.errors.OutOfRangeError:
            x, y = sess.run([valid_image_batch, valid_label_batch])
            result = sess.run([accuracy],
                              feed_dict={keep_prob: 1.0, x_: x, y_: y})
            print("Validation accuracy: %s" % result[0])
        finally:
            coord.request_stop()
            coord.join(threads)

    # Export the model
    export_path = "./model/"
    print("Exporting model to " + export_path)
    saver = tf.train.Saver(sharded=False)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=x_,
                                                  scores_tensor=softmax_pred)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(EXPORT_VERSION), sess)
    sess.close()
    return 0
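# A minimal sketch of the conv_net() helper the training script above assumes.
# The real implementation is not shown in the snippet, so the strides, SAME
# padding, and 2x2 max-pooling (four halvings of the spatial size, matching
# the 10*10*256 fully-connected input) are assumptions.
def conv_net(x, weights, biases, image_size, keep_prob):
    def conv2d(x, w, b):
        # Convolution with stride 1 followed by a ReLU.
        x = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
        return tf.nn.relu(tf.nn.bias_add(x, b))

    def maxpool2d(x):
        # 2x2 max-pool halves the spatial dimensions.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME')

    # Four conv+pool stages, each halving the spatial size.
    conv1 = maxpool2d(conv2d(x, weights['wc1'], biases['bc1']))
    conv2 = maxpool2d(conv2d(conv1, weights['wc2'], biases['bc2']))
    conv3 = maxpool2d(conv2d(conv2, weights['wc3'], biases['bc3']))
    conv4 = maxpool2d(conv2d(conv3, weights['wc4'], biases['bc4']))

    # Flatten and run the fully-connected layer with dropout.
    fc1 = tf.reshape(conv4, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.nn.relu(tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']))
    fc1 = tf.nn.dropout(fc1, keep_prob)

    # Output layer returns unnormalised logits.
    return tf.add(tf.matmul(fc1, weights['out']), biases['out'])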
def doBasicsOneExportPath(self,
                          export_path,
                          clear_devices=False,
                          global_step=GLOBAL_STEP):
    # Build a graph with 2 parameter nodes on different devices.
    tf.reset_default_graph()
    with tf.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        # v2 is an unsaved variable derived from v0 and v1. It is used to
        # exercise the ability to run an init op when restoring a graph.
        with sess.graph.device("/cpu:0"):
            v0 = tf.Variable(10, name="v0")
        with sess.graph.device("/cpu:1"):
            v1 = tf.Variable(20, name="v1")
        v2 = tf.Variable(1, name="v2", trainable=False, collections=[])
        assign_v2 = tf.assign(v2, tf.add(v0, v1))
        init_op = tf.group(assign_v2, name="init_op")
        tf.add_to_collection("v", v0)
        tf.add_to_collection("v", v1)
        tf.add_to_collection("v", v2)
        global_step_tensor = tf.Variable(global_step, name="global_step")
        named_tensor_bindings = {"logical_input_A": v0, "logical_input_B": v1}
        signatures = {
            "foo": exporter.regression_signature(input_tensor=v0,
                                                 output_tensor=v1),
            "generic": exporter.generic_signature(named_tensor_bindings)
        }

        def write_asset(path):
            file_path = os.path.join(path, "file.txt")
            with gfile.FastGFile(file_path, "w") as f:
                f.write("your data here")

        asset_file = tf.Variable("hello42.txt", name="filename42")
        assets = {("hello42.txt", asset_file)}
        tf.initialize_all_variables().run()

        # Run an export.
        save = tf.train.Saver({"v0": v0, "v1": v1},
                              restore_sequentially=True,
                              sharded=True)
        export = exporter.Exporter(save)
        export.init(
            sess.graph.as_graph_def(),
            init_op=init_op,
            clear_devices=clear_devices,
            default_graph_signature=exporter.classification_signature(
                input_tensor=v0),
            named_graph_signatures=signatures,
            assets=assets,
            assets_callback=write_asset)
        export.export(export_path,
                      global_step_tensor,
                      sess,
                      exports_to_keep=gc.largest_export_versions(2))

    # Restore graph.
    compare_def = tf.get_default_graph().as_graph_def()
    tf.reset_default_graph()
    with tf.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        save = tf.train.import_meta_graph(
            os.path.join(export_path,
                         exporter.VERSION_FORMAT_SPECIFIER % global_step,
                         exporter.META_GRAPH_DEF_FILENAME))
        self.assertIsNotNone(save)
        meta_graph_def = save.export_meta_graph()
        collection_def = meta_graph_def.collection_def

        # Validate custom graph_def.
        graph_def_any = collection_def[exporter.GRAPH_KEY].any_list.value
        self.assertEquals(len(graph_def_any), 1)
        graph_def = tf.GraphDef()
        graph_def_any[0].Unpack(graph_def)
        if clear_devices:
            for node in compare_def.node:
                node.device = ""
        self.assertProtoEquals(compare_def, graph_def)

        # Validate init_op.
        init_ops = collection_def[exporter.INIT_OP_KEY].node_list.value
        self.assertEquals(len(init_ops), 1)
        self.assertEquals(init_ops[0], "init_op")

        # Validate signatures.
        signatures_any = collection_def[exporter.SIGNATURES_KEY].any_list.value
        self.assertEquals(len(signatures_any), 1)
        signatures = manifest_pb2.Signatures()
        signatures_any[0].Unpack(signatures)
        default_signature = signatures.default_signature
        self.assertEqual(
            default_signature.classification_signature.input.tensor_name,
            "v0:0")
        bindings = signatures.named_signatures["generic"].generic_signature.map
        self.assertEquals(bindings["logical_input_A"].tensor_name, "v0:0")
        self.assertEquals(bindings["logical_input_B"].tensor_name, "v1:0")
        read_foo_signature = (
            signatures.named_signatures["foo"].regression_signature)
        self.assertEquals(read_foo_signature.input.tensor_name, "v0:0")
        self.assertEquals(read_foo_signature.output.tensor_name, "v1:0")

        # Validate the assets.
        assets_any = collection_def[exporter.ASSETS_KEY].any_list.value
        self.assertEquals(len(assets_any), 1)
        asset = manifest_pb2.AssetFile()
        assets_any[0].Unpack(asset)
        assets_path = os.path.join(export_path,
                                   exporter.VERSION_FORMAT_SPECIFIER %
                                   global_step,
                                   exporter.ASSETS_DIRECTORY, "file.txt")
        asset_contents = gfile.GFile(assets_path).read()
        self.assertEqual(asset_contents, "your data here")
        self.assertEquals("hello42.txt", asset.filename)
        self.assertEquals("filename42:0", asset.tensor_binding.tensor_name)

        # Validate graph restoration.
        save.restore(sess,
                     os.path.join(export_path,
                                  exporter.VERSION_FORMAT_SPECIFIER %
                                  global_step,
                                  exporter.VARIABLES_DIRECTORY))
        self.assertEqual(10, tf.get_collection("v")[0].eval())
        self.assertEqual(20, tf.get_collection("v")[1].eval())
        tf.get_collection(exporter.INIT_OP_KEY)[0].run()
        self.assertEqual(30, tf.get_collection("v")[2].eval())
new_model.set_weights(weights)

import tensorflow as tf
import sys

sys.path.insert(0, '/Users/dan.dixey/Desktop/QBiz/serving')
# Unable to import THIS!! Why?
from tensorflow_serving.session_bundle import exporter

sess = K.get_session()

export_path = './Serving'  # where to save the exported graph
export_version = 1         # version number (integer)

saver = tf.train.Saver(sharded=True)
model_exporter = exporter.Exporter(saver)
signature = exporter.classification_signature(input_tensor=new_model.input,
                                              scores_tensor=new_model.output)
model_exporter.init(sess.graph.as_graph_def(),
                    default_graph_signature=signature)
model_exporter.export(export_path, tf.constant(export_version), sess)

if show_activation:
    """ Experimenting with the different conv-net layers to visualise their
    outputs. This section will push data through the layer and record the
    activations, normalise (min/max) the data in the whole 3D array, slice
    for one layer of the 3D array, and plot using Matplotlib the output of
    10 slice...
def doBasicsOneExportPath(self,
                          export_path,
                          clear_devices=False,
                          global_step=GLOBAL_STEP,
                          sharded=True):
    # Build a graph with 2 parameter nodes on different devices.
    tf.reset_default_graph()
    with tf.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        # v2 is an unsaved variable derived from v0 and v1. It is used to
        # exercise the ability to run an init op when restoring a graph.
        with sess.graph.device("/cpu:0"):
            v0 = tf.Variable(10, name="v0")
        with sess.graph.device("/cpu:1"):
            v1 = tf.Variable(20, name="v1")
        v2 = tf.Variable(1, name="v2", trainable=False, collections=[])
        assign_v2 = tf.assign(v2, tf.add(v0, v1))
        init_op = tf.group(assign_v2, name="init_op")
        tf.add_to_collection("v", v0)
        tf.add_to_collection("v", v1)
        tf.add_to_collection("v", v2)
        global_step_tensor = tf.Variable(global_step, name="global_step")
        named_tensor_bindings = {"logical_input_A": v0, "logical_input_B": v1}
        signatures = {
            "foo": exporter.regression_signature(input_tensor=v0,
                                                 output_tensor=v1),
            "generic": exporter.generic_signature(named_tensor_bindings)
        }

        asset_filepath_orig = os.path.join(tf.test.get_temp_dir(),
                                           "hello42.txt")
        asset_file = tf.constant(asset_filepath_orig, name="filename42")
        tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset_file)
        with gfile.FastGFile(asset_filepath_orig, "w") as f:
            f.write("your data here")
        assets_collection = tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS)

        ignored_asset = os.path.join(tf.test.get_temp_dir(), "ignored.txt")
        with gfile.FastGFile(ignored_asset, "w") as f:
            f.write("additional data here")

        tf.initialize_all_variables().run()

        # Run an export.
        save = tf.train.Saver({"v0": v0, "v1": v1},
                              restore_sequentially=True,
                              sharded=sharded)
        export = exporter.Exporter(save)
        export.init(sess.graph.as_graph_def(),
                    init_op=init_op,
                    clear_devices=clear_devices,
                    default_graph_signature=exporter.classification_signature(
                        input_tensor=v0),
                    named_graph_signatures=signatures,
                    assets_collection=assets_collection)
        export.export(export_path,
                      global_step_tensor,
                      sess,
                      exports_to_keep=gc.largest_export_versions(2))

    # Restore graph.
    compare_def = tf.get_default_graph().as_graph_def()
    tf.reset_default_graph()
    with tf.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        save = tf.train.import_meta_graph(
            os.path.join(export_path,
                         constants.VERSION_FORMAT_SPECIFIER % global_step,
                         constants.META_GRAPH_DEF_FILENAME))
        self.assertIsNotNone(save)
        meta_graph_def = save.export_meta_graph()
        collection_def = meta_graph_def.collection_def

        # Validate custom graph_def.
        graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
        self.assertEquals(len(graph_def_any), 1)
        graph_def = tf.GraphDef()
        graph_def_any[0].Unpack(graph_def)
        if clear_devices:
            for node in compare_def.node:
                node.device = ""
        self.assertProtoEquals(compare_def, graph_def)

        # Validate init_op.
        init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
        self.assertEquals(len(init_ops), 1)
        self.assertEquals(init_ops[0], "init_op")

        # Validate signatures.
        signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
        self.assertEquals(len(signatures_any), 1)
        signatures = manifest_pb2.Signatures()
        signatures_any[0].Unpack(signatures)
        default_signature = signatures.default_signature
        self.assertEqual(
            default_signature.classification_signature.input.tensor_name,
            "v0:0")
        bindings = signatures.named_signatures["generic"].generic_signature.map
        self.assertEquals(bindings["logical_input_A"].tensor_name, "v0:0")
        self.assertEquals(bindings["logical_input_B"].tensor_name, "v1:0")
        read_foo_signature = (
            signatures.named_signatures["foo"].regression_signature)
        self.assertEquals(read_foo_signature.input.tensor_name, "v0:0")
        self.assertEquals(read_foo_signature.output.tensor_name, "v1:0")

        # Validate the assets.
        assets_any = collection_def[constants.ASSETS_KEY].any_list.value
        self.assertEquals(len(assets_any), 1)
        asset = manifest_pb2.AssetFile()
        assets_any[0].Unpack(asset)
        assets_path = os.path.join(export_path,
                                   constants.VERSION_FORMAT_SPECIFIER %
                                   global_step,
                                   constants.ASSETS_DIRECTORY, "hello42.txt")
        asset_contents = gfile.GFile(assets_path).read()
        self.assertEqual(asset_contents, "your data here")
        self.assertEquals("hello42.txt", asset.filename)
        self.assertEquals("filename42:0", asset.tensor_binding.tensor_name)
        ignored_asset_path = os.path.join(export_path,
                                          constants.VERSION_FORMAT_SPECIFIER %
                                          global_step,
                                          constants.ASSETS_DIRECTORY,
                                          "ignored.txt")
        self.assertFalse(gfile.Exists(ignored_asset_path))

        # Validate graph restoration.
        if sharded:
            save.restore(sess,
                         os.path.join(
                             export_path,
                             constants.VERSION_FORMAT_SPECIFIER % global_step,
                             constants.VARIABLES_FILENAME_PATTERN))
        else:
            save.restore(sess,
                         os.path.join(
                             export_path,
                             constants.VERSION_FORMAT_SPECIFIER % global_step,
                             constants.VARIABLES_FILENAME))
        self.assertEqual(10, tf.get_collection("v")[0].eval())
        self.assertEqual(20, tf.get_collection("v")[1].eval())
        tf.get_collection(constants.INIT_OP_KEY)[0].run()
        self.assertEqual(30, tf.get_collection("v")[2].eval())
external_x = tf.placeholder(tf.string)
x = convert_external_inputs(external_x)
y = inference(x)

saver = tf.train.Saver()

with tf.Session() as sess:
    # Restore variables from training checkpoints.
    ckpt = tf.train.get_checkpoint_state(sys.argv[1])
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, sys.argv[1] + "/" + ckpt.model_checkpoint_path)
    else:
        print("Checkpoint file not found")
        raise SystemExit

    scores, class_ids = tf.nn.top_k(y, NUM_CLASSES_TO_RETURN)

    # For simplification we will just return the class ids; we should return
    # the names instead.
    classes = tf.contrib.lookup.index_to_string(
        tf.to_int64(class_ids),
        mapping=tf.constant([str(i) for i in range(1001)]))

    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=external_x, classes_tensor=classes, scores_tensor=scores)
    model_exporter.init(default_graph_signature=signature,
                        init_op=tf.initialize_all_tables())
    model_exporter.export(sys.argv[1] + "/export",
                          tf.constant(time.time()), sess)
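# A minimal sketch of the convert_external_inputs() helper the snippet above
# assumes: it turns the serialized JPEG string fed to external_x into the
# float image batch that inference() expects. The 224x224 target size is an
# assumption, not taken from the original code.
def convert_external_inputs(external_x):
    # Decode the JPEG string into an RGB image with float pixels in [0, 1).
    image = tf.image.convert_image_dtype(
        tf.image.decode_jpeg(external_x, channels=3), tf.float32)
    # Resize to the fixed spatial size the inference graph was trained on.
    images = tf.image.resize_bilinear(tf.expand_dims(image, 0), [224, 224])
    return images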
def main(argv=None):
    # Read inventory of training images and labels
    with tf.name_scope('batch_inputs'):
        train_file = "./train.txt"
        valid_file = "./valid.txt"
        image_size = IMAGE_SIZE
        train_image_batch, train_label_batch = inputs(
            train_file, batch_size=TRAIN_BATCH_SIZE, num_epochs=TRAIN_EPOCHS)
        valid_image_batch, valid_label_batch = inputs(
            valid_file, batch_size=VALID_BATCH_SIZE, num_epochs=VALID_EPOCHS)

    # These are image and label batch placeholders which we'll feed in
    # during training
    x_ = tf.placeholder("float32",
                        shape=[None, image_size, image_size, IMAGE_CHANNELS])
    y_ = tf.placeholder("float32", shape=[None, NUM_CLASSES])

    # k is the image size after 4 convolution layers
    k = int(math.ceil(IMAGE_SIZE / 2.0 / 2.0 / 2.0 / 2.0))

    # Store weights for our convolution & fully-connected layers
    with tf.name_scope('weights'):
        weights = {
            # 5x5 conv, IMAGE_CHANNELS input channels, 32 outputs each
            'wc1': tf.Variable(tf.random_normal([5, 5, 1 * IMAGE_CHANNELS, 32])),
            # 5x5 conv, 32 inputs, 64 outputs
            'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
            # 5x5 conv, 64 inputs, 128 outputs
            'wc3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
            # 5x5 conv, 128 inputs, 256 outputs
            'wc4': tf.Variable(tf.random_normal([5, 5, 128, 256])),
            # fully connected, k * k * 256 inputs, 1024 outputs
            'wd1': tf.Variable(tf.random_normal([k * k * 256, 1024])),
            # 1024 inputs, NUM_CLASSES class labels (prediction)
            'out': tf.Variable(tf.random_normal([1024, NUM_CLASSES]))
        }

    # Store biases for our convolution and fully-connected layers
    with tf.name_scope('biases'):
        biases = {
            'bc1': tf.Variable(tf.random_normal([32])),
            'bc2': tf.Variable(tf.random_normal([64])),
            'bc3': tf.Variable(tf.random_normal([128])),
            'bc4': tf.Variable(tf.random_normal([256])),
            'bd1': tf.Variable(tf.random_normal([1024])),
            'out': tf.Variable(tf.random_normal([NUM_CLASSES]))
        }

    # Define dropout rate to prevent overfitting
    keep_prob = tf.placeholder(tf.float32)

    # Build our graph
    pred = conv_net(x_, weights, biases, image_size, keep_prob)

    # Calculate loss
    with tf.name_scope('cross_entropy'):
        # Define loss and optimizer
        cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_))
        cost_summary = tf.summary.scalar("cost_summary", cost)

    # Run optimizer step
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(
            learning_rate=LEARNING_RATE).minimize(cost)

    # Evaluate model accuracy
    with tf.name_scope('predict'):
        correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        accuracy_summary = tf.summary.scalar("accuracy_summary", accuracy)

    w_summary = tf.summary.histogram("weights", weights['wc1'])
    b_summary = tf.summary.histogram("biases", biases['bc1'])

    sess = tf.Session()
    writer = tf.summary.FileWriter("./logs", sess.graph)
    init_op = tf.global_variables_initializer()
    init_local_op = tf.local_variables_initializer()
    saver = tf.train.Saver()
    step = 0

    with sess.as_default():
        sess.run(init_op)
        sess.run(init_local_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            while not coord.should_stop():
                step += 1
                x, y = sess.run([train_image_batch, train_label_batch])
                train_step.run(feed_dict={keep_prob: 0.75, x_: x, y_: y})
                if step % VALID_STEPS == 0:
                    x, y = sess.run([valid_image_batch, valid_label_batch])
                    conv_summary, relu_summary = generate_image_summary(
                        x_, weights, biases, step, image_size)
                    result = sess.run(
                        [cost_summary, accuracy_summary, accuracy,
                         conv_summary, relu_summary, w_summary, b_summary],
                        feed_dict={keep_prob: 1.0, x_: x, y_: y})
                    cost_summary_str = result[0]
                    accuracy_summary_str = result[1]
                    acc = result[2]
                    conv_summary_str = result[3]
                    relu_summary_str = result[4]
                    w_summary_str = result[5]
                    b_summary_str = result[6]
                    # Write summaries for viewing in TensorBoard
                    writer.add_summary(accuracy_summary_str, step)
                    writer.add_summary(cost_summary_str, step)
                    writer.add_summary(conv_summary_str, step)
                    writer.add_summary(relu_summary_str, step)
                    writer.add_summary(w_summary_str, step)
                    writer.add_summary(b_summary_str, step)
                    print("Accuracy at step %s: %s" % (step, acc))
                    save_path = saver.save(sess, "./model.ckpt")
        except tf.errors.OutOfRangeError:
            x, y = sess.run([valid_image_batch, valid_label_batch])
            result = sess.run([accuracy],
                              feed_dict={keep_prob: 1.0, x_: x, y_: y})
            print("Validation accuracy: %s" % result[0])
        finally:
            coord.request_stop()
            coord.join(threads)

    # Export the model
    export_path = "./model/"
    print("Exporting model to " + export_path)
    # NOTE: softmax_pred is not defined earlier in this snippet; we assume the
    # intended serving output is the softmax of the logits, as in the similar
    # script above.
    softmax_pred = tf.nn.softmax(pred)
    saver = tf.train.Saver(sharded=False)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=x_, scores_tensor=softmax_pred)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(EXPORT_VERSION), sess)
    sess.close()
    return 0
print('input is :', previous_model.input.name)
print('output is:', previous_model.output.name)

sess = K.get_session()
model = previous_model
# # serialize the model and get its weights, for quick re-building
# config = previous_model.get_config()
# weights = previous_model.get_weights()
# # re-build a model where the learning phase is now hard-coded to 0
# from keras.models import model_from_config
# model = model_from_config(config)
# model.set_weights(weights)

from tensorflow_serving.session_bundle import exporter

export_path = "saved_tfsering.pb"  # where to save the exported graph
export_version = 1                 # version number (integer)

saver = tf.train.Saver(sharded=True)
model_exporter = exporter.Exporter(saver)
signature = exporter.classification_signature(input_tensor=model.input,
                                              scores_tensor=model.output)
model_exporter.init(sess.graph.as_graph_def(),
                    default_graph_signature=signature)
model_exporter.export(export_path, tf.constant(export_version), sess)
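# To sanity-check an export like the one above, the session_bundle loader can
# read it back. The tensorflow.contrib module path and the '%08d'
# version-directory format are assumptions matching the contrib
# session_bundle of the same era; availability varies by TF version.
import os
from tensorflow.contrib.session_bundle import session_bundle

bundle_dir = os.path.join(export_path, '%08d' % export_version)
restored_sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
    bundle_dir)
print('Restored graph with %d collections' %
      len(meta_graph_def.collection_def))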
    return x * w


x1 = tf.placeholder(tf.float32, shape=[1], name="x1")
x2 = tf.placeholder(tf.float32, shape=[1], name="x2")
x3 = tf.placeholder(tf.float32, shape=[1], name="x3")

with tf.Session() as sess:
    m1 = step1(x1)
    y1 = step2(m1)
    y2 = step2(x2, reuse=True)

    sess.run(tf.initialize_all_variables())
    print(sess.run(y1, feed_dict={x1: [1.0]}))
    print(sess.run(y1, feed_dict={m1: [2.0]}))
    print(sess.run(y2, feed_dict={x2: [2.0]}))

    with tf.variable_scope('step2', reuse=True):
        # Note: 'weigths' (sic) must match the name the variable was created
        # with inside step2.
        w = tf.get_variable('weigths', [1])
        print(w.eval())

    saver = tf.train.Saver([w], sharded=False)

    from tensorflow_serving.session_bundle import exporter
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=m1,
                                                  scores_tensor=y1)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export('partition_export_weights', tf.constant(0), sess)
    print('Export Done')
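# Hypothetical step1()/step2() definitions matching the fragment above; the
# fragment's `return x * w` is the tail of step2, which must create a variable
# named 'weigths' inside the 'step2' scope so the tf.get_variable lookup with
# reuse=True succeeds. This is a labeled reconstruction, not the original code.
def step1(x):
    # First partition of the pipeline; here simply a named pass-through op so
    # m1 can be fed directly at serving time.
    return tf.mul(x, 1.0, name="m1")


def step2(x, reuse=None):
    # Second partition: a single learned weight shared across calls.
    with tf.variable_scope('step2', reuse=reuse):
        w = tf.get_variable('weigths', [1])
        return x * w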