Example #1
def export():
    with tf.Graph().as_default():
        # Build inference model.
        # Please refer to Tensorflow inception model for details.

        # Input transformation.
        jpegs = tf.placeholder(tf.string)
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        print(images)
        # Run inference.
        feature = vgg.inference(images)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, 'model/inshop.sgd.adam')
            # Export inference model.
            init_op = tf.group(tf.initialize_all_tables(), name='init_op')

            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(
                input_tensor=jpegs, classes_tensor=None, scores_tensor=feature)
            model_exporter.init(default_graph_signature=signature,
                                init_op=init_op)
            model_exporter.export('model', tf.constant(150000), sess)
            print('Successfully exported model to model/.')
Example #2
def export():
    with tf.Graph().as_default():
        #TODO(xuesen) for serving
        serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)

        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        # Run inference.
        feature = vgg.inference(images)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, 'model/inshop.sgd.adam')
            # Export inference model.
            init_op = tf.group(tf.initialize_all_tables(), name='init_op')

            # TODO: Export inference model using regression_signature?
            feat_signature = exporter.regression_signature(
                input_tensor=serialized_tf_example, output_tensor=feature)
            named_graph_signature = {
                'inputs': exporter.generic_signature({'images': jpegs}),
                'outputs': exporter.generic_signature({'feats': feature})
            }
            model_exporter = exporter.Exporter(saver)
            model_exporter.init(default_graph_signature=feat_signature,
                                init_op=init_op,
                                named_graph_signatures=named_graph_signature)
            model_exporter.export('model/vgg_serving', tf.constant(150000),
                                  sess)
            print('Successfully exported model to model/vgg_serving.')
Example #3
def train_fast():
    if not os.path.exists(os.path.dirname(constants.FAST_MODEL_FILE)):
        os.mkdir(os.path.dirname(constants.FAST_MODEL_FILE))
    with tf.Graph().as_default():
        sess = tf.Session()
        boards, logits, fast_nn, fast_train, fast_loss = build_fast_train_func()
        if os.path.isfile(constants.FAST_MODEL_FILE):
            saver = tf.train.Saver()
            print "Loading Fast Neural Net Model"
            saver.restore(sess, constants.FAST_MODEL_FILE)
        else:
            print "Training Fast Neural Net"
            run_training(sess, fast_train, fast_loss, constants.FAST)
        print "Evaluating Fast Neural Net"
        #run_eval(sess, fast_nn)
        print "Exporting Model with number {0}".format(FLAGS.export_version)
        saver = tf.train.Saver(sharded=True)
        model_exporter = exporter.Exporter(saver)
        model_exporter.init(
            sess.graph.as_graph_def(),
            named_graph_signatures={
                'inputs': exporter.generic_signature({'boards': boards}),
                'outputs': exporter.generic_signature({'labels': logits})
            }
        )
        model_exporter.export(constants.EXPORT_PATH, tf.constant(FLAGS.export_version), sess)
        sess.close()
Example #4
def exporter(saver, sess):
    model_exporter = exp.Exporter(saver)
    signature = exp.classification_signature(input_tensor=img,
                                             classes_tensor=pred_val)
    model_exporter.init(default_graph_signature=signature,
                        init_op=tf.initialize_all_tables())
    model_exporter.export(FLAGS.log_dir + "/export", tf.constant(time.time()),
                          sess)
Example #5
def export_model_to_tensorflow(path_to_trained_keras_model: str):
    print("Loading model for exporting to Protocol Buffer format...")
    model = keras.models.load_model(path_to_trained_keras_model)

    sess = K.get_session()

    # serialize the model and get its weights, for quick re-building
    config = model.get_config()
    weights = model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = os.path.abspath(os.path.join("export", "simple"))  # where to save the exported graph
    os.makedirs(export_path)
    checkpoint_state_name = "checkpoint_state"
    export_version = 1  # version number (integer)
    saver = tensorflow.train.Saver(sharded=True, name=checkpoint_state_name)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=model.input, scores_tensor=model.output)

    # # Version 1 of exporter
    # model_exporter.init(sess.graph.as_graph_def(), default_graph_signature=signature)
    # model_exporter.export(export_path, tensorflow.constant(export_version), sess)
    #
    # # Version 2 of exporter
    # tensorflow.train.write_graph(sess.graph.as_graph_def(), logdir=".", name="simple.pbtxt", as_text=True)

    # Version 3 with Freezer from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph_test.py
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    saver_write_version = saver_pb2.SaverDef.V2

    # Save a checkpoint and write the graph definition to disk so the
    # freeze_graph routine below can fold the variables into constants.
    saver = tensorflow.train.Saver(write_version=saver_write_version)
    checkpoint_path = saver.save(sess, export_path, global_step=0, latest_filename=checkpoint_state_name)
    graph_io.write_graph(sess.graph, export_path, input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(export_path, input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node/Softmax"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(export_path, output_graph_name)
    clear_devices = False
    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_path, clear_devices, "")

    shutil.copy(os.path.join("export", "simple", "output_graph.pb"), output_graph_name)
    shutil.rmtree("export")
    print("Exported model: {0}".format(os.path.abspath(output_graph_name)))
Example #6
def export_model(sess, saver, signature, model_path, model_version):
    logging.info("Export the model to {}".format(model_path))
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(sess.graph.as_graph_def(),
                        named_graph_signatures=signature)
    try:
        model_exporter.export(model_path, tf.constant(model_version), sess)
    except Exception as e:
        logging.error("Fail to export model, exception: {}".format(e))
Example #7
def main(_):
    if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
        print(
            'Usage: mnist_export.py [--training_iteration=x] '
            '[--export_version=y] export_dir')
        sys.exit(-1)
    if FLAGS.training_iteration <= 0:
        print 'Please specify a positive value for training iteration.'
        sys.exit(-1)
    if FLAGS.export_version <= 0:
        print 'Please specify a positive value for version number.'
        sys.exit(-1)

    # Train model
    print 'Training model...'
    mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
    sess = tf.InteractiveSession()
    x = tf.placeholder('float', shape=[None, 784])
    y_ = tf.placeholder('float', shape=[None, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tf.initialize_all_variables())
    y = tf.nn.softmax(tf.matmul(x, w) + b)
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
        cross_entropy)
    for _ in range(FLAGS.training_iteration):
        batch = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    print 'training accuracy %g' % sess.run(accuracy,
                                            feed_dict={
                                                x: mnist.test.images,
                                                y_: mnist.test.labels
                                            })
    print 'Done training!'

    # Export model
    # WARNING(break-tutorial-inline-code): The following code snippet is
    # in-lined in tutorials, please update tutorial documents accordingly
    # whenever code changes.
    export_path = sys.argv[-1]
    print 'Exporting trained model to', export_path
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(sess.graph.as_graph_def(),
                        named_graph_signatures={
                            'inputs': exporter.generic_signature({'images':
                                                                  x}),
                            'outputs':
                            exporter.generic_signature({'scores': y})
                        })
    model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
    print 'Done exporting!'
Example #8
def main(_):
    training_set, test_set = make_training_and_test_sets(one_hot=True,
                                                         binary=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 151])
    W = tf.Variable(tf.zeros([151, 2]))
    b = tf.Variable(tf.zeros([2]))
    y = tf.matmul(x, W) + b

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 2])

    # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
    # outputs of 'y', and then average across the batch.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(y, y_))
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    # Train
    tf.initialize_all_variables().run()

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    for i in range(10000):
        print 'Training batch' + str(i)
        batch_xs, batch_ys = training_set.next_batch(1000)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        print(
            sess.run(cross_entropy,
                     feed_dict={
                         x: test_set.data,
                         y_: test_set.target
                     }))

    print(sess.run(accuracy, feed_dict={
        x: test_set.data,
        y_: test_set.target
    }))

    export_path = sys.argv[-1]
    print 'Exporting trained model to', export_path
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(sess.graph.as_graph_def(),
                        named_graph_signatures={
                            'inputs': exporter.generic_signature({'data': x}),
                            'outputs': exporter.generic_signature({'move': y})
                        })
    model_exporter.export(export_path, tf.constant(1), sess)
Example #9
def tensorflow_serving():
    # Generate input data
    n_samples = 1000
    learning_rate = 0.01
    batch_size = 100
    n_steps = 500
    x_data = np.arange(100, step=.1)
    y_data = x_data + 20 * np.sin(x_data / 10)
    x_data = np.reshape(x_data, (n_samples, 1))
    y_data = np.reshape(y_data, (n_samples, 1))
    # Placeholders for batched input
    x = tf.placeholder(tf.float32, shape=(batch_size, 1))
    y = tf.placeholder(tf.float32, shape=(batch_size, 1))
    with tf.variable_scope('test'):
        w = tf.get_variable('weights', (1, 1),
                            initializer=tf.random_normal_initializer())
        b = tf.get_variable('bias', (1, ),
                            initializer=tf.constant_initializer(0))
        y_pred = tf.matmul(x, w) + b
        loss = tf.reduce_sum((y - y_pred)**2 / n_samples)
        opt = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(loss)
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(tf.initialize_all_variables())
            for _ in range(n_steps):
                indices = np.random.choice(n_samples, batch_size)
                x_batch = x_data[indices]
                y_batch = y_data[indices]
                _, loss_val = sess.run([opt, loss],
                                       feed_dict={
                                           x: x_batch,
                                           y: y_batch
                                       })
            print(w.eval())
            print(b.eval())
            print(loss_val)
            saver.save(sess, "./model/test.ckpt")  # saver.restore(sess, "./model/test.ckpt")
            model_exporter = exporter.Exporter(saver)
            model_exporter.init(sess.graph.as_graph_def(),
                                named_graph_signatures={
                                    'inputs':
                                    exporter.generic_signature({'x': x}),
                                    'outputs':
                                    exporter.generic_signature({'y': y_pred})
                                })
            model_exporter.export('./model', tf.constant(1), sess)

    a = 1
Example #10
def _export_graph(graph, saver, checkpoint_path, export_dir,
                  default_graph_signature, named_graph_signatures,
                  exports_to_keep):
  """Exports graph via session_bundle, by creating a Session."""
  with graph.as_default():
    with tf_session.Session('') as session:
      session.run(variables.initialize_local_variables())
      saver.restore(session, checkpoint_path)
      export = exporter.Exporter(saver)
      export.init(session.graph.as_graph_def(),
                  default_graph_signature=default_graph_signature,
                  named_graph_signatures=named_graph_signatures)
      export.export(export_dir, contrib_variables.get_global_step(), session,
                    exports_to_keep=exports_to_keep)
Example #11
def export_model(sess, inputs_signature, outputs_signature):
  # Export the model for generic inference service
  print 'Exporting trained model to', FLAGS.model_path
  saver = tf.train.Saver(sharded=True)
  model_exporter = exporter.Exporter(saver)
  model_exporter.init(
      sess.graph.as_graph_def(),
      named_graph_signatures={
          'inputs': exporter.generic_signature(inputs_signature),
          'outputs': exporter.generic_signature(outputs_signature)
      })
  model_exporter.export(FLAGS.model_path, tf.constant(FLAGS.model_version),
                        sess)
  print 'Done exporting!'
Example #12
 def train(self, dropout, checkpoint_step, batch_size, epoch_num,
           model_name, version, train_word, train_reg, train_y, dev_word,
           dev_reg, dev_y):
     curr_step = 0
     batches = dh.batch_iter(list(zip(train_word, train_reg, train_y)),
                             batch_size, epoch_num)
     dev_feed_dict = {
         self.x_word: dev_word,
         self.x_reg: dev_reg,
         self.y: dev_y,
         self.dropout_keep: dropout
     }
     sess = tf.InteractiveSession()
     sess.run(tf.initialize_all_variables())
     # Training
     for batch in batches:
         if len(batch) == 0:
             continue
         word_batch, reg_batch, y_batch = zip(*batch)
         feed_dict = {
             self.x_word: word_batch,
             self.x_reg: reg_batch,
             self.y: y_batch,
             self.dropout_keep: dropout
         }
         self.train_step.run(feed_dict=feed_dict)
         curr_step += 1
         if curr_step % checkpoint_step == 0:
             sess.run([self.accuracy, self.prediction],
                      feed_dict=dev_feed_dict)
     acc, predictions = sess.run([self.accuracy, self.prediction],
                                 feed_dict=dev_feed_dict)
     export_model_path = './export/%s' % model_name
     if self.device == '/cpu:0':
         saver = tf.train.Saver(tf.global_variables())
         model_exporter = exporter.Exporter(saver)
         named_tensor_binding = {
             "input_x": self.x_in,
             "input_reg": self.x_reg,
             "classes": self.predictions,
             "scores": self.probs
         }
         signature = exporter.generic_signature(named_tensor_binding)
         signatures = {"generic": signature}
         model_exporter.init(sess.graph.as_graph_def(),
                             named_graph_signatures=signatures)
         model_exporter.export(export_model_path, tf.constant(version),
                               sess)
     return acc, predictions
Example #13
def export_model(sess, export_path, export_version, x, y, probability):
    export_saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(export_saver)
    model_exporter.init(sess.graph.as_graph_def(),
                        named_graph_signatures={
                            'inputs':
                            exporter.generic_signature({'images': x}),
                            'outputs':
                            exporter.generic_signature({
                                'classes':
                                y,
                                'probability':
                                probability
                            })
                        })
    model_exporter.export(export_path, tf.constant(export_version), sess)
    print("Export Finished!")
Example #14
def exporter_model(saver, sess, work_dir, export_version, x, y):
    """
    :param saver: tf.train.Saver()
    :param sess: tf.Session()
    :param work_dir: directory to export the model to
    :param export_version: export version number; TensorFlow Serving prefers the highest version it finds
    :param x: model input tensor
    :param y: model prediction (output) tensor
    :return:
    """
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(sess.graph.as_graph_def(),
                        named_graph_signatures={
                            'inputs': exporter.generic_signature({'x': x}),
                            'outputs': exporter.generic_signature({'y': y})
                        })
    model_exporter.export(work_dir, tf.constant(export_version), sess)
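A minimal usage sketch for the helper above. The toy graph, checkpoint path, and export settings below are illustrative assumptions, not part of the original example; they only show how exporter_model might be wired up after restoring a trained session.
# Hypothetical usage sketch (names and paths are placeholders).
with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, 10], name='x')
    w = tf.Variable(tf.zeros([10, 2]))
    y = tf.nn.softmax(tf.matmul(x, w), name='y')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './checkpoints/model.ckpt')  # hypothetical checkpoint
        exporter_model(saver, sess, work_dir='./export_dir',
                       export_version=1, x=x, y=y)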
Example #15
def export_model(path):
    net = resnet()
    g = net.graph
    X = g.get_operation_by_name('InputData/X')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        model = tflearn.DNN(net, session=sess)
        # model.load(pjoin(MODEL_PATH, MODEL_NAME))
        saver.restore(sess, pjoin(MODEL_PATH, MODEL_NAME))
        model_exporter = exporter.Exporter(saver)
        model_exporter.init(sess.graph.as_graph_def(),
                            named_graph_signatures={
                                'inputs': exporter.generic_signature({'x': X}),
                                'outputs':
                                exporter.generic_signature({'y': net})
                            })
        model_exporter.export(path, tf.constant(200), sess)
        print 'Successfully exported model to %s' % path
Example #16
def build_model_decode(saver, sess, work_dir, export_version, inputs, outputs):
    """
    Export the model so that TensorFlow Serving can load it directly.
    :param saver: tf.train.Saver()
    :param sess: tf.Session()
    :param work_dir: directory to export the model to
    :param export_version: version number
    :param inputs: inputs
    :param outputs: outputs
    :return:
    """
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(
        sess.graph.as_graph_def(),
        named_graph_signatures={
            'inputs': exporter.generic_signature({'inputs': inputs}),
            'outputs': exporter.generic_signature({'outputs': outputs})})
    model_exporter.export(work_dir,
                          tf.constant(export_version), sess)
Example #17
def _export_graph(graph, saver, checkpoint_path, export_dir,
                  default_graph_signature, named_graph_signatures,
                  exports_to_keep):
  """Exports graph via session_bundle, by creating a Session."""
  with graph.as_default():
    with tf_session.Session('') as session:
      variables.initialize_local_variables()
      data_flow_ops.initialize_all_tables()
      saver.restore(session, checkpoint_path)

      export = exporter.Exporter(saver)
      export.init(init_op=control_flow_ops.group(
          variables.initialize_local_variables(),
          data_flow_ops.initialize_all_tables()),
                  default_graph_signature=default_graph_signature,
                  named_graph_signatures=named_graph_signatures,
                  assets_collection=ops.get_collection(
                      ops.GraphKeys.ASSET_FILEPATHS))
      return export.export(export_dir, contrib_variables.get_global_step(),
                           session, exports_to_keep=exports_to_keep)
Example #18
def create_and_export_model(export_path):
    x = tf.placeholder(tf.int32, shape=[3])
    z = tf.Variable([2])
    y = tf.mul(x, z)

    sess = tf.Session()
    init = tf.initialize_all_variables()
    sess.run(init)
    feed_dict = {x: [3, 4, 5]}
    print(sess.run(y, feed_dict=feed_dict))

    print 'Exporting trained model to', export_path
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(sess.graph.as_graph_def(),
                        named_graph_signatures={
                            'inputs': exporter.generic_signature({'x': x}),
                            'outputs': exporter.generic_signature({'y': y})
                        })
    model_exporter.export(export_path, tf.constant(VERSION), sess)
Example #19
def saveWithSavedModel():
    # K.set_learning_phase(0)  # all new operations will be in test mode from now on

    # wordIndex = loadWordIndex()
    model = createModel()
    model.load_weights(KERAS_WEIGHTS_FILE)

    export_path = os.path.join(PUNCTUATOR_DIR,
                               'graph')  # where to save the exported graph

    shutil.rmtree(export_path, True)
    export_version = 1  # version number (integer)

    import tensorflow as tf
    sess = tf.Session()

    saver = tf.train.Saver(sharded=True)
    from tensorflow.contrib.session_bundle import exporter
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=model.input,
                                                  scores_tensor=model.output)
    # model_exporter.init(sess.graph.as_graph_def(),default_graph_signature=signature)
    tf.initialize_all_variables().run(session=sess)
    # model_exporter.export(export_path, tf.constant(export_version), sess)
    from tensorflow.python.saved_model import builder as saved_model_builder
    builder = saved_model_builder.SavedModelBuilder(export_path)
    from tensorflow.python.saved_model import signature_constants
    from tensorflow.python.saved_model import tag_constants
    legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
    from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
    signature_def = predict_signature_def(
        {signature_constants.PREDICT_INPUTS: model.input},
        {signature_constants.PREDICT_OUTPUTS: model.output})
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def
        },
        legacy_init_op=legacy_init_op)
    builder.save()
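A hedged sketch of loading the SavedModel written by builder.save() above back into a fresh session. It reuses the same export directory; the commented feed is a placeholder for a real input batch shaped like model.input.
# Sketch: read the SavedModel back and look up the predict signature.
import os
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants, tag_constants

export_path = os.path.join(PUNCTUATOR_DIR, 'graph')  # same directory as above
with tf.Session(graph=tf.Graph()) as load_sess:
    meta_graph = tf.saved_model.loader.load(load_sess,
                                            [tag_constants.SERVING],
                                            export_path)
    sig = meta_graph.signature_def[
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    input_name = sig.inputs[signature_constants.PREDICT_INPUTS].name
    output_name = sig.outputs[signature_constants.PREDICT_OUTPUTS].name
    # result = load_sess.run(output_name, feed_dict={input_name: batch})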
Example #20
def main(_):
    # Train model
    print('Training model...')
    mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    y_ = tf.placeholder('float', shape=[None, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
        cross_entropy)
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    for _ in range(FLAGS.training_iteration):
        batch = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    print('training accuracy %g' % sess.run(accuracy,
                                            feed_dict={
                                                x: mnist.test.images,
                                                y_: mnist.test.labels
                                            }))
    print('Done training!')

    # Export model
    export_path = FLAGS.export_path
    print('Exporting trained model to %s' % export_path)
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(sess.graph.as_graph_def(),
                        named_graph_signatures={
                            'inputs': exporter.generic_signature({'images':
                                                                  x}),
                            'outputs':
                            exporter.generic_signature({'scores': y})
                        })
    model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
    print('Done exporting!')
Example #21
def ExportModel(sess, model_dir, input, output, assets):
    if os.path.isdir(model_dir):
        shutil.rmtree(model_dir)

    # using TF Serving exporter to load into a TF Serving session bundle
    logging.info('Exporting trained model to %s', model_dir)
    saver = tf.train.Saver()
    model_exporter = exporter.Exporter(saver)
    signature = exporter.regression_signature(input_tensor=input,
                                              output_tensor=output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature,
                        assets_collection=assets)
    model_exporter.export(model_dir, tf.constant(1), sess)

    # using a SummaryWriter so graph can be loaded in TensorBoard
    writer = tf.train.SummaryWriter(model_dir, sess.graph)
    writer.flush()

    # exporting the graph as a text protobuf, to view the graph manually
    f1 = open(model_dir + '/graph.pbtxt', 'w+')
    print >> f1, str(tf.get_default_graph().as_graph_def())
Example #22
def export_model(sess, model_train):
    init_op = tf.group(tf.initialize_all_tables(), name='init_op')
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)

    model_exporter.init(sess.graph.as_graph_def(),
                        init_op=init_op,
                        named_graph_signatures={
                            'inputs':
                            exporter.generic_signature({
                                'batch_utterances':
                                model_train.context,
                                'batch_target':
                                model_train.flag_one_hot,
                                'batch_seq_lens':
                                model_train.sequence_length_context
                            }),
                            'outputs':
                            exporter.generic_signature(
                                {'loss': model_train.loss})
                        })
    model_exporter.export('exported_model/', tf.constant(10), sess)
    return
Example #23
    def doBasicsOneExportPath(self,
                              export_path,
                              clear_devices=False,
                              global_step=GLOBAL_STEP,
                              sharded=True):
        # Build a graph with 2 parameter nodes on different devices.
        tf.reset_default_graph()
        with tf.Session(target="",
                        config=config_pb2.ConfigProto(
                            device_count={"CPU": 2})) as sess:
            # v2 is an unsaved variable derived from v0 and v1.  It is used to
            # exercise the ability to run an init op when restoring a graph.
            with sess.graph.device("/cpu:0"):
                v0 = tf.Variable(10, name="v0")
            with sess.graph.device("/cpu:1"):
                v1 = tf.Variable(20, name="v1")
            v2 = tf.Variable(1, name="v2", trainable=False, collections=[])
            assign_v2 = tf.assign(v2, tf.add(v0, v1))
            init_op = tf.group(assign_v2, name="init_op")

            tf.add_to_collection("v", v0)
            tf.add_to_collection("v", v1)
            tf.add_to_collection("v", v2)

            global_step_tensor = tf.Variable(global_step, name="global_step")
            named_tensor_bindings = {
                "logical_input_A": v0,
                "logical_input_B": v1
            }
            signatures = {
                "foo":
                exporter.regression_signature(input_tensor=v0,
                                              output_tensor=v1),
                "generic":
                exporter.generic_signature(named_tensor_bindings)
            }

            asset_filepath_orig = os.path.join(tf.test.get_temp_dir(),
                                               "hello42.txt")
            asset_file = tf.constant(asset_filepath_orig, name="filename42")
            tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset_file)

            with gfile.FastGFile(asset_filepath_orig, "w") as f:
                f.write("your data here")
            assets_collection = tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS)

            ignored_asset = os.path.join(tf.test.get_temp_dir(), "ignored.txt")
            with gfile.FastGFile(ignored_asset, "w") as f:
                f.write("additional data here")

            tf.initialize_all_variables().run()

            # Run an export.
            save = tf.train.Saver({
                "v0": v0,
                "v1": v1
            },
                                  restore_sequentially=True,
                                  sharded=sharded)
            export = exporter.Exporter(save)
            export.init(
                sess.graph.as_graph_def(),
                init_op=init_op,
                clear_devices=clear_devices,
                default_graph_signature=exporter.classification_signature(
                    input_tensor=v0),
                named_graph_signatures=signatures,
                assets_collection=assets_collection)
            export.export(export_path,
                          global_step_tensor,
                          sess,
                          exports_to_keep=gc.largest_export_versions(2))

        # Restore graph.
        compare_def = tf.get_default_graph().as_graph_def()
        tf.reset_default_graph()
        with tf.Session(target="",
                        config=config_pb2.ConfigProto(
                            device_count={"CPU": 2})) as sess:
            save = tf.train.import_meta_graph(
                os.path.join(export_path,
                             constants.VERSION_FORMAT_SPECIFIER % global_step,
                             constants.META_GRAPH_DEF_FILENAME))
            self.assertIsNotNone(save)
            meta_graph_def = save.export_meta_graph()
            collection_def = meta_graph_def.collection_def

            # Validate custom graph_def.
            graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
            self.assertEquals(len(graph_def_any), 1)
            graph_def = tf.GraphDef()
            graph_def_any[0].Unpack(graph_def)
            if clear_devices:
                for node in compare_def.node:
                    node.device = ""
            self.assertProtoEquals(compare_def, graph_def)

            # Validate init_op.
            init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
            self.assertEquals(len(init_ops), 1)
            self.assertEquals(init_ops[0], "init_op")

            # Validate signatures.
            signatures_any = collection_def[
                constants.SIGNATURES_KEY].any_list.value
            self.assertEquals(len(signatures_any), 1)
            signatures = manifest_pb2.Signatures()
            signatures_any[0].Unpack(signatures)
            default_signature = signatures.default_signature
            self.assertEqual(
                default_signature.classification_signature.input.tensor_name,
                "v0:0")
            bindings = signatures.named_signatures[
                "generic"].generic_signature.map
            self.assertEquals(bindings["logical_input_A"].tensor_name, "v0:0")
            self.assertEquals(bindings["logical_input_B"].tensor_name, "v1:0")
            read_foo_signature = (
                signatures.named_signatures["foo"].regression_signature)
            self.assertEquals(read_foo_signature.input.tensor_name, "v0:0")
            self.assertEquals(read_foo_signature.output.tensor_name, "v1:0")

            # Validate the assets.
            assets_any = collection_def[constants.ASSETS_KEY].any_list.value
            self.assertEquals(len(assets_any), 1)
            asset = manifest_pb2.AssetFile()
            assets_any[0].Unpack(asset)
            assets_path = os.path.join(
                export_path, constants.VERSION_FORMAT_SPECIFIER % global_step,
                constants.ASSETS_DIRECTORY, "hello42.txt")
            asset_contents = gfile.GFile(assets_path).read()
            self.assertEqual(asset_contents, "your data here")
            self.assertEquals("hello42.txt", asset.filename)
            self.assertEquals("filename42:0", asset.tensor_binding.tensor_name)
            ignored_asset_path = os.path.join(
                export_path, constants.VERSION_FORMAT_SPECIFIER % global_step,
                constants.ASSETS_DIRECTORY, "ignored.txt")
            self.assertFalse(gfile.Exists(ignored_asset_path))

            # Validate graph restoration.
            if sharded:
                save.restore(
                    sess,
                    os.path.join(
                        export_path,
                        constants.VERSION_FORMAT_SPECIFIER % global_step,
                        constants.VARIABLES_FILENAME_PATTERN))
            else:
                save.restore(
                    sess,
                    os.path.join(
                        export_path,
                        constants.VERSION_FORMAT_SPECIFIER % global_step,
                        constants.VARIABLES_FILENAME))
            self.assertEqual(10, tf.get_collection("v")[0].eval())
            self.assertEqual(20, tf.get_collection("v")[1].eval())
            tf.get_collection(constants.INIT_OP_KEY)[0].run()
            self.assertEqual(30, tf.get_collection("v")[2].eval())
Example #24
def export_model(sess, saver, signature, model_path, model_version):
    print("Export the model to {}".format(model_path))
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(sess.graph.as_graph_def(),
                        named_graph_signatures=signature)
    model_exporter.export(model_path, tf.constant(model_version), sess)
Example #25
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {
        'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
    }
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    jpegs = tf_example['image/encoded']
    images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      classification_signature = exporter.classification_signature(
          input_tensor=serialized_tf_example,
          classes_tensor=classes,
          scores_tensor=values)
      named_graph_signature = {
          'inputs': exporter.generic_signature({'images': jpegs}),
          'outputs': exporter.generic_signature({
              'classes': classes,
              'scores': values
          })}
      model_exporter = exporter.Exporter(saver)
      model_exporter.init(
          init_op=init_op,
          default_graph_signature=classification_signature,
          named_graph_signatures=named_graph_signature)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
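Since the default classification signature above takes serialized tf.Example protos, a client has to wrap raw JPEG bytes in an 'image/encoded' bytes feature before calling the model. A minimal, hedged sketch of building that payload; the file path argument is a placeholder.
import tensorflow as tf

def make_request_payload(jpeg_path):
    # Read raw JPEG bytes and wrap them in the 'image/encoded' feature
    # that tf.parse_example in the export() graph above expects.
    with open(jpeg_path, 'rb') as f:
        jpeg_bytes = f.read()
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[jpeg_bytes]))
    }))
    return example.SerializeToString()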
Example #26
def Export():
    with tf.Session() as sess:
        # Make model parameters a&b variables instead of constants to
        # exercise the variable reloading mechanisms.
        a = tf.Variable(0.5, name="a")
        b = tf.Variable(2.0, name="b")

        # Create a placeholder for serialized tensorflow.Example messages to be fed.
        serialized_tf_example = tf.placeholder(tf.string, name="tf_example")

        # Parse the tensorflow.Example looking for a feature named "x" with a single
        # floating point value.
        feature_configs = {
            "x": tf.FixedLenFeature([1], dtype=tf.float32),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        # Use tf.identity() to assign name
        x = tf.identity(tf_example["x"], name="x")

        # Calculate, y = a*x + b
        y = tf.add(tf.mul(a, x), b, name="y")

        # Setup a standard Saver for our variables.
        save = tf.train.Saver(
            {
                "a": a,
                "b": b
            },
            sharded=True,
            write_version=tf.train.SaverDef.V2
            if FLAGS.use_checkpoint_v2 else tf.train.SaverDef.V1)

        # asset_path contains the base directory of assets used in training (e.g.
        # vocabulary files).
        original_asset_path = tf.constant("/tmp/original/export/assets")
        # Ops reading asset files should reference the asset_path tensor
        # which stores the original asset path at training time and the
        # overridden assets directory at restore time.
        asset_path = tf.Variable(original_asset_path,
                                 name="asset_path",
                                 trainable=False,
                                 collections=[])
        assign_asset_path = asset_path.assign(original_asset_path)

        # Use a fixed global step number.
        global_step_tensor = tf.Variable(123, name="global_step")

        # Create a RegressionSignature for our input and output.
        regression_signature = exporter.regression_signature(
            input_tensor=serialized_tf_example,
            # Use tf.identity here because we export two signatures here.
            # Otherwise only graph for one of the signatures will be loaded
            # (whichever is created first) during serving.
            output_tensor=tf.identity(y))
        named_graph_signature = {
            "inputs": exporter.generic_signature({"x": x}),
            "outputs": exporter.generic_signature({"y": y})
        }

        # Create two filename assets and corresponding tensors.
        # TODO(b/26254158) Consider adding validation of file existence as well as
        # hashes (e.g. sha1) for consistency.
        original_filename1 = tf.constant("hello1.txt")
        tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)
        filename1 = tf.Variable(original_filename1,
                                name="filename1",
                                trainable=False,
                                collections=[])
        assign_filename1 = filename1.assign(original_filename1)
        original_filename2 = tf.constant("hello2.txt")
        tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename2)
        filename2 = tf.Variable(original_filename2,
                                name="filename2",
                                trainable=False,
                                collections=[])
        assign_filename2 = filename2.assign(original_filename2)

        # Init op contains a group of all variables that we assign.
        init_op = tf.group(assign_asset_path, assign_filename1,
                           assign_filename2)

        # CopyAssets is used as a callback during export to copy files to the
        # given export directory.
        def CopyAssets(filepaths, export_path):
            print("copying asset files to: %s" % export_path)
            for filepath in filepaths:
                print("copying asset file: %s" % filepath)

        # Run an export.
        tf.initialize_all_variables().run()
        export = exporter.Exporter(save)
        export.init(sess.graph.as_graph_def(),
                    init_op=init_op,
                    default_graph_signature=regression_signature,
                    named_graph_signatures=named_graph_signature,
                    assets_collection=tf.get_collection(
                        tf.GraphKeys.ASSET_FILEPATHS),
                    assets_callback=CopyAssets)
        export.export(FLAGS.export_dir, global_step_tensor, sess)
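Before exporting, the regression graph above can be sanity-checked by feeding a hand-built serialized tf.Example through it. A hedged sketch, assuming it runs inside the `with tf.Session() as sess:` block above after tf.initialize_all_variables().run(); the value 3.0 is arbitrary, and with a=0.5 and b=2.0 the expected output is 3.5.
# Sketch only: build one serialized tf.Example with a float feature "x"
# and run it through the y = a*x + b graph defined above.
request = tf.train.Example(features=tf.train.Features(feature={
    "x": tf.train.Feature(float_list=tf.train.FloatList(value=[3.0]))
})).SerializeToString()
print(sess.run(y, feed_dict={serialized_tf_example: [request]}))  # ~[[3.5]]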