Example #1
def export_model(sess, export_path, export_version, x, y):
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=x, scores_tensor=y)
    model_exporter.init(sess.graph.as_graph_def(), default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
    print("Export Finished!")
Example #2
def main(_):
    if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
        print(
            'Usage: mnist_export.py [--training_iteration=x] '
            '[--export_version=y] export_dir')
        sys.exit(-1)
    if FLAGS.training_iteration <= 0:
        print('Please specify a positive value for training iteration.')
        sys.exit(-1)
    if FLAGS.export_version <= 0:
        print('Please specify a positive value for version number.')
        sys.exit(-1)

    # Train model
    print('Training model...')
    mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
    sess = tf.InteractiveSession()
    x = tf.placeholder('float', shape=[None, 784])
    y_ = tf.placeholder('float', shape=[None, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tf.initialize_all_variables())
    y = tf.nn.softmax(tf.matmul(x, w) + b)
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
        cross_entropy)
    for _ in range(FLAGS.training_iteration):
        batch = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    print('training accuracy %g' % sess.run(accuracy,
                                             feed_dict={
                                                 x: mnist.test.images,
                                                 y_: mnist.test.labels
                                             }))
    print('Done training!')

    # Export model
    # WARNING(break-tutorial-inline-code): The following code snippet is
    # in-lined in tutorials, please update tutorial documents accordingly
    # whenever code changes.
    export_path = sys.argv[-1]
    print('Exporting trained model to', export_path)
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=x,
                                                  scores_tensor=y)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
    print('Done exporting!')
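The script above reads training_iteration, export_version, and work_dir from FLAGS, which the original file defines elsewhere. A plausible sketch of those definitions, using the tf.app.flags API of the same TensorFlow era (the default values are illustrative assumptions):

# Assumed flag definitions; only the flag names are taken from the snippet above.
tf.app.flags.DEFINE_integer('training_iteration', 1000,
                            'number of training iterations.')
tf.app.flags.DEFINE_integer('export_version', 1, 'version number of the model.')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory.')
FLAGS = tf.app.flags.FLAGS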
Example #3
def export(sess, previous_model, export_path, export_version):
    K.set_learning_phase(0)  # all new operations will be in test mode from now on

    # serialize the model and get its weights, for quick re-building
    config = previous_model.get_config()
    weights = previous_model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    new_model = model_from_config(config)
    new_model.set_weights(weights)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=new_model.input,
                                                  scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
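A hedged usage sketch for this Keras-to-Serving helper, assuming a previously trained Keras model and the TensorFlow backend session; the model, path, and version below are placeholders:

# Illustrative call; `model` is an already trained Keras model and K is the
# Keras backend module, both assumed rather than shown in the example above.
from keras import backend as K

sess = K.get_session()
export(sess, model, './exported_keras_model', 1)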
Example #4
def export():
    with tf.Graph().as_default():
        # Build Aquila model.
        # Please refer to Tensorflow inception model for details.

        flat_image_size = 3 * FLAGS.image_size**2
        input_data = tf.placeholder(tf.float32, shape=(None, flat_image_size))
        # reshape the images appropriately
        images = tf.reshape(input_data,
                            (-1, FLAGS.image_size, FLAGS.image_size, 3))

        # Run inference.
        logits, _ = aquila_model.inference(images,
                                           for_training=False,
                                           restore_logits=True)

        # Restore variables from training checkpoint.
        variable_averages = tf.train.ExponentialMovingAverage(
            aquila_model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            # Restore variables from training checkpoints.
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/imagenet_train/model.ckpt-0,
                # extract global_step from it.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                print('Successfully loaded model from %s at step=%s.' %
                      (ckpt.model_checkpoint_path, global_step))
            else:
                print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
                return

            # Export inference model.
            model_exporter = exporter.Exporter(saver)
            signature = exporter.regression_signature(input_tensor=input_data,
                                                       output_tensor=logits)
            model_exporter.init(default_graph_signature=signature)
            model_exporter.export(FLAGS.export_dir, tf.constant(global_step),
                                  sess)
            print('Successfully exported model to %s' % FLAGS.export_dir)
Example #5
def export():
    # Create index->synset mapping
    synsets = []
    with open(SYNSET_FILE) as f:
        synsets = f.read().splitlines()
    # Create synset->metadata mapping
    texts = {}
    with open(METADATA_FILE) as f:
        for line in f.read().splitlines():
            parts = line.split('\t')
            assert len(parts) == 2
            texts[parts[0]] = parts[1]

    with tf.Graph().as_default():
        # Build inference model.
        # Please refer to Tensorflow inception model for details.

        # Input transformation.
        # TODO(b/27776734): Add batching support.
        jpegs = tf.placeholder(tf.string, shape=(1))
        image_buffer = tf.squeeze(jpegs, [0])
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)
        # After this point, all image pixels reside in [0,1)
        # until the very end, when they're rescaled to (-1, 1).  The various
        # adjust_* ops all require this range for dtype float.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Crop the central region of the image with an area containing 87.5% of
        # the original image.
        image = tf.image.central_crop(image, central_fraction=0.875)
        # Resize the image to the original height and width.
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(image,
                                         [FLAGS.image_size, FLAGS.image_size],
                                         align_corners=False)
        image = tf.squeeze(image, [0])
        # Finally, rescale to [-1,1] instead of [0, 1)
        image = tf.sub(image, 0.5)
        image = tf.mul(image, 2.0)
        images = tf.expand_dims(image, 0)

        # Run inference.
        logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

        # Transform output to topK result.
        values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

        # Create a constant string Tensor where the i'th element is
        # the human readable class description for the i'th index.
        class_tensor = tf.constant([texts[s] for s in synsets])

        classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                    mapping=class_tensor)

        # Restore variables from training checkpoint.
        variable_averages = tf.train.ExponentialMovingAverage(
            inception_model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            # Restore variables from training checkpoints.
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                full_path = os.path.join(FLAGS.checkpoint_dir,
                                         ckpt.model_checkpoint_path)
                saver.restore(sess, full_path)
                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/imagenet_train/model.ckpt-0,
                # extract global_step from it.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                print('Successfully loaded model from %s at step=%s.' %
                      (full_path, global_step))
            else:
                print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
                return

            # Export inference model.
            init_op = tf.group(tf.initialize_all_tables(), name='init_op')
            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(
                input_tensor=jpegs,
                classes_tensor=classes,
                scores_tensor=values)
            model_exporter.init(default_graph_signature=signature,
                                init_op=init_op)
            model_exporter.export(FLAGS.export_dir, tf.constant(global_step),
                                  sess)
            print('Successfully exported model to %s' % FLAGS.export_dir)
Example #6
def main(argv=None):
    with tf.name_scope('batch_inputs'):
        # Read inventory of training images and labels
        train_file = "./train.txt"
        valid_file = "./valid.txt"

        image_size = IMAGE_SIZE // IMAGE_RESIZE_FACTOR

        train_image_batch, train_label_batch = inputs(train_file,
            batch_size=TRAIN_BATCH_SIZE, input_name="training", num_epochs=TRAIN_EPOCHS)
        valid_image_batch, valid_label_batch = inputs(valid_file,
            batch_size=VALID_BATCH_SIZE, input_name="validation", num_epochs=VALID_EPOCHS)

    x_ = tf.placeholder("float32", shape=[None, image_size, image_size,
        IMAGE_CHANNELS], name="image_batch_placeholder")

    y_ = tf.placeholder("float32", shape=[None, NUM_CLASSES],
        name="label_batch_placeholder")

    # Store layers weight & bias
    with tf.name_scope('weights'):
        weights = {
            # 5x5 conv, 1 input, 32 outputs
            'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
            # 5x5 conv, 32 inputs, 64 outputs
            'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
            # 5x5 conv, 64 inputs, 128 outputs
            'wc3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
            # 5x5 conv, 128 inputs, 256 outputs
            'wc4': tf.Variable(tf.random_normal([5, 5, 128, 256])),
            # fully connected, 19*19*256 inputs, 1024 outputs
            'wd1': tf.Variable(tf.random_normal([10*10*256, 1024])),
            # 1024 inputs, 4 class labels (prediction)
            'out': tf.Variable(tf.random_normal([1024, NUM_CLASSES]))
        }

    with tf.name_scope('biases'):
        biases = {
            'bc1': tf.Variable(tf.random_normal([32])),
            'bc2': tf.Variable(tf.random_normal([64])),
            'bc3': tf.Variable(tf.random_normal([128])),
            'bc4': tf.Variable(tf.random_normal([256])),
            'bd1': tf.Variable(tf.random_normal([1024])),
            'out': tf.Variable(tf.random_normal([NUM_CLASSES]))
        }

    # Create the graph, etc.
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    pred = conv_net(x_, weights, biases, image_size, keep_prob)

    with tf.name_scope('serving'):
        softmax_pred = tf.nn.softmax(conv_net(x_, weights, biases, image_size, 1.0))

    # Calculate loss
    with tf.name_scope('cross_entropy'):
        # Define loss and optimizer
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y_))
        cost_summary = tf.scalar_summary("cost_summary", cost)

    # Optimiser
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost)

    # Evaluate model
    with tf.name_scope('predict'):
        correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        accuracy_summary = tf.scalar_summary("accuracy_summary", accuracy)

    sess = tf.Session()

    writer = tf.train.SummaryWriter("./logs", sess.graph)
    merged = tf.merge_all_summaries()
    init_op = tf.initialize_all_variables()

    step = 0

    with sess.as_default():
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            while not coord.should_stop():
                step += 1
                x, y = sess.run([train_image_batch, train_label_batch])

                if step % TRAIN_BATCH_SIZE == 0:
                    result = sess.run([merged, accuracy], feed_dict={keep_prob: 1.0,
                        x_: x, y_: y})
                    summary_str = result[0]
                    acc = result[1]
                    writer.add_summary(summary_str, step)
                    print("Accuracy at step %s: %s" % (step, acc))

                train_step.run(feed_dict={keep_prob: 0.75,
                    x_: x, y_: y})

        except tf.errors.OutOfRangeError:
            x, y = sess.run([valid_image_batch, valid_label_batch])
            result = sess.run([accuracy], feed_dict={keep_prob: 1.0,
                x_: x, y_: y})
            print("Validation accuracy: %s" % result[0])

        finally:
            coord.request_stop()
            coord.join(threads)
            # export the model
            export_path = "./model/"
            print "Exporting model to " + export_path
            saver = tf.train.Saver(sharded=False)
            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(input_tensor=x_,
                scores_tensor=softmax_pred)
            model_exporter.init(sess.graph.as_graph_def(),
                default_graph_signature=signature)
            model_exporter.export(export_path, tf.constant(EXPORT_VERSION),
                sess)
            sess.close()

    return 0
Example #7
    def doBasicsOneExportPath(self,
                              export_path,
                              clear_devices=False,
                              global_step=GLOBAL_STEP):
        # Build a graph with 2 parameter nodes on different devices.
        tf.reset_default_graph()
        with tf.Session(target="",
                        config=config_pb2.ConfigProto(
                            device_count={"CPU": 2})) as sess:
            # v2 is an unsaved variable derived from v0 and v1.  It is used to
            # exercise the ability to run an init op when restoring a graph.
            with sess.graph.device("/cpu:0"):
                v0 = tf.Variable(10, name="v0")
            with sess.graph.device("/cpu:1"):
                v1 = tf.Variable(20, name="v1")
            v2 = tf.Variable(1, name="v2", trainable=False, collections=[])
            assign_v2 = tf.assign(v2, tf.add(v0, v1))
            init_op = tf.group(assign_v2, name="init_op")

            tf.add_to_collection("v", v0)
            tf.add_to_collection("v", v1)
            tf.add_to_collection("v", v2)

            global_step_tensor = tf.Variable(global_step, name="global_step")
            named_tensor_bindings = {
                "logical_input_A": v0,
                "logical_input_B": v1
            }
            signatures = {
                "foo":
                exporter.regression_signature(input_tensor=v0,
                                              output_tensor=v1),
                "generic":
                exporter.generic_signature(named_tensor_bindings)
            }

            def write_asset(path):
                file_path = os.path.join(path, "file.txt")
                with gfile.FastGFile(file_path, "w") as f:
                    f.write("your data here")

            asset_file = tf.Variable("hello42.txt", name="filename42")
            assets = {("hello42.txt", asset_file)}

            tf.initialize_all_variables().run()

            # Run an export.
            save = tf.train.Saver({
                "v0": v0,
                "v1": v1
            },
                                  restore_sequentially=True,
                                  sharded=True)
            export = exporter.Exporter(save)
            export.init(
                sess.graph.as_graph_def(),
                init_op=init_op,
                clear_devices=clear_devices,
                default_graph_signature=exporter.classification_signature(
                    input_tensor=v0),
                named_graph_signatures=signatures,
                assets=assets,
                assets_callback=write_asset)
            export.export(export_path,
                          global_step_tensor,
                          sess,
                          exports_to_keep=gc.largest_export_versions(2))

        # Restore graph.
        compare_def = tf.get_default_graph().as_graph_def()
        tf.reset_default_graph()
        with tf.Session(target="",
                        config=config_pb2.ConfigProto(
                            device_count={"CPU": 2})) as sess:
            save = tf.train.import_meta_graph(
                os.path.join(export_path,
                             exporter.VERSION_FORMAT_SPECIFIER % global_step,
                             exporter.META_GRAPH_DEF_FILENAME))
            self.assertIsNotNone(save)
            meta_graph_def = save.export_meta_graph()
            collection_def = meta_graph_def.collection_def

            # Validate custom graph_def.
            graph_def_any = collection_def[exporter.GRAPH_KEY].any_list.value
            self.assertEquals(len(graph_def_any), 1)
            graph_def = tf.GraphDef()
            graph_def_any[0].Unpack(graph_def)
            if clear_devices:
                for node in compare_def.node:
                    node.device = ""
            self.assertProtoEquals(compare_def, graph_def)

            # Validate init_op.
            init_ops = collection_def[exporter.INIT_OP_KEY].node_list.value
            self.assertEquals(len(init_ops), 1)
            self.assertEquals(init_ops[0], "init_op")

            # Validate signatures.
            signatures_any = collection_def[
                exporter.SIGNATURES_KEY].any_list.value
            self.assertEquals(len(signatures_any), 1)
            signatures = manifest_pb2.Signatures()
            signatures_any[0].Unpack(signatures)
            default_signature = signatures.default_signature
            self.assertEqual(
                default_signature.classification_signature.input.tensor_name,
                "v0:0")
            bindings = signatures.named_signatures[
                "generic"].generic_signature.map
            self.assertEquals(bindings["logical_input_A"].tensor_name, "v0:0")
            self.assertEquals(bindings["logical_input_B"].tensor_name, "v1:0")
            read_foo_signature = (
                signatures.named_signatures["foo"].regression_signature)
            self.assertEquals(read_foo_signature.input.tensor_name, "v0:0")
            self.assertEquals(read_foo_signature.output.tensor_name, "v1:0")

            # Validate the assets.
            assets_any = collection_def[exporter.ASSETS_KEY].any_list.value
            self.assertEquals(len(assets_any), 1)
            asset = manifest_pb2.AssetFile()
            assets_any[0].Unpack(asset)
            assets_path = os.path.join(
                export_path, exporter.VERSION_FORMAT_SPECIFIER % global_step,
                exporter.ASSETS_DIRECTORY, "file.txt")
            asset_contents = gfile.GFile(assets_path).read()
            self.assertEqual(asset_contents, "your data here")
            self.assertEquals("hello42.txt", asset.filename)
            self.assertEquals("filename42:0", asset.tensor_binding.tensor_name)

            # Validate graph restoration.
            save.restore(
                sess,
                os.path.join(export_path,
                             exporter.VERSION_FORMAT_SPECIFIER % global_step,
                             exporter.VARIABLES_DIRECTORY))
            self.assertEqual(10, tf.get_collection("v")[0].eval())
            self.assertEqual(20, tf.get_collection("v")[1].eval())
            tf.get_collection(exporter.INIT_OP_KEY)[0].run()
            self.assertEqual(30, tf.get_collection("v")[2].eval())
Example #8
    new_model = model_from_config(config)
    new_model.set_weights(weights)

    import tensorflow as tf
    import sys
    sys.path.insert(0, '/Users/dan.dixey/Desktop/QBiz/serving')
    # Unable to Import THIS!! why?
    from tensorflow_serving.session_bundle import exporter

    sess = K.get_session()

    export_path = './Serving'  # where to save the exported graph
    export_version = 1  # version number (integer)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=new_model.input,
                                                  scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)

    model_exporter.export(export_path, tf.constant(export_version), sess)

if show_activation:
    """
        Experimenting with the different ConvNet layers to visualise their outputs.

        This section will push data through each layer and record the activations.

        Normalise (min/max) the data in the whole 3D array, then slice one layer of the 3D array
Example #9
def main(argv=None):

    # Read inventory of training images and labels
    with tf.name_scope('batch_inputs'):
        train_file = "./train.txt"
        valid_file = "./valid.txt"

        image_size = IMAGE_SIZE

        train_image_batch, train_label_batch = inputs(
            train_file, batch_size=TRAIN_BATCH_SIZE, num_epochs=TRAIN_EPOCHS)
        valid_image_batch, valid_label_batch = inputs(
            valid_file, batch_size=VALID_BATCH_SIZE, num_epochs=VALID_EPOCHS)

    # These are image and label batch placeholders which we'll feed in during training
    x_ = tf.placeholder("float32",
                        shape=[None, image_size, image_size, IMAGE_CHANNELS])

    y_ = tf.placeholder("float32", shape=[None, NUM_CLASSES])

    # k is the image size after 4 convolution layers
    k = int(math.ceil(IMAGE_SIZE / 2.0 / 2.0 / 2.0 / 2.0))

    # Store weights for our convolution & fully-connected layers
    with tf.name_scope('weights'):
        weights = {
            # 5x5 conv, 3 input channel, 32 outputs each
            'wc1': tf.Variable(tf.random_normal([5, 5, 1 * IMAGE_CHANNELS,
                                                 32])),
            # 5x5 conv, 32 inputs, 64 outputs
            'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
            # 5x5 conv, 64 inputs, 128 outputs
            'wc3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
            # 5x5 conv, 128 inputs, 256 outputs
            'wc4': tf.Variable(tf.random_normal([5, 5, 128, 256])),
            # fully connected, k * k * 256 inputs, 1024 outputs
            'wd1': tf.Variable(tf.random_normal([k * k * 256, 1024])),
            # 1024 inputs, 2 class labels (prediction)
            'out': tf.Variable(tf.random_normal([1024, NUM_CLASSES]))
        }

    # Store biases for our convolution and fully-connected layers
    with tf.name_scope('biases'):
        biases = {
            'bc1': tf.Variable(tf.random_normal([32])),
            'bc2': tf.Variable(tf.random_normal([64])),
            'bc3': tf.Variable(tf.random_normal([128])),
            'bc4': tf.Variable(tf.random_normal([256])),
            'bd1': tf.Variable(tf.random_normal([1024])),
            'out': tf.Variable(tf.random_normal([NUM_CLASSES]))
        }

    # Define dropout rate to prevent overfitting
    keep_prob = tf.placeholder(tf.float32)

    # Build our graph
    pred = conv_net(x_, weights, biases, image_size, keep_prob)

    # Calculate loss
    with tf.name_scope('cross_entropy'):
        # Define loss and optimizer
        cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_))
        cost_summary = tf.summary.scalar("cost_summary", cost)

    # Run optimizer step
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(
            learning_rate=LEARNING_RATE).minimize(cost)

    # Evaluate model accuracy
    with tf.name_scope('predict'):
        correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        accuracy_summary = tf.summary.scalar("accuracy_summary", accuracy)
        w_summary = tf.summary.histogram("weights", weights['wc1'])
        b_summary = tf.summary.histogram("biases", biases['bc1'])

    sess = tf.Session()

    writer = tf.summary.FileWriter("./logs", sess.graph)

    init_op = tf.global_variables_initializer()
    init_local_op = tf.local_variables_initializer()

    saver = tf.train.Saver()

    step = 0

    with sess.as_default():
        sess.run(init_op)
        sess.run(init_local_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            while not coord.should_stop():
                step += 1
                x, y = sess.run([train_image_batch, train_label_batch])
                train_step.run(feed_dict={keep_prob: 0.75, x_: x, y_: y})

                if step % VALID_STEPS == 0:
                    x, y = sess.run([valid_image_batch, valid_label_batch])
                    conv_summary, relu_summary = generate_image_summary(
                        x_, weights, biases, step, image_size)
                    result = sess.run([
                        cost_summary, accuracy_summary, accuracy, conv_summary,
                        relu_summary, w_summary, b_summary
                    ],
                                      feed_dict={
                                          keep_prob: 1.0,
                                          x_: x,
                                          y_: y
                                      })

                    cost_summary_str = result[0]
                    accuracy_summary_str = result[1]
                    acc = result[2]
                    conv_summary_str = result[3]
                    relu_summary_str = result[4]
                    w_summary_str = result[5]
                    b_summary_str = result[6]

                    # write summaries for viewing in Tensorboard
                    writer.add_summary(accuracy_summary_str, step)
                    writer.add_summary(cost_summary_str, step)
                    writer.add_summary(conv_summary_str, step)
                    writer.add_summary(relu_summary_str, step)
                    writer.add_summary(w_summary_str, step)
                    writer.add_summary(b_summary_str, step)

                    print("Accuracy at step %s: %s" % (step, acc))

                    save_path = saver.save(sess, "./model.ckpt")

        except tf.errors.OutOfRangeError:
            x, y = sess.run([valid_image_batch, valid_label_batch])
            result = sess.run([accuracy],
                              feed_dict={
                                  keep_prob: 1.0,
                                  x_: x,
                                  y_: y
                              })
            print("Validation accuracy: %s" % result[0])

        finally:
            coord.request_stop()
            coord.join(threads)

            # export the model
            export_path = "./model/"
            print "Exporting model to " + export_path
            saver = tf.train.Saver(sharded=False)
            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(
                input_tensor=x_, scores_tensor=softmax_pred)
            model_exporter.init(sess.graph.as_graph_def(),
                                default_graph_signature=signature)
            model_exporter.export(export_path, tf.constant(EXPORT_VERSION),
                                  sess)
            sess.close()

    return 0
Example #10
def Export():
  export_path = "/tmp/half_plus_two"
  with tf.Session() as sess:
    # Make model parameters a&b variables instead of constants to
    # exercise the variable reloading mechanisms.
    a = tf.Variable(0.5, name="a")
    b = tf.Variable(2.0, name="b")

    # Calculate, y = a*x + b
    # here we use a placeholder 'x' which is fed at inference time.
    x = tf.placeholder(tf.float32, name="x")
    y = tf.add(tf.mul(a, x), b, name="y")

    # Setup a standard Saver for our variables.
    save = tf.train.Saver({"a": a, "b": b}, sharded=True)

    # asset_path contains the base directory of assets used in training (e.g.
    # vocabulary files).
    original_asset_path = tf.constant("/tmp/original/export/assets")
    # Ops reading asset files should reference the asset_path tensor
    # which stores the original asset path at training time and the
    # overridden assets directory at restore time.
    asset_path = tf.Variable(original_asset_path,
                             name="asset_path",
                             trainable=False,
                             collections=[])
    assign_asset_path = asset_path.assign(original_asset_path)

    # CopyAssets is used as a callback during export to copy files to the
    # given export directory.
    def CopyAssets(export_path):
      print "copying asset files to: %s" % export_path

    # Use a fixed global step number.
    global_step_tensor = tf.Variable(123, name="global_step")

    # Create a RegressionSignature for our input and output.
    signature = exporter.regression_signature(input_tensor=x, output_tensor=y)

    # Create two filename assets and corresponding tensors.
    # TODO(b/26254158) Consider adding validation of file existence as well as
    # hashes (e.g. sha1) for consistency.
    original_filename1 = tf.constant("hello1.txt")
    filename1 = tf.Variable(original_filename1,
                            name="filename1",
                            trainable=False,
                            collections=[])
    assign_filename1 = filename1.assign(original_filename1)
    original_filename2 = tf.constant("hello2.txt")
    filename2 = tf.Variable(original_filename2,
                            name="filename2",
                            trainable=False,
                            collections=[])
    assign_filename2 = filename2.assign(original_filename2)
    assets = {("hello1.txt", original_filename1),
              ("hello2.txt", original_filename2)}

    # Init op contains a group of all variables that we assign.
    init_op = tf.group(assign_asset_path, assign_filename1, assign_filename2)

    # Run an export.
    tf.initialize_all_variables().run()
    export = exporter.Exporter(save)
    export.init(sess.graph.as_graph_def(),
                init_op=init_op,
                default_graph_signature=signature,
                assets=assets,
                assets_callback=CopyAssets)
    export.export(export_path, global_step_tensor, sess)
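A sketch of how the exported half_plus_two model could be reloaded and checked, reusing the directory-layout constants from the exporter module exactly as Example #7 does; the export path and global step 123 match the values used above, and the rest is an illustrative assumption:

# Reload the exported graph and variables, then run y = 0.5 * x + 2 on a sample input.
import os

with tf.Session() as sess:
    version_dir = os.path.join("/tmp/half_plus_two",
                               exporter.VERSION_FORMAT_SPECIFIER % 123)
    restorer = tf.train.import_meta_graph(
        os.path.join(version_dir, exporter.META_GRAPH_DEF_FILENAME))
    restorer.restore(sess,
                     os.path.join(version_dir, exporter.VARIABLES_DIRECTORY))
    x = sess.graph.get_tensor_by_name("x:0")
    y = sess.graph.get_tensor_by_name("y:0")
    print(sess.run(y, feed_dict={x: [10.0]}))  # expected: [7.0]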