Example #1
import os
import time
from datetime import datetime

import numpy as np
import tensorflow as tf

import v2

FLAGS = tf.app.flags.FLAGS


def main(argv=None):
    global_step = tf.Variable(0, trainable=False)

    files = [os.path.join(FLAGS.data_dir, 'data%d.tfrecords' % i) for i in range(1, 6)]
    images, labels = v2.inputs(files, distort=True)
    logits = v2.inference(images)
    losses = v2.loss(logits, labels)
    train_op = v2.train(losses, global_step)
    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver(tf.all_variables(), max_to_keep=10)
    with tf.Session() as sess:
        summary_writer = tf.train.SummaryWriter('train', graph_def=sess.graph_def)
        sess.run(tf.initialize_all_variables())

        tf.train.start_queue_runners(sess=sess)

        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, losses])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            format_str = '%s: step %d, loss = %.5f (%.3f sec/batch)'
            print(format_str % (datetime.now(), step, loss_value, duration))

            if step % 100 == 0 or (step + 1) == FLAGS.max_steps:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
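
The v2.inputs function is not shown in these excerpts. As a rough sketch, a queue-based TFRecord pipeline from the same TF 0.x era might look like the following; the feature names, image shape, and batch size are assumptions, not part of the original module:

def inputs_sketch(files, batch_size=128):
    # Queue up the input files and read serialized examples.
    filename_queue = tf.train.string_input_producer(files)
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)

    # Assumed feature names and image shape.
    features = tf.parse_single_example(serialized, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image_raw': tf.FixedLenFeature([], tf.string),
    })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.cast(tf.reshape(image, [32, 32, 3]), tf.float32)
    label = tf.cast(features['label'], tf.int32)

    # The shuffle queue is what tf.train.start_queue_runners() in
    # main() actually starts.
    return tf.train.shuffle_batch(
        [image, label], batch_size=batch_size,
        capacity=3 * batch_size, min_after_dequeue=batch_size)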
Example #2
import os

import tensorflow as tf

import v2

FLAGS = tf.app.flags.FLAGS


def main(argv=None):
    files = [os.path.join(FLAGS.data_dir, "test.tfrecords")]
    images, labels = v2.inputs(files, distort=False)
    logits = v2.inference(images)
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    variable_averages = tf.train.ExponentialMovingAverage(v2.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir)
    eval_once(saver, summary_writer, top_k_op, summary_op)
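
The eval_once function is not defined in these excerpts. A plausible implementation, modeled on the classic CIFAR-10 evaluation loop, is sketched below; it restores the latest checkpoint from FLAGS.train_dir (the directory used by the training loop in Example #1), and the num_eval_batches flag is hypothetical:

def eval_once(saver, summary_writer, top_k_op, summary_op):
    with tf.Session() as sess:
        # Restore the moving-average weights saved by the training loop.
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if not ckpt or not ckpt.model_checkpoint_path:
            print('No checkpoint file found')
            return
        saver.restore(sess, ckpt.model_checkpoint_path)
        global_step = ckpt.model_checkpoint_path.split('-')[-1]

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            true_count, total_count = 0, 0
            for _ in range(FLAGS.num_eval_batches):  # hypothetical flag
                predictions = sess.run(top_k_op)  # bool vector, one entry per example
                true_count += predictions.sum()
                total_count += predictions.size
            precision = float(true_count) / total_count
            print('precision = %.3f' % precision)

            # Attach precision to the merged summaries and log it.
            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='precision', simple_value=precision)
            summary_writer.add_summary(summary, global_step)
        finally:
            coord.request_stop()
            coord.join(threads)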
Example #3
import os

import tensorflow as tf

import v2

FLAGS = tf.app.flags.FLAGS


def main(argv=None):
    files = [os.path.join(FLAGS.data_dir, 'test.tfrecords')]
    images, labels = v2.inputs(files, distort=False)
    logits = v2.inference(images)
    top_k_op = tf.nn.in_top_k(logits, labels, 1)
    variable_averages = tf.train.ExponentialMovingAverage(v2.MOVING_AVERAGE_DECAY)
    variables_to_restore = {}
    for v in tf.all_variables():
        if v in tf.trainable_variables():
            name = variable_averages.average_name(v)
        else:
            name = v.op.name
        variables_to_restore[name] = v
    saver = tf.train.Saver(variables_to_restore)

    eval_once(saver, top_k_op)
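
The manual loop above builds the same name-to-variable map that variables_to_restore() produced in Example #2: trainable variables are restored from their moving-average shadow names, everything else from its own name. A quick illustration of what average_name() returns:

w = tf.Variable(tf.zeros([10]), name='weights')
ema = tf.train.ExponentialMovingAverage(0.999)
print(ema.average_name(w))  # -> weights/ExponentialMovingAverage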
Example #4
from flask import Flask, jsonify, request
import tensorflow as tf

import base64
import urllib
import os
import gzip

import v2

v2.BATCH_SIZE = 1

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("checkpoint_path", "/tmp/model.ckpt", """Path to the model checkpoint file.""")
tf.app.flags.DEFINE_integer("port", 5000, """Application port.""")

images = tf.placeholder(tf.float32, shape=(1, v2.INPUT_SIZE, v2.INPUT_SIZE, 3))
logits = tf.nn.softmax(v2.inference(images))

sess = tf.Session()
variable_averages = tf.train.ExponentialMovingAverage(v2.MOVING_AVERAGE_DECAY)
variables_to_restore = {}
for v in tf.all_variables():
    if v in tf.trainable_variables():
        restore_name = variable_averages.average_name(v)
    else:
        restore_name = v.op.name
    variables_to_restore[restore_name] = v
saver = tf.train.Saver(variables_to_restore)
if not os.path.isfile(FLAGS.checkpoint_path):
    print("No checkpoint file found; downloading it")
    urllib.urlretrieve(os.environ["DOWNLOAD_URL"], FLAGS.checkpoint_path + ".gz")
    # Decompress the downloaded archive next to its .gz file.
    with gzip.open(FLAGS.checkpoint_path + ".gz") as src, open(FLAGS.checkpoint_path, "wb") as dst:
        dst.write(src.read())
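
The excerpt stops before the weights are restored and any route is registered. A hypothetical continuation is sketched below; the /predict route and the base64-encoded "image" form field are assumptions about the request format, not part of the original:

# Decode and resize an incoming JPEG with a small, one-time subgraph.
jpeg_data = tf.placeholder(tf.string)
decoded = tf.image.resize_images(
    tf.image.decode_jpeg(jpeg_data, channels=3), v2.INPUT_SIZE, v2.INPUT_SIZE)

saver.restore(sess, FLAGS.checkpoint_path)

app = Flask(__name__)

@app.route("/predict", methods=["POST"])
def predict():
    # Assumed request format: a base64-encoded JPEG in the "image" field.
    image = sess.run(decoded, feed_dict={jpeg_data: base64.b64decode(request.form["image"])})
    scores = sess.run(logits, feed_dict={images: image.reshape((1, v2.INPUT_SIZE, v2.INPUT_SIZE, 3))})
    return jsonify(scores=scores[0].tolist())

if __name__ == "__main__":
    app.run(port=FLAGS.port)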