Ejemplo n.º 1
0
def main(argv=None):
    """Train the v2 model on data1..data5 TFRecords, checkpointing every 100 steps.

    Builds the input/inference/loss/train graph, then runs the training loop
    for FLAGS.max_steps steps, writing summaries and saving a checkpoint every
    100 steps and at the final step.

    Args:
        argv: Unused; present for compatibility with tf.app.run().

    Raises:
        AssertionError: If the loss becomes NaN (model diverged).
    """
    # Step counter incremented by the train op; not trained itself.
    global_step = tf.Variable(0, trainable=False)

    # Training shards data1.tfrecords .. data5.tfrecords.
    files = [os.path.join(FLAGS.data_dir, 'data%d.tfrecords' % i) for i in range(1, 6)]
    images, labels = v2.inputs(files, distort=True)
    logits = v2.inference(images)
    losses = v2.loss(logits, labels)
    train_op = v2.train(losses, global_step)
    summary_op = tf.merge_all_summaries()
    # Keep the 10 most recent checkpoints.
    saver = tf.train.Saver(tf.all_variables(), max_to_keep=10)
    with tf.Session() as sess:
        summary_writer = tf.train.SummaryWriter('train', graph_def=sess.graph_def)
        sess.run(tf.initialize_all_variables())

        # Start input pipeline threads; required before the first sess.run.
        tf.train.start_queue_runners(sess=sess)

        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, losses])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            format_str = '%s: step %d, loss = %.5f (%.3f sec/batch)'
            # print() function form: the original Python 2 print statement is a
            # syntax error under Python 3.
            print(format_str % (datetime.now(), step, loss_value, duration))

            # Summaries + checkpoint every 100 steps and at the final step.
            if step % 100 == 0 or (step + 1) == FLAGS.max_steps:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Ejemplo n.º 2
0
def main(argv=None):
    """Run a single evaluation pass on the test set.

    Builds the eval graph over test.tfrecords, restores the moving-average
    shadow variables, and delegates the actual evaluation loop to eval_once.

    Args:
        argv: Unused; present for compatibility with tf.app.run().
    """
    record_files = [os.path.join(FLAGS.data_dir, "test.tfrecords")]
    images, labels = v2.inputs(record_files, distort=False)
    logits = v2.inference(images)
    # True where the ground-truth label is the model's top prediction.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the exponential-moving-average shadow values instead of the
    # raw trained weights.
    ema = tf.train.ExponentialMovingAverage(v2.MOVING_AVERAGE_DECAY)
    saver = tf.train.Saver(ema.variables_to_restore())

    writer = tf.train.SummaryWriter(FLAGS.eval_dir)
    merged_summaries = tf.merge_all_summaries()
    eval_once(saver, writer, top_k_op, merged_summaries)
Ejemplo n.º 3
0
def main(argv=None):
    """Run a single evaluation pass, building the restore map by hand.

    Like the helper-based variant, restores moving-average values for
    trainable variables and raw values for everything else, then delegates
    to eval_once.

    Args:
        argv: Unused; present for compatibility with tf.app.run().
    """
    # A plain one-element list; the original built it via a pointless
    # range(1, 2) comprehension.
    files = [os.path.join(FLAGS.data_dir, 'test.tfrecords')]
    images, labels = v2.inputs(files, distort=False)
    logits = v2.inference(images)
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    variable_averages = tf.train.ExponentialMovingAverage(v2.MOVING_AVERAGE_DECAY)
    # Hoisted out of the loop: tf.trainable_variables() builds a fresh list
    # on every call, making membership tests O(n) per variable.
    trainable = set(tf.trainable_variables())
    variables_to_restore = {}
    for v in tf.all_variables():
        # Trainable variables are restored from their EMA shadow name;
        # everything else (e.g. global_step) from its own name.
        if v in trainable:
            name = variable_averages.average_name(v)
        else:
            name = v.op.name
        variables_to_restore[name] = v
    saver = tf.train.Saver(variables_to_restore)

    eval_once(saver, top_k_op)