Example #1
0
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  # Evaluate only whole batches; a trailing partial batch is dropped so every
  # feed_dict holds exactly FLAGS.batch_size examples.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  # Force float division: under Python 2 (this file uses xrange),
  # true_count / num_examples truncates to 0 for any imperfect epoch.
  precision = float(true_count) / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))
Example #2
0
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  # Evaluate only whole batches; a trailing partial batch is dropped so every
  # feed_dict holds exactly FLAGS.batch_size examples.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  # Force float division: under Python 2 (this file uses xrange),
  # true_count / num_examples truncates to 0 for any imperfect epoch.
  precision = float(true_count) / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))
def do_eval_summary(tag, sess, eval_correct, images_placeholder,
                    labels_placeholder, data_set):
    """Evaluates one full epoch and returns a scalar summary of the error.

    Args:
        tag: Name used for the scalar summary.
        sess: The session in which the model has been trained.
        eval_correct: Tensor returning the number of correct predictions.
        images_placeholder: The images placeholder.
        labels_placeholder: The labels placeholder.
        data_set: The set of images and labels to evaluate, from
            input_data.read_data_sets().

    Returns:
        The result of running the tf.scalar_summary op (a serialized
        summary) for the epoch's error rate.
    """
    true_count = 0
    # Evaluate only whole batches; a trailing partial batch is dropped.
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size

    for step in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_placeholder,
                                   labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    # Force float division: under Python 2 (this file uses xrange),
    # true_count / num_examples truncates to 0, which would make the
    # reported error 1 for any imperfect epoch.
    error = 1 - float(true_count) / num_examples
    return sess.run(tf.scalar_summary(tag, tf.identity(error)))
Example #4
0
def do_eval_summary(tag,
                    sess,
                    eval_correct,
                    images_placeholder,
                    labels_placeholder,
                    data_set):
  """Evaluates one full epoch and returns a scalar summary of the error.

  Args:
    tag: Name used for the scalar summary.
    sess: The session in which the model has been trained.
    eval_correct: Tensor returning the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().

  Returns:
    The result of running the tf.scalar_summary op (a serialized summary)
    for the epoch's error rate.
  """
  true_count = 0
  # Evaluate only whole batches; a trailing partial batch is dropped.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  # Force float division: under Python 2 (this file uses xrange),
  # true_count / num_examples truncates to 0, which would make the
  # reported error 1 for any imperfect epoch.
  error = 1 - float(true_count) / num_examples
  return sess.run(tf.scalar_summary(tag, tf.identity(error)))
Example #5
0
from data import read_data_sets
from data import placeholder_inputs, fill_feed_dict
import tensorflow as tf

# Command-line flag definitions (legacy tf.app.flags API). `flags` and
# `tf.app.flags` are the same module, so the mixed spellings below all
# register onto the same FLAGS object.
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.app.flags.DEFINE_string('hello', 'Hello World!', 'Example argument')
flags.DEFINE_integer('K', 5000, 'Number of top-K words')
flags.DEFINE_integer('batch_size', 10, 'Size of batch')
flags.DEFINE_integer('max_steps', 1000, 'Maximum number of steps')

# 80% train / 10% validation split (the remaining 10% is presumably test).
# NOTE(review): the meaning of `interval=(5, 15)` is defined in the local
# `data` module and can't be determined from this chunk — confirm there.
train, validation, test = read_data_sets(train_ratio=.8,
                                         validation_ratio=.1,
                                         interval=(5, 15))

input_placeholder, label_placeholder = placeholder_inputs(FLAGS.batch_size)
sess = tf.Session()

# Training loop skeleton.
for step in xrange(FLAGS.max_steps):
    feed_dict = fill_feed_dict(train, input_placeholder, label_placeholder)
    # NOTE(review): sess.run([]) fetches no ops — presumably a placeholder
    # where the real train op belongs; as written each iteration does no work.
    sess.run([], feed_dict=feed_dict)
    # input sequence length -> dimension = input_length * 5000
    # NOTE(review): no DEFINE for 'input_length' appears in this chunk, so
    # this raises AttributeError unless the flag is registered elsewhere.
    # Also loop-invariant (could be hoisted) and unused in the visible code.
    input_length = FLAGS.input_length