import os

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Project-specific helpers used below (dataset_is_mnist_family,
# get_default_scratch, local_mnist, _load_celeba, and the model-building
# functions inference/loss/training/validation) are assumed to be defined
# elsewhere in the surrounding project.


def load_dataset(config):
    """Load dataset following instruction in `config`."""
    if dataset_is_mnist_family(config['dataset']):
        crop_width = config.get('crop_width', None)  # unused for MNIST; only returned
        img_width = config.get('img_width', None)  # unused for MNIST; only returned

        scratch = config.get('scratch', get_default_scratch())
        basepath = os.path.join(scratch, config['dataset'].lower())
        data_path = os.path.join(basepath, 'data')
        save_path = os.path.join(basepath, 'ckpts')

        tf.gfile.MakeDirs(data_path)
        tf.gfile.MakeDirs(save_path)

        # Black-on-white MNIST (harder to learn than white-on-black MNIST).
        # Running locally; the data is expected to be pre-downloaded to data_path.
        mnist_train, mnist_eval, mnist_test = local_mnist.read_data_sets(
            data_path, one_hot=True)

        train_data = np.concatenate([mnist_train.images, mnist_eval.images],
                                    axis=0)
        attr_train = np.concatenate([mnist_train.labels, mnist_eval.labels],
                                    axis=0)
        eval_data = mnist_test.images
        attr_eval = mnist_test.labels

        attribute_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

    elif config['dataset'] == 'CELEBA':
        crop_width = config['crop_width']
        img_width = config['img_width']
        postfix = '_crop_%d_res_%d.npy' % (crop_width, img_width)

        # Load Data
        scratch = config.get('scratch', get_default_scratch())
        basepath = os.path.join(scratch, 'celeba')
        data_path = os.path.join(basepath, 'data')
        save_path = os.path.join(basepath, 'ckpts')

        (train_data, eval_data, _, attr_train, attr_eval, _,
         attribute_names) = _load_celeba(data_path, postfix)
    else:
        raise NotImplementedError('Unsupported dataset: %s' % config['dataset'])

    return ObjectBlob(
        crop_width=crop_width,
        img_width=img_width,
        basepath=basepath,
        data_path=data_path,
        save_path=save_path,
        train_data=train_data,
        attr_train=attr_train,
        eval_data=eval_data,
        attr_eval=attr_eval,
        attribute_names=attribute_names,
    )
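
# `ObjectBlob` is not defined in this fragment. A minimal stand-in, assuming
# it is simply a bag of attributes built from keyword arguments:
class ObjectBlob(object):
    """Simple attribute bag: every keyword argument becomes a field."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

# Hypothetical usage (the dataset name and scratch path are illustrative):
#
#   blob = load_dataset({'dataset': 'MNIST', 'scratch': '/tmp/scratch'})
#   print(blob.train_data.shape, blob.attribute_names)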


def run_training():
    """Train an MNIST model for 2,000 steps and report test accuracy."""
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, shape=[None, 784])
        y_ = tf.placeholder(tf.float32, shape=[None, 10])
        keep_prob = tf.placeholder(tf.float32)

        y_conv = inference(x, keep_prob)
        cross_entropy = loss(y_conv, y_)
        train_step = training(cross_entropy)
        accuracy = validation(y_conv, y_)

        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()

        sess = tf.InteractiveSession()
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver()

        # Write the graph definition (with tensor shapes) to graph.pbtxt.
        tf.train.write_graph(sess.graph.as_graph_def(add_shapes=True),
                             FLAGS.train_dir, "graph.pbtxt")

        for step in range(2000):
            batch = mnist.train.next_batch(50)
            feed_dict = {x: batch[0], y_: batch[1], keep_prob: 0.5}
            _, loss_value = sess.run([train_step, cross_entropy],
                                     feed_dict=feed_dict)

            if step % 500 == 0:
                saver.save(sess, os.path.join(FLAGS.train_dir, "model.ckpt"),
                           step)
                # Note: accuracy here is computed with dropout still active
                # (keep_prob=0.5), so it understates the true training accuracy.
                train_accuracy = accuracy.eval(feed_dict=feed_dict)
                print("step %d, loss_value %.2f,training accuracy %g" %
                      (step, loss_value, train_accuracy))
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
        # Test
        print("test accuracy %g" % accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels,
            keep_prob: 1.0
        }))
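
# `FLAGS` is referenced above but never defined in this fragment. A minimal
# stand-in using tf.app.flags; the default path below is only illustrative.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/mnist_train',
                           'Directory for checkpoints and summaries.')

# Hypothetical entry point:
#
#   if __name__ == '__main__':
#       run_training()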


# Example 3: fully connected head and training loop of a small MNIST convnet
# (TensorFlow 1.x). The fragment assumes the standard two-conv-layer preamble;
# a minimal version of the assumed definitions follows so it runs on its own.
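# Compact sketch of the assumed preamble. The helper names weight_variable,
# bias_variable, h_pool2_flat, W_fc1 and b_fc1 are taken from the code below;
# conv_block and the 32/64 filter counts are assumptions matching the usual
# two-conv-layer MNIST setup.
def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv_block(inputs, in_channels, out_channels):
    """5x5 convolution + ReLU followed by 2x2 max pooling."""
    W = weight_variable([5, 5, in_channels, out_channels])
    b = bias_variable([out_channels])
    h = tf.nn.relu(
        tf.nn.conv2d(inputs, W, strides=[1, 1, 1, 1], padding='SAME') + b)
    return tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])

# Two conv/pool blocks reduce 28x28x1 to 7x7x64; flatten for the dense layers.
h_pool2 = conv_block(conv_block(x_image, 1, 32), 32, 64)
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

# First fully connected layer (1024 units).
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])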
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout on the fully connected layer; keep_prob is fed at run time
# (0.5 during training, 1.0 for evaluation).
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Readout layer: map the 1024 features to 10 class logits.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

# Train and evaluate.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        # Evaluate on the current batch with dropout disabled.
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    # Train with 50% dropout on the fully connected layer.
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print("test accuracy %g" % accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
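
# Optional: feeding the full 10,000-image test set at once can be memory
# hungry. A batched evaluation sketch (the batch size of 1000 is arbitrary;
# it divides the test set evenly, so the mean of per-batch accuracies equals
# the overall accuracy):
test_accuracies = []
for start in range(0, mnist.test.num_examples, 1000):
    end = start + 1000
    test_accuracies.append(accuracy.eval(feed_dict={
        x: mnist.test.images[start:end],
        y_: mnist.test.labels[start:end],
        keep_prob: 1.0}))
print("batched test accuracy %g" % np.mean(test_accuracies))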