Code Example #1
def run_training():
  data_sets = data_mnist.read_data_sets()
  with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels_placeholder)
    train_op = mnist.training(loss, FLAGS.learning_rate)
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()
    
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time

      if step % 100 == 0:
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()

      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
        saver.save(sess, checkpoint_file, global_step=step)
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train)
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation)
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
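
All of the listings on this page target pre-1.0 TensorFlow, so they use the old summary and initialization names (tf.merge_all_summaries, tf.train.SummaryWriter, tf.initialize_all_variables, tf.scalar_summary, and so on). As a minimal sketch, assuming TensorFlow 1.x and otherwise unchanged code, the graph setup above would become:

summary_op = tf.summary.merge_all()             # was tf.merge_all_summaries()
saver = tf.train.Saver()

sess = tf.Session()
sess.run(tf.global_variables_initializer())     # was tf.initialize_all_variables()
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)  # was tf.train.SummaryWriter
# Likewise, in the TensorBoard examples further down:
#   tf.scalar_summary    -> tf.summary.scalar
#   tf.histogram_summary -> tf.summary.histogram
#   tf.image_summary     -> tf.summary.image
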
Code Example #2
def test_mnist():
    mnist = data_mnist.read_data_sets(one_hot=True)

    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    y_pred = residual_network(x, 10)

    cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
    optimizer = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # %% We'll train in minibatches and report accuracy:
    batch_size = 50
    n_epochs = 5
    for epoch_i in range(n_epochs):
        # Training
        train_accuracy = 0
        for batch_i in range(mnist.train.num_examples // batch_size):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            local_accuracy = sess.run([optimizer, accuracy],
                                      feed_dict={
                                          x: batch_xs,
                                          y: batch_ys
                                      })[1]
            print(local_accuracy)
            train_accuracy += local_accuracy
        train_accuracy /= (mnist.train.num_examples // batch_size)

        # Validation
        valid_accuracy = 0
        for batch_i in range(mnist.validation.num_examples // batch_size):
            batch_xs, batch_ys = mnist.validation.next_batch(batch_size)
            valid_accuracy += sess.run(accuracy,
                                       feed_dict={
                                           x: batch_xs,
                                           y: batch_ys
                                       })
        valid_accuracy /= (mnist.validation.num_examples // batch_size)
        print('epoch:', epoch_i, ', train:', train_accuracy, ', valid:',
              valid_accuracy)
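
One caveat about the loss above: -tf.reduce_sum(y * tf.log(y_pred)) produces NaN as soon as y_pred contains an exact zero. A minimal, hypothetical variant, assuming residual_network were changed to return unscaled logits (y_logits, a name not in the original) instead of softmax probabilities:

# Numerically stable cross-entropy on logits; the fused op applies the
# softmax internally, so no explicit tf.log is needed.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_logits, labels=y))
optimizer = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
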
Code Example #3
def run_training():
    data_sets = data_mnist.read_data_sets()
    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        logits = mnist.inference(images_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2)
        loss = mnist.loss(logits, labels_placeholder)
        train_op = mnist.training(loss, FLAGS.learning_rate)
        eval_correct = mnist.evaluation(logits, labels_placeholder)

        summary_op = tf.merge_all_summaries()
        saver = tf.train.Saver()

        sess = tf.Session()
        sess.run(tf.initialize_all_variables())
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
                saver.save(sess, checkpoint_file, global_step=step)
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.train)
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.validation)
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.test)
Code Example #4
File: mnist.py, Project: JaysonsdLin/MLPythonLib
def main(_=None):
    image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28 * 28])
    labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])

    if FLAGS.model == 'full':
        result = multilayer_fully_connected(image_placeholder,
                                            labels_placeholder)
    elif FLAGS.model == 'conv':
        result = lenet5(image_placeholder, labels_placeholder)
    else:
        raise ValueError('model must be full or conv: %s' % FLAGS.model)

    accuracy = result.softmax.evaluate_classifier(labels_placeholder,
                                                  phase=pt.Phase.test)
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train_op = pt.apply_optimizer(optimizer, losses=[result.loss])

    # Data
    mnist = data_mnist.read_data_sets(one_hot=True)

    runner = pt.train.Runner(save_path=FLAGS.save_path)

    with tf.Session():
        for epoch in xrange(10):
            # Training
            runner.train_model(
                train_op,
                result.loss,
                EPOCH_SIZE,
                feed_vars=(image_placeholder, labels_placeholder),
                feed_data=pt.train.feed_numpy(BATCH_SIZE, mnist.train.images,
                                              mnist.train.labels),
                print_every=100)
            # Accuracy
            classification_accuracy = runner.evaluate_model(
                accuracy,
                TEST_SIZE,
                feed_vars=(image_placeholder, labels_placeholder),
                feed_data=pt.train.feed_numpy(BATCH_SIZE, mnist.test.images,
                                              mnist.test.labels))
            print('Accuracy after %d epoch %g%%' %
                  (epoch + 1, classification_accuracy * 100))
Code Example #5
File: mnist.py, Project: CosmosShadow/MLPythonLib
def main(_=None):
  image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28*28])
  labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])

  if FLAGS.model == 'full':
    result = multilayer_fully_connected(image_placeholder, labels_placeholder)
  elif FLAGS.model == 'conv':
    result = lenet5(image_placeholder, labels_placeholder)
  else:
    raise ValueError('model must be full or conv: %s' % FLAGS.model)

  accuracy = result.softmax.evaluate_classifier(labels_placeholder, phase=pt.Phase.test)
  optimizer = tf.train.GradientDescentOptimizer(0.01)
  train_op = pt.apply_optimizer(optimizer, losses=[result.loss])

  # Data
  mnist = data_mnist.read_data_sets(one_hot=True)

  runner = pt.train.Runner(save_path=FLAGS.save_path)

  with tf.Session():
    for epoch in xrange(10):
      # Training
      runner.train_model(
          train_op,
          result.loss,
          EPOCH_SIZE,
          feed_vars=(image_placeholder, labels_placeholder),
          feed_data=pt.train.feed_numpy(BATCH_SIZE, mnist.train.images, mnist.train.labels),
          print_every=100)
      # Accuracy
      classification_accuracy = runner.evaluate_model(
          accuracy,
          TEST_SIZE,
          feed_vars=(image_placeholder, labels_placeholder),
          feed_data=pt.train.feed_numpy(BATCH_SIZE, mnist.test.images, mnist.test.labels))
      print('Accuracy after %d epoch %g%%' % (epoch + 1, classification_accuracy * 100))
Code Example #6
def test_mnist():
    mnist = data_mnist.read_data_sets(one_hot=True)

    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    y_pred = residual_network(x, 10)

    cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
    optimizer = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # %% We'll train in minibatches and report accuracy:
    batch_size = 50
    n_epochs = 5
    for epoch_i in range(n_epochs):
        # Training
        train_accuracy = 0
        for batch_i in range(mnist.train.num_examples // batch_size):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            local_accuracy = sess.run([optimizer, accuracy], feed_dict={x: batch_xs, y: batch_ys})[1]
            print(local_accuracy)
            train_accuracy += local_accuracy 
        train_accuracy /= (mnist.train.num_examples // batch_size)

        # Validation
        valid_accuracy = 0
        for batch_i in range(mnist.validation.num_examples // batch_size):
            batch_xs, batch_ys = mnist.validation.next_batch(batch_size)
            valid_accuracy += sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys})
        valid_accuracy /= (mnist.validation.num_examples // batch_size)
        print('epoch:', epoch_i, ', train:', train_accuracy, ', valid:', valid_accuracy)
Code Example #7
File: reconstruct.py, Project: lianglili/DRAW

# Training
with graph.as_default():
	# GPU
	config = tf.ConfigProto()
	config.gpu_options.per_process_gpu_memory_fraction = 0.8
	config.gpu_options.allow_growth = True

	# sess
	sess = tf.InteractiveSession(config=config)
	tf.initialize_all_variables().run()

	model.restore(sess, save_path)

	data = mnist.read_data_sets()

	x_, _ = data.train.next_batch(hp.batch_size)
	x_ = (x_ > 0.5).astype(np.float32)  # binarize

	images = sess.run(model.output_tensors, {model.x: x_})
	unit_images = []
	for T, image in enumerate(images):
		imgs = image.reshape(-1, hp.A, hp.B)[:100]
		img = images2one(imgs)
		img = (np.clip(img, 0.0, 1.0) * 255).astype(np.uint8)
		unit_images.append(img)
		imageio.imwrite('images/' + str(T) + '.png', img)

	imageio.mimsave('images/reconstruct.gif', unit_images, duration=1.0)
Code Example #8
# coding: utf-8
import time
import tensorflow as tf
import prettytensor as pt
import numpy as np
import cmtf.data.data_mnist as data_mnist


@pt.Register
def leaky_relu(input_pt):
    return tf.select(tf.greater(input_pt, 0.0), input_pt, 0.01 * input_pt)


# Data
mnist = data_mnist.read_data_sets(one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
dropout = tf.placeholder(tf.float32, [1])

x_reshape = tf.reshape(x, [-1, 28, 28, 1])
seq = pt.wrap(x_reshape).sequential()

# CNN
# seq.conv2d(6, 16)
# seq.max_pool(2, 2)
# seq.conv2d(6, 16)
# seq.max_pool(2, 2)


def residual(seq, stride, output):
Code Example #9
# coding: utf-8
import tensorflow as tf
import prettytensor as pt
import numpy as np
import cmtf.data.data_mnist as data_mnist

# Data
mnist = data_mnist.read_data_sets(one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
pretty_input = pt.wrap(x)
softmax, loss = (
	pretty_input.
	fully_connected(100, activation_fn=tf.nn.relu).
	fully_connected(10, activation_fn=None).
	softmax_classifier(10, labels=y))

accuracy = softmax.evaluate_classifier(y)
optimizer = tf.train.GradientDescentOptimizer(0.01)  # learning rate
train_op = pt.apply_optimizer(optimizer, losses=[loss])

with tf.Session() as sess:
	sess.run(tf.initialize_all_variables())
	# train
	for i in range(2000):
		batch_xs, batch_ys = mnist.train.next_batch(100)
		_, loss_val = sess.run([train_op, loss], feed_dict={x: batch_xs, y: batch_ys})
		if (i + 1) % 100 == 0:
			print('index: %d, loss: %f' % (i + 1, loss_val))
	# test
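	# The original listing is truncated here; the lines below are a hypothetical
	# sketch of the test step, reusing the `accuracy` tensor defined above.
	test_accuracy = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
	print('test accuracy: %f' % test_accuracy)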
Code Example #10
def nn_train(to_name, param):
	global lock, running
	itchat.send(u'开工了...', to_name)
	# Lock
	with lock:
		running = True

	# mnist data reading
	mnist = data_mnist.read_data_sets(one_hot=True)

	# Parameters
	# learning_rate = 0.001
	# training_iters = 200000
	# batch_size = 128
	# display_step = 10
	learning_rate, training_iters, batch_size, display_step = param

	# Network Parameters
	n_input = 784 # MNIST data input (img shape: 28*28)
	n_classes = 10 # MNIST total classes (0-9 digits)
	dropout = 0.75 # Dropout, probability to keep units

	# tf Graph input
	x = tf.placeholder(tf.float32, [None, n_input])
	y = tf.placeholder(tf.float32, [None, n_classes])
	keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)


	# Create some wrappers for simplicity
	def conv2d(x, W, b, strides=1):
		# Conv2D wrapper, with bias and relu activation
		x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
		x = tf.nn.bias_add(x, b)
		return tf.nn.relu(x)


	def maxpool2d(x, k=2):
		# MaxPool2D wrapper
		return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')


	# Create model
	def conv_net(x, weights, biases, dropout):
		# Reshape input picture
		x = tf.reshape(x, shape=[-1, 28, 28, 1])

		# Convolution Layer
		conv1 = conv2d(x, weights['wc1'], biases['bc1'])
		# Max Pooling (down-sampling)
		conv1 = maxpool2d(conv1, k=2)

		# Convolution Layer
		conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
		# Max Pooling (down-sampling)
		conv2 = maxpool2d(conv2, k=2)

		# Fully connected layer
		# Reshape conv2 output to fit fully connected layer input
		fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
		fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
		fc1 = tf.nn.relu(fc1)
		# Apply Dropout
		fc1 = tf.nn.dropout(fc1, dropout)

		# Output, class prediction
		out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
		return out

	# Store layers weight & bias
	weights = {
		# 5x5 conv, 1 input, 32 outputs
		'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
		# 5x5 conv, 32 inputs, 64 outputs
		'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
		# fully connected, 7*7*64 inputs, 1024 outputs
		'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
		# 1024 inputs, 10 outputs (class prediction)
		'out': tf.Variable(tf.random_normal([1024, n_classes]))
	}

	biases = {
		'bc1': tf.Variable(tf.random_normal([32])),
		'bc2': tf.Variable(tf.random_normal([64])),
		'bd1': tf.Variable(tf.random_normal([1024])),
		'out': tf.Variable(tf.random_normal([n_classes]))
	}

	# Construct model
	pred = conv_net(x, weights, biases, keep_prob)

	# Define loss and optimizer
	cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
	optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

	# Evaluate model
	correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
	accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))


	# Initializing the variables
	init = tf.initialize_all_variables()

	# Launch the graph
	with tf.Session() as sess:
		sess.run(init)
		step = 1
		# Keep training until reach max iterations
		print('Wait for lock')
		with lock:
			run_state = running
		print('Start')
		while step * batch_size < training_iters and run_state:
			batch_x, batch_y = mnist.train.next_batch(batch_size)
			# Run optimization op (backprop)
			sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
			if step % display_step == 0:
				# Calculate batch loss and accuracy
				loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
				print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
					"{:.6f}".format(loss) + ", Training Accuracy= " + \
					"{:.5f}".format(acc))
				itchat.send("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
					"{:.6f}".format(loss) + ", Training Accuracy= " + \
							"{:.5f}".format(acc), to_name)
			step += 1
			with lock:
				run_state = running
		print("Optimization Finished!")
		itchat.send("Optimization Finished!", to_name)

		# Calculate accuracy for 256 mnist test images
		print("Testing Accuracy:", \
			sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))
		itchat.send("Testing Accuracy: %s" %
			sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}), to_name)

	with lock:
		running = False
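
Note that nn_train creates all of its ops in the process-wide default graph, so calling it more than once in the same process keeps adding nodes to that graph. A minimal sketch of one way to isolate each run, assuming nothing else relies on the default graph:

def nn_train_isolated(to_name, param):
	# Hypothetical wrapper, not part of the original project: give each training
	# run its own graph so repeated calls do not accumulate ops.
	with tf.Graph().as_default():
		nn_train(to_name, param)
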
Code Example #11
def train():
    mnist = data_mnist.read_data_sets(one_hot=True, fake_data=FLAGS.fake_data)
    sess = tf.InteractiveSession()

    # Model
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 784], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.image_summary('input', image_shaped_input, 10)

    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def variable_summaries(var, name):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.scalar_summary('mean/' + name, mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
            tf.scalar_summary('stddev/' + name, stddev)
            tf.scalar_summary('max/' + name, tf.reduce_max(var))
            tf.scalar_summary('min/' + name, tf.reduce_min(var))
            tf.histogram_summary(name, var)

    def nn_layer(input_tensor,
                 input_dim,
                 output_dim,
                 layer_name,
                 act=tf.nn.relu):
        with tf.name_scope(layer_name):
            # This Variable will hold the state of the weights for the layer
            with tf.name_scope('weights'):
                weights = weight_variable([input_dim, output_dim])
                variable_summaries(weights, layer_name + '/weights')
            with tf.name_scope('biases'):
                biases = bias_variable([output_dim])
                variable_summaries(biases, layer_name + '/biases')
            with tf.name_scope('Wx_plus_b'):
                preactivate = tf.matmul(input_tensor, weights) + biases
                tf.histogram_summary(layer_name + '/pre_activations',
                                     preactivate)
            activations = act(preactivate, 'activation')
            tf.histogram_summary(layer_name + '/activations', activations)
            return activations

    hidden1 = nn_layer(x, 784, 500, 'layer1')

    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        tf.scalar_summary('dropout_keep_probability', keep_prob)
        dropped = tf.nn.dropout(hidden1, keep_prob)

    y = nn_layer(dropped, 500, 10, 'layer2', act=tf.nn.softmax)

    with tf.name_scope('cross_entropy'):
        diff = y_ * tf.log(y)
        with tf.name_scope('total'):
            cross_entropy = -tf.reduce_mean(diff)
        tf.scalar_summary('cross entropy', cross_entropy)

    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.scalar_summary('accuracy', accuracy)

    # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
    merged = tf.merge_all_summaries()
    train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',
                                          sess.graph)
    test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
    tf.initialize_all_variables().run()

    def feed_dict(train):
        if train or FLAGS.fake_data:
            xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
            k = FLAGS.dropout
        else:
            xs, ys = mnist.test.images, mnist.test.labels
            k = 1.0
        return {x: xs, y_: ys, keep_prob: k}

    for i in range(FLAGS.max_steps):
        if i % 10 == 0:  # Record summaries and test-set accuracy
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict=feed_dict(False))
            test_writer.add_summary(summary, i)
            print('Accuracy at step %s: %s' % (i, acc))
        else:  # Record train set summaries, and train
            if i % 100 == 99:  # Record execution stats
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                summary, _ = sess.run([merged, train_step],
                                      feed_dict=feed_dict(True),
                                      options=run_options,
                                      run_metadata=run_metadata)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
                train_writer.add_summary(summary, i)
                print('Adding run metadata for', i)
            else:  # Record a summary
                summary, _ = sess.run([merged, train_step],
                                      feed_dict=feed_dict(True))
                train_writer.add_summary(summary, i)
    train_writer.close()
    test_writer.close()
Code Example #12
def train():
  mnist = data_mnist.read_data_sets(one_hot=True, fake_data=FLAGS.fake_data)
  sess = tf.InteractiveSession()

  # Model
  with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
  with tf.name_scope('input_reshape'):
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    tf.image_summary('input', image_shaped_input, 10)

  def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
  def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

  def variable_summaries(var, name):
    with tf.name_scope('summaries'):
      mean = tf.reduce_mean(var)
      tf.scalar_summary('mean/' + name, mean)
      with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
      tf.scalar_summary('stddev/' + name, stddev)
      tf.scalar_summary('max/' + name, tf.reduce_max(var))
      tf.scalar_summary('min/' + name, tf.reduce_min(var))
      tf.histogram_summary(name, var)

  def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    with tf.name_scope(layer_name):
      # This Variable will hold the state of the weights for the layer
      with tf.name_scope('weights'):
        weights = weight_variable([input_dim, output_dim])
        variable_summaries(weights, layer_name + '/weights')
      with tf.name_scope('biases'):
        biases = bias_variable([output_dim])
        variable_summaries(biases, layer_name + '/biases')
      with tf.name_scope('Wx_plus_b'):
        preactivate = tf.matmul(input_tensor, weights) + biases
        tf.histogram_summary(layer_name + '/pre_activations', preactivate)
      activations = act(preactivate, 'activation')
      tf.histogram_summary(layer_name + '/activations', activations)
      return activations

  hidden1 = nn_layer(x, 784, 500, 'layer1')

  with tf.name_scope('dropout'):
    keep_prob = tf.placeholder(tf.float32)
    tf.scalar_summary('dropout_keep_probability', keep_prob)
    dropped = tf.nn.dropout(hidden1, keep_prob)

  y = nn_layer(dropped, 500, 10, 'layer2', act=tf.nn.softmax)

  with tf.name_scope('cross_entropy'):
    diff = y_ * tf.log(y)
    with tf.name_scope('total'):
      cross_entropy = -tf.reduce_mean(diff)
    tf.scalar_summary('cross entropy', cross_entropy)

  with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(cross_entropy)

  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', accuracy)

  # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
  merged = tf.merge_all_summaries()
  train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)
  test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
  tf.initialize_all_variables().run()

  def feed_dict(train):
    if train or FLAGS.fake_data:
      xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
      k = FLAGS.dropout
    else:
      xs, ys = mnist.test.images, mnist.test.labels
      k = 1.0
    return {x: xs, y_: ys, keep_prob: k}

  for i in range(FLAGS.max_steps):
    if i % 10 == 0:  # Record summaries and test-set accuracy
      summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
      test_writer.add_summary(summary, i)
      print('Accuracy at step %s: %s' % (i, acc))
    else:  # Record train set summaries, and train
      if i % 100 == 99:  # Record execution stats
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True), options=run_options, run_metadata=run_metadata)
        train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
        train_writer.add_summary(summary, i)
        print('Adding run metadata for', i)
      else:  # Record a summary
        summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
        train_writer.add_summary(summary, i)
  train_writer.close()
  test_writer.close()
Code Example #13
def nn_train(to_name, param):
    global lock, running
    itchat.send(u'开工了...', to_name)
    # Lock
    with lock:
        running = True

    # mnist data reading
    mnist = data_mnist.read_data_sets(one_hot=True)

    # Parameters
    # learning_rate = 0.001
    # training_iters = 200000
    # batch_size = 128
    # display_step = 10
    learning_rate, training_iters, batch_size, display_step = param

    # Network Parameters
    n_input = 784  # MNIST data input (img shape: 28*28)
    n_classes = 10  # MNIST total classes (0-9 digits)
    dropout = 0.75  # Dropout, probability to keep units

    # tf Graph input
    x = tf.placeholder(tf.float32, [None, n_input])
    y = tf.placeholder(tf.float32, [None, n_classes])
    keep_prob = tf.placeholder(tf.float32)  #dropout (keep probability)

    # Create some wrappers for simplicity
    def conv2d(x, W, b, strides=1):
        # Conv2D wrapper, with bias and relu activation
        x = tf.nn.conv2d(x,
                         W,
                         strides=[1, strides, strides, 1],
                         padding='SAME')
        x = tf.nn.bias_add(x, b)
        return tf.nn.relu(x)

    def maxpool2d(x, k=2):
        # MaxPool2D wrapper
        return tf.nn.max_pool(x,
                              ksize=[1, k, k, 1],
                              strides=[1, k, k, 1],
                              padding='SAME')

    # Create model
    def conv_net(x, weights, biases, dropout):
        # Reshape input picture
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        # Convolution Layer
        conv1 = conv2d(x, weights['wc1'], biases['bc1'])
        # Max Pooling (down-sampling)
        conv1 = maxpool2d(conv1, k=2)

        # Convolution Layer
        conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
        # Max Pooling (down-sampling)
        conv2 = maxpool2d(conv2, k=2)

        # Fully connected layer
        # Reshape conv2 output to fit fully connected layer input
        fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
        fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
        fc1 = tf.nn.relu(fc1)
        # Apply Dropout
        fc1 = tf.nn.dropout(fc1, dropout)

        # Output, class prediction
        out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
        return out

    # Store layers weight & bias
    weights = {
        # 5x5 conv, 1 input, 32 outputs
        'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
        # 5x5 conv, 32 inputs, 64 outputs
        'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
        # fully connected, 7*7*64 inputs, 1024 outputs
        'wd1': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
        # 1024 inputs, 10 outputs (class prediction)
        'out': tf.Variable(tf.random_normal([1024, n_classes]))
    }

    biases = {
        'bc1': tf.Variable(tf.random_normal([32])),
        'bc2': tf.Variable(tf.random_normal([64])),
        'bd1': tf.Variable(tf.random_normal([1024])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }

    # Construct model
    pred = conv_net(x, weights, biases, keep_prob)

    # Define loss and optimizer
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cost)

    # Evaluate model
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initializing the variables
    init = tf.initialize_all_variables()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        step = 1
        # Keep training until reach max iterations
        print('Wait for lock')
        with lock:
            run_state = running
        print('Start')
        while step * batch_size < training_iters and run_state:
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop)
            sess.run(optimizer,
                     feed_dict={
                         x: batch_x,
                         y: batch_y,
                         keep_prob: dropout
                     })
            if step % display_step == 0:
                # Calculate batch loss and accuracy
                loss, acc = sess.run([cost, accuracy],
                                     feed_dict={
                                         x: batch_x,
                                         y: batch_y,
                                         keep_prob: 1.
                                     })
                print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                 "{:.6f}".format(loss) + ", Training Accuracy= " + \
                 "{:.5f}".format(acc))
                itchat.send("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                 "{:.6f}".format(loss) + ", Training Accuracy= " + \
                   "{:.5f}".format(acc), to_name)
            step += 1
            with lock:
                run_state = running
        print("Optimization Finished!")
        itchat.send("Optimization Finished!", to_name)

        # Calculate accuracy for 256 mnist test images
        print("Testing Accuracy:", \
         sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))
        itchat.send(
            "Testing Accuracy: %s" % sess.run(accuracy,
                                              feed_dict={
                                                  x: mnist.test.images[:256],
                                                  y: mnist.test.labels[:256],
                                                  keep_prob: 1.
                                              }), to_name)

    with lock:
        running = False