# Example #1
def test(mnist):
	"""Repeatedly evaluate the latest checkpoint on the MNIST test set.

	Builds a fresh evaluation graph, restores the exponential-moving-average
	shadow variables from the newest checkpoint under
	rnn_backward.MODEL_SAVE_PATH, prints test accuracy, then sleeps
	TEST_INTERVAL seconds and repeats. Returns early if no checkpoint exists.

	Args:
		mnist: dataset object exposing `test.images` and `test.labels`
			(e.g. tf.examples mnist input_data) — assumed shape compatible
			with TIME_STEPS x INPUT_ROWS; TODO confirm against caller.
	"""
	with tf.Graph().as_default() as graph:
		# Inputs reshaped for the RNN: (batch, time steps, rows per step).
		x = tf.placeholder(tf.float32, shape = [None, rnn_forward.TIME_STEPS, rnn_forward.INPUT_ROWS])
		y_ = tf.placeholder(tf.float32, shape = [None, rnn_forward.OUTPUT_NODE])

		y = rnn_forward.forward(x,rnn_backward.REGULARIZER)

		# Restore the EMA shadow values in place of the raw weights,
		# matching the averages maintained during training.
		ema = tf.train.ExponentialMovingAverage(rnn_backward.MOVING_AVERAGE_DECAY)
		ema_restore = ema.variables_to_restore()

		saver = tf.train.Saver(ema_restore)

		correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
		accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

		while True:
			with tf.Session() as sess:
				ckpt = tf.train.get_checkpoint_state(rnn_backward.MODEL_SAVE_PATH)
				if ckpt and ckpt.model_checkpoint_path:
					saver.restore(sess, ckpt.model_checkpoint_path)
					# Checkpoint filenames end in "-<global_step>".
					global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
					xs = mnist.test.images.reshape((-1, rnn_forward.TIME_STEPS, rnn_forward.INPUT_ROWS))
					accuracy_score = sess.run(accuracy, feed_dict = {x:xs, y_:mnist.test.labels})
					# Fixed typo in the report: "setps" -> "steps".
					print("after %s steps, test accuracy is %g"%(global_step, accuracy_score))
				else:
					print("no checkpoint")
					return
			time.sleep(TEST_INTERVAL)
def backward(mnist):
	"""Train the RNN on MNIST with SGD, exponential LR decay and EMA.

	Builds the training graph (regularized forward pass, sparse softmax
	cross-entropy loss plus collected regularization losses), runs STEPS
	mini-batch updates, and every 500 steps prints the loss and saves a
	checkpoint to MODEL_SAVE_PATH/MODEL_NAME tagged with the global step.

	Args:
		mnist: dataset object exposing `train.next_batch` and
			`train.num_examples`.
	"""
	# Placeholders: inputs as (batch, time steps, rows), one-hot labels.
	x = tf.placeholder(tf.float32, shape = [None, rnn_forward.TIME_STEPS, rnn_forward.INPUT_ROWS])
	y_ = tf.placeholder(tf.float32, shape = [None, rnn_forward.OUTPUT_NODE])

	y = rnn_forward.forward(x, REGULARIZER)

	# Non-trainable counter incremented by the optimizer each step.
	global_step = tf.Variable(0, trainable = False)

	# Decay the learning rate once per epoch (staircase).
	learning_rate = tf.train.exponential_decay(
		LEARNING_RATE_BASE,
		global_step,
		mnist.train.num_examples / BATCH_SIZE,
		LEARNING_RATE_DECAY,
		staircase = True)

	# Cross-entropy on class indices plus the regularization terms that
	# rnn_forward.forward added to the 'losses' collection.
	cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
		logits = y, labels = tf.argmax(y_, 1))
	mean_cross_entropy = tf.reduce_mean(cross_entropy)
	total_loss = mean_cross_entropy + tf.add_n(tf.get_collection('losses'))

	train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
		total_loss, global_step = global_step)

	# Maintain shadow (moving-average) copies of all trainable variables;
	# group the SGD update and the EMA update into a single train op.
	ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
	ema_op = ema.apply(tf.trainable_variables())
	with tf.control_dependencies([train_step, ema_op]) :
		train_op = tf.no_op(name = 'train')

	saver = tf.train.Saver()

	# Dump the graph definition for TensorBoard, then close immediately.
	writer = tf.summary.FileWriter('W:\\python\\tensorflow\\file', tf.get_default_graph())
	writer.close()

	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())

		for i in range(STEPS):
			xs, ys = mnist.train.next_batch(BATCH_SIZE)
			xs = xs.reshape((BATCH_SIZE, rnn_forward.TIME_STEPS, rnn_forward.INPUT_ROWS))
			_, loss_value, step = sess.run(
				[train_op, total_loss, global_step],
				feed_dict = {x: xs, y_: ys})
			if i % 500 == 0:
				print("After %d steps, loss is: %f" %(step, loss_value))
				saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step = global_step)
# Example #3
def test(mnist):
    """Repeatedly evaluate the latest checkpoint on the MNIST test set.

    Builds a fresh evaluation graph, restores the exponential-moving-average
    shadow variables from the newest checkpoint under
    rnn_backward.MODEL_SAVE_PATH, prints test accuracy, then sleeps
    TEST_INTERVAL seconds and repeats. Returns early if no checkpoint exists.

    Args:
        mnist: dataset object exposing `test.images` and `test.labels`
            (e.g. tf.examples mnist input_data) — assumed shape compatible
            with TIME_STEPS x INPUT_ROWS; TODO confirm against caller.
    """
    with tf.Graph().as_default() as graph:
        # Inputs reshaped for the RNN: (batch, time steps, rows per step).
        x = tf.placeholder(
            tf.float32,
            shape=[None, rnn_forward.TIME_STEPS, rnn_forward.INPUT_ROWS])
        y_ = tf.placeholder(tf.float32, shape=[None, rnn_forward.OUTPUT_NODE])

        y = rnn_forward.forward(x, rnn_backward.REGULARIZER)

        # Restore the EMA shadow values in place of the raw weights,
        # matching the averages maintained during training.
        ema = tf.train.ExponentialMovingAverage(
            rnn_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()

        saver = tf.train.Saver(ema_restore)

        correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    rnn_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Checkpoint filenames end in "-<global_step>".
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    xs = mnist.test.images.reshape(
                        (-1, rnn_forward.TIME_STEPS, rnn_forward.INPUT_ROWS))
                    accuracy_score = sess.run(accuracy,
                                              feed_dict={
                                                  x: xs,
                                                  y_: mnist.test.labels
                                              })
                    # Fixed typo in the report: "setps" -> "steps".
                    print("after %s steps, test accuracy is %g" %
                          (global_step, accuracy_score))
                else:
                    print("no checkpoint")
                    return
            time.sleep(TEST_INTERVAL)
# Example #4
# Invoke rnn_forward.forward for each (label, count) pair, in the same
# order as the original repeated call list. The import is performed once;
# re-importing the same module is a no-op, so collapsing the duplicate
# import statements does not change behavior.
from rnn_forward import forward

_FORWARD_ARGS = [
    ('82', 2), ('90', 4), ('98', 3), ('11', 3),
    ('51', 2), ('59', 5), ('67', 1), ('75', 0),
    ('0', 4), ('27', 0), ('35', 1), ('43', 5),
]

for _label, _count in _FORWARD_ARGS:
    forward(_label, _count)