def main(_):
    pred = RNN(x, weights, biases)

    # Define loss and optimizer
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cost)

    # Evaluate model
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initializing the variables
    init = tf.global_variables_initializer()

    train_images, train_labels = load_train_data()
    train_images, validation_images = train_images[:SPLIT_INDEX, :], train_images[SPLIT_INDEX:, :]
    train_labels, validation_labels = train_labels[:SPLIT_INDEX, :], train_labels[SPLIT_INDEX:, :]

    with tf.Session() as sess:
        sess.run(init)
        step = 1
        # Keep training until the maximum number of iterations is reached
        while step * batch_size < training_iters:
            batch_x, batch_y = functions.next_batch(train_images, train_labels, batch_size)
            # Reshape data to get 28 sequences of 28 elements
            batch_x = batch_x.reshape((batch_size, n_steps, n_input))
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            if step % display_step == 0:
                # Calculate batch accuracy
                acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
                # Calculate batch loss
                loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
                print("Iter " + str(step * batch_size) +
                      ", Minibatch Loss= " + "{:.6f}".format(loss) +
                      ", Training Accuracy= " + "{:.5f}".format(acc))
            step += 1
        print("Optimization Finished!")

        test_images = load_test_data()
        timages = test_images.reshape((test_images.shape[0], n_steps, n_input))
        result = sess.run(tf.argmax(pred, 1), feed_dict={x: timages})
        save_result(result)
        print("Done")
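The snippet above calls an RNN(x, weights, biases) helper that is not shown here. A minimal sketch of what such a helper could look like with the TensorFlow 1.x contrib API, assuming n_steps and n_hidden are defined and that weights and biases are dictionaries with an 'out' entry:

def RNN(x, weights, biases):
    # x arrives as (batch_size, n_steps, n_input); static_rnn expects a
    # Python list of n_steps tensors shaped (batch_size, n_input).
    x = tf.unstack(x, n_steps, 1)
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Project the output of the last time step onto the class logits.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']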
def main(_):
    train_images, train_labels = load_train_data()
    train_images, validation_images = train_images[:SPLIT_INDEX, :], train_images[SPLIT_INDEX:, :]
    train_labels, validation_labels = train_labels[:SPLIT_INDEX, :], train_labels[SPLIT_INDEX:, :]

    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    # Single linear layer: softmax regression on the flattened 28x28 input
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y_pred = tf.matmul(x, W) + b

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    model = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(model)
        for _ in range(1000):
            images, labels = functions.next_batch(train_images, train_labels, 100)
            session.run(train_step, feed_dict={x: images, y: labels})

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        print(session.run(accuracy,
                          feed_dict={x: validation_images, y: validation_labels}))

        test_images = load_test_data()
        result = session.run(tf.argmax(y_pred, 1), feed_dict={x: test_images})
        save_result(result)
        print("Done")
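All of these training loops rely on a functions.next_batch helper that is not shown. One plausible, minimal implementation, assuming images and labels are NumPy arrays with matching first dimensions, is a random sample without replacement:

import numpy as np

def next_batch(images, labels, batch_size):
    # Draw a random mini-batch of matching image/label rows.
    idx = np.random.choice(images.shape[0], batch_size, replace=False)
    return images[idx, :], labels[idx, :]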
def main(_):
    train_images, train_labels = load_train_data()
    train_images, validation_images = train_images[:SPLIT_INDEX, :], train_images[SPLIT_INDEX:, :]
    train_labels, validation_labels = train_labels[:SPLIT_INDEX, :], train_labels[SPLIT_INDEX:, :]

    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    # First convolutional layer: 32 filters of size 5x5 over the 28x28 input
    W_conv1 = functions.weight_variable([5, 5, 1, 32])
    b_conv1 = functions.bias_variable([32])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # Second convolutional layer: 64 filters of size 5x5
    W_conv2 = functions.weight_variable([5, 5, 32, 64])
    b_conv2 = functions.bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # Fully connected layer on the flattened 7x7x64 feature maps
    W_fc1 = functions.weight_variable([7 * 7 * 64, 1024])
    b_fc1 = functions.bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout before the readout layer
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Readout layer producing the 10 class logits
    W_fc2 = functions.weight_variable([1024, 10])
    b_fc2 = functions.bias_variable([10])
    y_pred = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    model = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(model)
        for i in range(MAX_STEPS):
            images, labels = functions.next_batch(train_images, train_labels, BATCH_SIZE)
            if i > 0 and i % 100 == 0:
                train_accuracy = session.run(
                    accuracy, feed_dict={x: images, y: labels, keep_prob: 1.0})
                print("step %d, training accuracy %g" % (i, train_accuracy))
            session.run(train_step, feed_dict={x: images, y: labels, keep_prob: 0.5})

        print(session.run(accuracy,
                          feed_dict={x: validation_images,
                                     y: validation_labels,
                                     keep_prob: 1.0}))

        test_images = load_test_data()
        result = session.run(tf.argmax(y_pred, 1),
                             feed_dict={x: test_images, keep_prob: 1.0})
        save_result(result)
        print("Done")
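The convolutional model uses functions.weight_variable, functions.bias_variable, conv2d and max_pool_2x2 helpers that are not shown. Sketches in the spirit of the standard TensorFlow 1.x MNIST tutorial, which may differ from the original functions module:

def weight_variable(shape):
    # Small Gaussian noise breaks the symmetry between filters.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    # A slightly positive bias avoids "dead" ReLU units at the start.
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    # Stride 1 and SAME padding keep the spatial size unchanged.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling halves each spatial dimension (28 -> 14 -> 7).
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')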
# Initializing the variables
init = tf.global_variables_initializer()

train_images, train_labels = load_train_data()
train_images, validation_images = train_images[:SPLIT_INDEX, :], train_images[SPLIT_INDEX:, :]
train_labels, validation_labels = train_labels[:SPLIT_INDEX, :], train_labels[SPLIT_INDEX:, :]

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until the maximum number of iterations is reached
    while step * batch_size < training_iters:
        images, labels = functions.next_batch(train_images, train_labels, BATCH_SIZE)
        # Run optimization op (backprop), keeping units with probability `dropout`
        sess.run(optimizer, feed_dict={x: images, y: labels, keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch loss and accuracy with dropout disabled
            loss, acc = sess.run([cost, accuracy],
                                 feed_dict={x: images, y: labels, keep_prob: 1.})
            print("Iter " + str(step * batch_size) +
                  ", Minibatch Loss= " + "{:.6f}".format(loss) +
                  ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1
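This fragment assumes that the placeholders, hyperparameters and the cost, optimizer and accuracy ops are defined earlier. A purely illustrative sketch of those definitions follows; every name and value here is an assumption, not the original configuration:

# Illustrative hyperparameters (assumed values)
learning_rate = 0.001
training_iters = 100000
batch_size = BATCH_SIZE = 128
display_step = 10
dropout = 0.75  # keep probability used during training

# Placeholders for a 28x28 grayscale image fed as a flat vector
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)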
beg_index = i
end_index = i + train_range
input_X, input_Y = DataSet(beg_index, end_index)
test_X, test_Y = DataSet(end_index, end_index + 10)
# print(test_X.shape)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    step = 0
    Train_Acu = []
    Test_Acu = []
    while step < training_iters:
        # train batch
        batch_xs, batch_ys = next_batch(batch_size, input_X, input_Y)
        batch_xs = batch_xs.reshape([-1, n_steps, n_inputs])
        batch_ys = batch_ys[n_steps - 1::n_steps]
        # test batch
        test_xs, test_ys = next_batch(batch_size, test_X, test_Y)
        test_xs = test_xs.reshape([-1, n_steps, n_inputs])
        test_ys = test_ys[n_steps - 1::n_steps]
        # training
        sess.run([train_op], feed_dict={x: batch_xs, y: batch_ys})
        if step % 20 == 0:
            acc1 = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys})
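The slice batch_ys[n_steps - 1::n_steps] keeps only the label attached to the last time step of each window, so one label is paired with each n_steps-long input sequence. A tiny illustration of the slicing, with an assumed n_steps of 4:

import numpy as np

labels = np.arange(12)      # 3 windows of 4 steps each
print(labels[4 - 1::4])     # -> [ 3  7 11], the last label of every window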