# Example #1
# Output layer: softmax over the logits from the last fully-connected layer.
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# Loss function: cross entropy.
# Clip the softmax output away from 0 before taking the log -- otherwise
# tf.log(0) yields NaN and training silently diverges once any class
# probability saturates.
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy: fraction of examples whose arg-max prediction matches the
# one-hot label in y_.
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# InteractiveSession installs itself as the default session so that
# Tensor.eval() / Operation.run() below work without an explicit sess arg.
sess = tf.InteractiveSession()

sess.run(tf.initialize_all_variables())

for i in range(20000):
    batch_xs, batch_ys = load_data.get_train_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
        x: batch_xs, y_: batch_ys, keep_prob: 1.0})
    print "step %d, training accuracy %g" % (i, train_accuracy)
    train_step.run(feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})

#print "test accuracy %g"%accuracy.eval(feed_dict={
#    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})

# Load the Kaggle-style test set (CSV of flattened pixel rows).
test_set_x = load_data.load_test_data("/home/darshan/Documents/DigitRecognizer/MNIST_data/",
                                      "test.csv")
print(test_set_x.shape)
# Predict in batches so the whole test set need not be fed at once.
nbr_of_test_batches = 10
# Use floor division so batch_size stays an integer: plain `/` only
# truncates under Python 2; under Python 3 it would produce a float and
# break integer-based batch slicing downstream.
batch_size = load_data.nbr_of_test_dp // nbr_of_test_batches
for j in xrange(nbr_of_test_batches):