Example #1
# Imports needed by this excerpt; the model (W_conv1, b_conv1, W_conv2,
# b_conv2, out), the target placeholder y_, the hyperparameters
# (learning_rate, cost, batch_size, epochs, test_start, test_size), and the
# project-local DataHelper class are assumed to be defined earlier.
import time

import tensorflow as tf

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Alternative: plain gradient descent
#optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

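# Evaluation metric: root-mean-squared error between predictions and targets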
rmse = tf.sqrt(tf.reduce_mean(tf.squared_difference(out, y_)))

# Initialize all variables (tf.initialize_all_variables() is deprecated)
init = tf.global_variables_initializer()

dh = DataHelper(batch_size, test_idx=test_start)
saver = tf.train.Saver()
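# `saver` can checkpoint the learned variables during or after training,
# e.g. saver.save(sess, 'model.ckpt') (the path here is illustrative).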

with tf.Session() as sess:
    sess.run(init)
    # Sanity-check the freshly initialized weights and biases
    print(sess.run([W_conv1, b_conv1, W_conv2, b_conv2]))
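    # Fetch a fixed held-out evaluation set once, before training starts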
    test_data, test_labels = dh.get_test_data(test_size)
    epoch = 1
    train_start = time.time()
    while epoch <= epochs:
        epoch_start = time.time()
        print('Training Epoch {}...'.format(epoch))
        # Re-create the data helper each epoch so batching restarts from the
        # beginning; test_idx=19000 corresponds to roughly an 83/17 train/test split
        dh = DataHelper(batch_size, test_idx=test_start)
        step = 1
        # Process all training images (everything before test_start) each epoch
        while step * batch_size < test_start:
            # TODO get data in proper format
            batch_xs, batch_ys = dh.get_next_batch()
            # Debug aid (needs `import sys` if re-enabled):
            #print(batch_xs.shape, batch_ys.shape)
            #sys.exit(0)
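            # The rest of the loop body is not shown in this excerpt; the
            # usual next step would be to run the training op and advance
            # the counter, e.g. (the input placeholder name `x` is assumed):
            #   sess.run(optimizer, feed_dict={x: batch_xs, y_: batch_ys})
            #   step += 1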