Example #1
def run_second_net():

    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    rate = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, rate=rate)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, rate=rate)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, rate=rate)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, rate=rate)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_conv,
                                                                              labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], rate: 0.0})
                       for i in range(10)])
        print(f'Accuracy: {acc * 100:.4}%')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], rate: 0.5})

            if i % 50 == 0:
                test(sess)

        test(sess)
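Every example in this listing calls the same small helpers (conv_layer, max_pool_2x2, full_layer) without defining them. Below is a minimal sketch of plausible TF1 definitions, consistent with how the helpers are called above; the initializers and exact signatures are assumptions, not the original source:

import tensorflow as tf

def weight_variable(shape):
    # small random initialization (assumed)
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv_layer(input_, shape):
    # convolution with ReLU, stride 1, 'SAME' padding
    W = weight_variable(shape)
    b = bias_variable([shape[3]])
    return tf.nn.relu(tf.nn.conv2d(input_, W, strides=[1, 1, 1, 1],
                                   padding='SAME') + b)

def max_pool_2x2(x):
    # halve the spatial dimensions
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def full_layer(input_, size):
    # linear layer; callers apply tf.nn.relu themselves where needed
    in_size = int(input_.get_shape()[1])
    W = weight_variable([in_size, size])
    b = bias_variable([size])
    return tf.matmul(input_, W) + b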
Example #2
def run_simple_net():
    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    conv3_pool = max_pool_2x2(conv3)
    conv3_flat = tf.reshape(conv3_pool, [-1, 4 * 4 * 128])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full_1 = tf.nn.relu(full_layer(conv3_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        print("Accuracy: {:.4}%".format(acc * 100))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })

            if i % 500 == 0:
                test(sess)

        test(sess)
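The CIFAR examples also assume a CifarDataManager that is never shown. A minimal sketch of one plausible implementation over the python-pickled CIFAR-10 batches; DATA_PATH, unpickle, and one_hot are assumed names, not the original source:

import os
import pickle
import numpy as np

DATA_PATH = 'cifar-10-batches-py'  # assumed location of the pickled batches

def unpickle(file):
    with open(os.path.join(DATA_PATH, file), 'rb') as fo:
        return pickle.load(fo, encoding='bytes')

def one_hot(vec, vals=10):
    out = np.zeros((len(vec), vals))
    out[range(len(vec)), vec] = 1
    return out

class CifarLoader:
    def __init__(self, source_files):
        self._source = source_files
        self._i = 0
        self.images, self.labels = None, None

    def load(self):
        data = [unpickle(f) for f in self._source]
        images = np.vstack([d[b'data'] for d in data])
        n = len(images)
        # CHW -> HWC, scaled to [0, 1]
        self.images = images.reshape(n, 3, 32, 32).transpose(0, 2, 3, 1) \
                            .astype(np.float32) / 255
        self.labels = one_hot(np.hstack([d[b'labels'] for d in data]))
        return self

    def next_batch(self, batch_size):
        # cycle through the data sequentially
        x = self.images[self._i:self._i + batch_size]
        y = self.labels[self._i:self._i + batch_size]
        self._i = (self._i + batch_size) % len(self.images)
        return x, y

class CifarDataManager:
    def __init__(self):
        self.train = CifarLoader(
            ['data_batch_{}'.format(i) for i in range(1, 6)]).load()
        self.test = CifarLoader(['test_batch']).load()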
Example #3
def run_simple_net(bs, lr):
    # load the data
    cifar = CifarDataManager()
    # init variables
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)
    # 2 conv and 1 max pooling
    conv1 = conv_layer(x, shape=[3, 3, 3, 64])
    conv2 = conv_layer(conv1, shape=[3, 3, 64, 64])
    conv2_pool = max_pool_2x2(conv2)
    # 2 conv and 1 max pooling
    conv3 = conv_layer(conv2_pool, shape=[3, 3, 64, 128])
    conv4 = conv_layer(conv3, shape=[3, 3, 128, 128])
    conv4_pool = max_pool_2x2(conv4)
    # flatten and drop to prevent overfitting
    conv4_flat = tf.reshape(conv4_pool, [-1, 8 * 8 * 128])
    conv4_drop = tf.nn.dropout(conv4_flat, keep_prob=keep_prob)
    # fully connected nn using relu as activation function
    full_0 = tf.nn.relu(full_layer(conv4_drop, 512))
    full0_drop = tf.nn.dropout(full_0, keep_prob=keep_prob)
    full_1 = tf.nn.relu(full_layer(full0_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)
    # loss function
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    # learning rate lr is passed in so results can be tabulated across runs
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                       for i in range(10)])
        print("Accuracy: {:.4}%".format(acc * 100))
        return acc * 100

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(bs)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

            # if i % 500 == 0:
            #     test(sess)
        result = test(sess)
    return result
Example #4
def get_y_predict(x, keep_prob):
    conv1 = conv_layer(x, shape=[3, 3, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[3, 3, 32, 64])
    conv2_pool = max_pool_2x2(conv2)
    conv2_flat = tf.reshape(conv2_pool, [-1, 8 * 8 * 64])

    full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_predict = full_layer(full1_drop, 10)
    return y_predict
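get_y_predict only builds the inference graph. A hypothetical wiring in the style of the surrounding examples (the placeholder shapes and learning rate are assumptions):

# hypothetical driver code for get_y_predict
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
keep_prob = tf.placeholder(tf.float32)

y_predict = get_y_predict(x, keep_prob)
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_predict, labels=y_))
train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)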
Example #5
def run_simple_net():
    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    conv3_pool = max_pool_2x2(conv3)
    conv3_flat = tf.reshape(conv3_pool, [-1, 4 * 4 * 128])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full_1 = tf.nn.relu(full_layer(conv3_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                       for i in range(10)])
        print("Accuracy: {:.4}%".format(acc * 100))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

            if i % 500 == 0:
                test(sess)

        test(sess)
Example #6
    def create_graph(self):
        # TODO: try using get_Variable?

        x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

        conv1 = conv_layer(x, shape=[5, 5, 1, 32])
        conv1_pool = max_pool_2x2(conv1)

        conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
        conv2_pool = max_pool_2x2(conv2)

        conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
        full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

        keep_prob = tf.placeholder(tf.float32)
        full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

        y_conv = full_layer(full1_drop, 10)

        return x, keep_prob, y_conv
Example #7
def build_second_net():
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3,
                                ksize=[1, 8, 8, 1],
                                strides=[1, 8, 8, 1],
                                padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)  # noqa

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # noqa
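build_second_net stops after defining the accuracy node. A small driver in the style of run_second_net from Example #1 could train it; this sketch assumes the function is modified to return the tensors it creates, and reuses CifarDataManager, STEPS, and BATCH_SIZE from the surrounding examples:

def train_second_net():
    # assumes build_second_net() is changed to end with:
    #     return x, y_, keep_prob, train_step, accuracy
    x, y_, keep_prob, train_step, accuracy = build_second_net()
    cifar = CifarDataManager()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step,
                     feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})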
Example #9
def build_second_net():
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)  # noqa

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # noqa
Example #10
def convolutional_neural_network(data):
    #NAMING CONVENTION: conv3s32n is a convolutional layer with filter size 3x3 and number of filters = 32
    conv3s32n = layers.conv_layer(data, params.weights(depth=3),
                                  params.biases())
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(), params.biases())
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(), params.biases())

    #NAMING CONVENTION: pool2w2s is a pool layer with 2x2 window size and stride = 2
    pool2w2s = layers.pool_layer(conv3s32n)

    conv3s32n = layers.conv_layer(pool2w2s, params.weights(), params.biases())
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(), params.biases())
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(), params.biases())

    pool2w2s = layers.pool_layer(conv3s32n)

    conv3s32n = layers.conv_layer(pool2w2s, params.weights(n_filters=128),
                                  params.biases(n_filters=128))
    conv3s32n = layers.conv_layer(conv3s32n,
                                  params.weights(depth=128, n_filters=128),
                                  params.biases(n_filters=128))
    conv3s32n = layers.conv_layer(conv3s32n,
                                  params.weights(depth=128, n_filters=128),
                                  params.biases(n_filters=128))
    conv3s32n = layers.conv_layer(conv3s32n,
                                  params.weights(depth=128, n_filters=128),
                                  params.biases(n_filters=128))
    conv3s32n = layers.conv_layer(conv3s32n,
                                  params.weights(depth=128, n_filters=128),
                                  params.biases(n_filters=128))
    conv3s32n = layers.conv_layer(conv3s32n,
                                  params.weights(depth=128, n_filters=128),
                                  params.biases(n_filters=128))
    '''conv3s32n = layers.conv_layer(pool2w2s, params.weights(n_filters=64), params.biases(n_filters=64))
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=64, n_filters=64), params.biases(n_filters=64))
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=64, n_filters=64), params.biases(n_filters=64))
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=64, n_filters=64), params.biases(n_filters=64))
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=64, n_filters=64), params.biases(n_filters=64))
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=64, n_filters=64), params.biases(n_filters=64))

    pool2w2s = layers.pool_layer(conv3s32n)

    conv3s32n = layers.conv_layer(pool2w2s, params.weights(depth=64, n_filters=128), params.biases(n_filters=128))
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=128, n_filters=128), params.biases(n_filters=128))
    conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=128, n_filters=128), params.biases(n_filters=128))'''

    #pool2w2s = layers.pool_layer(conv3s32n)

    #conv3s32n = layers.conv_layer(pool2w2s, params.weights(depth=128, n_filters=256), params.biases(n_filters=256))
    #conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=256, n_filters=256), params.biases(n_filters=256))
    #conv3s32n = layers.conv_layer(conv3s32n, params.weights(depth=256, n_filters=256), params.biases(n_filters=256))

    #NAMING CONVENTION: Fully connected layers are just indexed
    fc1 = layers.full_layer(conv3s32n, params.fc_weights(conv3s32n, 1024),
                            params.biases(1024), keep_prob)
    fc2 = layers.full_layer(fc1, params.fc_weights(fc1, 1024),
                            params.biases(1024), keep_prob)
    fc3 = layers.full_layer(fc2, params.fc_weights(fc2, 1024),
                            params.biases(1024), keep_prob)

    # the last fully connected layer feeds the output layer
    output = layers.output_layer(fc3, params.fc_weights(fc3, n_classes),
                                 params.biases(n_classes))

    return output
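Purely as an illustration of a tidier structure, the six identical 128-filter convolutions above could be generated in a loop, using the same layers and params helpers this example already relies on:

net = layers.conv_layer(pool2w2s, params.weights(n_filters=128),
                        params.biases(n_filters=128))
for _ in range(5):  # five more 3x3, 128 -> 128 convolutions
    net = layers.conv_layer(net,
                            params.weights(depth=128, n_filters=128),
                            params.biases(n_filters=128))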
Example #11
STEPS = 5000

mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

x_image = tf.reshape(x, [-1, 28, 28, 1])
conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
conv1_pool = max_pool_2x2(conv1)

conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
conv2_pool = max_pool_2x2(conv2)

conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

keep_prob = tf.placeholder(tf.float32)
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

y_conv = full_layer(full1_drop, 10)

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # assumed training loop, following the pattern of the CIFAR examples
    # in this listing; the batch size of 50 is a guess
    for i in range(STEPS):
        batch = mnist.train.next_batch(50)
        sess.run(train_step,
                 feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
Example #12
def build_second_net():
    cifar = CifarDataManager()
    print('in build_second_net 111')

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)

    print('in build_second_net 222')

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)

    print('in build_second_net 333')

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3,
                                ksize=[1, 8, 8, 1],
                                strides=[1, 8, 8, 1],
                                padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)  # noqa

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # noqa

    # Plug this into the test procedure as above to continue...
    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        #print("Accuracy: {:.4}%".format(acc * 100))
        return acc

    print('in build_second_net 444')
    iter_list, test_accuracy_list, train_accuracy_list = [], [], []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            #print('i is ', i)
            #batch = cifar.train.next_batch(BATCH_SIZE)
            batch = cifar.train.random_batch(BATCH_SIZE)
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })

            if i % 500 == 0 or i == STEPS - 1:
                iter_list.append(i)
                # evaluate training accuracy on the current batch with
                # dropout disabled
                train_acc = sess.run(accuracy,
                                     feed_dict={
                                         x: batch[0],
                                         y_: batch[1],
                                         keep_prob: 1.0
                                     })
                train_accuracy_list.append(train_acc)
                test_acc = test(sess)
                test_accuracy_list.append(test_acc)
                print(
                    'step i:{0:7} train_acc: {1:.6f} test_acc: {2:.6f}'.format(
                        i, train_acc, test_acc))

        test(sess)
        plot_accuracy(iter_list, test_accuracy_list, train_accuracy_list)
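This example draws training data with cifar.train.random_batch, a method the usual loader does not include. A plausible implementation for a loader that stores images and labels as NumPy arrays:

import numpy as np

def random_batch(self, batch_size):
    # sample a batch uniformly at random instead of cycling sequentially
    n = len(self.images)
    idx = np.random.choice(n, batch_size, replace=False)
    return self.images[idx], self.labels[idx]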
Example #13
def run_simple_net():
    print('get into run_simple_net')
    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    print('get into run_simple_net 111')

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    # conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 100])
    conv3_pool = max_pool_2x2(conv3)

    conv4 = conv_layer(conv3_pool, shape=[5, 5, 100, 256])
    conv4_pool = max_pool_2x2(conv4)
    print('conv4_pool.get_shape() is ', conv4_pool.get_shape())
    #conv4_flat = tf.reshape(conv4_pool, [-1, 4 * 4 * 256])
    conv4_flat = tf.reshape(conv4_pool, [-1, 2 * 2 * 256])
    conv4_drop = tf.nn.dropout(conv4_flat, keep_prob=keep_prob)

    print('get into run_simple_net 222')

    full_1 = tf.nn.relu(full_layer(conv4_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        #print("test accuracy: {:.4}%".format(acc * 100))
        return acc

    print('get into run_simple_net 333')
    iter_list, test_accuracy_list, train_accuracy_list = [], [], []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            #batch = cifar.train.next_batch(BATCH_SIZE)
            batch = cifar.train.random_batch(BATCH_SIZE)
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })

            if i % 500 == 0:
                iter_list.append(i)
                # evaluate training accuracy once on the current batch with
                # dropout disabled
                train_acc = sess.run(accuracy,
                                     feed_dict={
                                         x: batch[0],
                                         y_: batch[1],
                                         keep_prob: 1.0
                                     })
                train_accuracy_list.append(train_acc)
                test_acc = test(sess)
                test_accuracy_list.append(test_acc)

                print(
                    'step i:{0:6} train_acc: {1:.6f} test_acc: {2:.6f}'.format(
                        i, train_acc, test_acc))

        test(sess)
        plot_accuracy(iter_list, test_accuracy_list, train_accuracy_list)
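plot_accuracy is not defined in these snippets; a minimal matplotlib sketch with the same signature:

import matplotlib.pyplot as plt

def plot_accuracy(iter_list, test_accuracy_list, train_accuracy_list):
    # plot train and test accuracy against training step
    plt.plot(iter_list, train_accuracy_list, label='train accuracy')
    plt.plot(iter_list, test_accuracy_list, label='test accuracy')
    plt.xlabel('training step')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()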
Example #14
conv1 = conv_layer(x, shape=[CONV, CONV, COLOR, CONV1_DEPTH])
conv1_pool = max_pool_2x2(conv1)

# second convolutional layer with max pooling
conv2 = conv_layer(conv1_pool,
                   shape=[CONV, CONV, CONV1_DEPTH, CONV1_DEPTH * 2])
conv2_pool = max_pool_2x2(conv2)

new_size = int(PIXEL / 4 * PIXEL / 4 * CONV1_DEPTH * 2)

if PIXEL % 2 != 0:
    logger.warning('potential issue with the pixel size')

# flatten for the fully connected layer
conv3_flat = tf.reshape(conv2_pool, [-1, new_size])
full_1 = tf.nn.relu(full_layer(conv3_flat, 1024))

# drop out
keep_prob = tf.placeholder(tf.float32)
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

# output layer
y_conv = full_layer(full1_drop, len(position_dict))

predict = tf.argmax(y_conv, 1, name='predict')

# loss function
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_conv, labels=y_))

# optimizer
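The snippet breaks off at the optimizer comment. Going by the pattern of the other examples in this listing, a plausible completion (the learning rate is an assumption):

train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(predict, tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))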
Example #15

mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

x_image = tf.reshape(x, [-1, 28, 28, 1])
conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
conv1_pool = max_pool_2x2(conv1)

conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
conv2_pool = max_pool_2x2(conv2)

conv2_flat = tf.reshape(conv2_pool, [-1, 7*7*64])
full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

keep_prob = tf.placeholder(tf.float32)
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

y_conv = full_layer(full1_drop, 10)

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for i in range(STEPS):
        # assumed loop body, following the pattern of the CIFAR examples
        # in this listing; the batch size of 50 is a guess
        batch = mnist.train.next_batch(50)
        sess.run(train_step,
                 feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
Example #16
def buildModel():
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    x_image = tf.reshape(x, [-1, 28, 28, 1])
    conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
    full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

    keep_prob = tf.placeholder(tf.float32)
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    y_predict = y_conv

    # y_predict = tf.Print(y_conv, [y_conv, tf.shape(y_conv)], "the result is: ", summarize=50)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_predict, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar('cross_entropy', cross_entropy)
    tf.summary.scalar('accuracy', accuracy)
    # y_predict = tf.Print(y_conv, [y_conv], "this is result: ", summarize=10)
    start = tf.Variable(0, name="start", dtype=tf.int32)
    saver = tf.train.Saver()

    merged = tf.summary.merge_all()

    # draw graph on tensorboard
    # with tf.Session() as sess:
    #     tf.summary.FileWriter("./draft", sess.graph);

    # tf.equal returns a boolean tensor
    # tf.argmax: returns the index of the maximum value in a tensor

    batchSize = 100
    # chkp.print_tensors_in_checkpoint_file("./checkpoint/model.ckpt", tensor_name='start', all_tensors=False)
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter('./draft', sess.graph)
        if os.path.isdir('./checkpoint'):
            saver.restore(sess, "./checkpoint/model.ckpt")
        else:
            sess.run(tf.global_variables_initializer())
        for i in range(start.eval() + 1, int(60000 / batchSize)):
            x_train, y_train = next_batch(batchSize, i + 1)
            update_start = start.assign(i)
            batch = sess.run([x_train, y_train])
            if (i + 1) % 10 == 0:
                sess.run(update_start)
                summary, train_accuracy = sess.run([merged, accuracy],
                                                   feed_dict={
                                                       x: batch[0],
                                                       y_: batch[1],
                                                       keep_prob: 1.0
                                                   })
                print("step {}, training accuracy {}".format(
                    int((i + 1) / 10), train_accuracy))
                train_writer.add_summary(summary, i)
                saver.save(sess, "./checkpoint/model.ckpt")
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })
Example #17
def run_simple_net():
    # image input data
    cifar = CifarDataManager()

    # placeholder for the input images
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    # placeholder for the correct class labels
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    # first convolutional layer
    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    # pool the result
    conv1_pool = max_pool_2x2(conv1)

    # second convolutional layer
    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    # pool the result
    conv2_pool = max_pool_2x2(conv2)

    # third convolutional layer
    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    # pool the result
    conv3_pool = max_pool_2x2(conv3)

    # flatten the feature maps into a 1-D vector
    conv3_flat = tf.reshape(conv3_pool, [-1, 4 * 4 * 128])
    # apply dropout
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    # fully connected layer
    full_1 = tf.nn.relu(full_layer(conv3_drop, 512))
    # apply dropout
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    # network output (logits)
    y_conv = full_layer(full1_drop, 10)

    # loss function: cross-entropy
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    # define the training step with the Adam optimizer
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    # evaluation ops for measuring model accuracy
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                       for i in range(10)])
        print("Accuracy: {:.4}%".format(acc * 100))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            # run one training step
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

            if i % 500 == 0:
                test(sess)

        test(sess)
#"contrib/learn/python/learn/datasets/mnist.py"

mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

x = tf.placeholder(tf.float32, shape=[None, 784])
y_actual = tf.placeholder(tf.float32, shape=[None, 10])

x_image = tf.reshape(x, [-1, 28, 28, 1])
conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
conv1_pool = max_pool_2x2(conv1)

conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
conv2_pool = max_pool_2x2(conv2)

conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

keep_prob = tf.placeholder(tf.float32)
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

y_detected = full_layer(full1_drop, 10)

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_detected,
                                               labels=y_actual))
# The step that adjusts the weights and biases
train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)

# Measure how many predictions were correct.
# These nodes play no role in training the model.
Example #19
def run_simple_net():
    dataset = DeepSatData()

    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 4])
    y_ = tf.placeholder(tf.float32, shape=[None, 6])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[3, 3, 4, 16], pad='SAME')
    conv1_pool = avg_pool_2x2(conv1, 2, 2)  # 28x28x4 -> 14x14x16

    conv2 = conv_layer(conv1_pool, shape=[3, 3, 16, 32], pad='SAME')
    conv2_pool = avg_pool_2x2(conv2, 2, 2)  # 14x14x16 -> 7x7x32

    conv3 = conv_layer(conv2_pool, shape=[3, 3, 32, 64], pad='SAME')
    # conv3_pool = max_pool_2x2(conv3) # 7x7x32 ->7x7x64

    conv4 = conv_layer(conv3, shape=[3, 3, 64, 96], pad='SAME')
    # conv4_pool = max_pool_2x2(conv4) # 7x7x64 -> 7x7x96

    conv5 = conv_layer(conv4, shape=[3, 3, 96, 64], pad='SAME')
    conv5_pool = avg_pool_2x2(conv5, 2, 2)  # 7x7x64 -> 3x3x64

    _flat = tf.reshape(conv5_pool, [-1, 3 * 3 * 64])
    _drop1 = tf.nn.dropout(_flat, keep_prob=keep_prob)

    # full_1 = tf.nn.relu(full_layer(_drop1, 200))
    full_1 = tf.nn.relu(full_layer(_drop1, 512))
    # -- until here
    # classifier:add(nn.Threshold(0, 1e-6))
    _drop2 = tf.nn.dropout(full_1, keep_prob=keep_prob)
    full_2 = tf.nn.relu(full_layer(_drop2, 256))
    # classifier:add(nn.Threshold(0, 1e-6))
    full_3 = full_layer(full_2, 6)

    # despite the name, 'predict' is the cross-entropy loss being minimized
    predict = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=full_3, labels=y_))

    #train_step = tf.train.RMSPropOptimizer(lr, decay, momentum).minimize(predict)
    train_step = tf.train.AdamOptimizer(lr).minimize(predict)

    correct_prediction = tf.equal(tf.argmax(full_3, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # TENSORBOARD
    tf.summary.scalar('loss', predict)
    tf.summary.scalar('accuracy', accuracy)

    merged_sum = tf.summary.merge_all()

    def test(sess):
        X = dataset.test.images.reshape(10, NUM_TEST_SAMPLES, 28, 28, 4)
        Y = dataset.test.labels.reshape(10, NUM_TEST_SAMPLES, 6)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        return acc

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sum_writer = tf.summary.FileWriter('logs/' + 'v4_sat6')
        sum_writer.add_graph(sess.graph)

        for i in range(STEPS):
            batch = dataset.train.random_batch(BATCH_SIZE)
            #batch = dataset.train.next_batch(BATCH_SIZE)
            batch_x = batch[0]
            batch_y = batch[1]

            # one combined run per iteration: a training step plus summaries
            _, summ = sess.run([train_step, merged_sum],
                               feed_dict={
                                   x: batch_x,
                                   y_: batch_y,
                                   keep_prob: dropoutProb
                               })
            sum_writer.add_summary(summ, i)

            if i % ONE_EPOCH == 0:
                print("\n*****************EPOCH: %d" % (i / ONE_EPOCH))
            if i % TEST_INTERVAL == 0:
                acc = test(sess)
                loss = sess.run(predict,
                                feed_dict={
                                    x: batch_x,
                                    y_: batch_y,
                                    keep_prob: dropoutProb
                                })
                print("EPOCH:%d" % (i / ONE_EPOCH) + " Step:" + str(i) +
                      "|| Minibatch Loss= " + "{:.4f}".format(loss) +
                      " Accuracy: {:.4}%".format(acc * 100))

        test(sess)
        sum_writer.close()
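Examples #19 and #20 call conv_layer with a pad argument and avg_pool_2x2 with an explicit kernel and stride. Plausible variants of the earlier helper sketches; 'VALID' padding is an assumption that matches the 7x7 -> 3x3 reduction implied by the reshape above:

def conv_layer(input_, shape, pad='SAME'):
    W = weight_variable(shape)
    b = bias_variable([shape[3]])
    return tf.nn.relu(tf.nn.conv2d(input_, W, strides=[1, 1, 1, 1],
                                   padding=pad) + b)

def avg_pool_2x2(x, k, s):
    # k x k average pooling with stride s
    return tf.nn.avg_pool(x, ksize=[1, k, k, 1],
                          strides=[1, s, s, 1], padding='VALID')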
Example #20
def cnn_model_trainer():
    dataset = DeepSatData()

    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 4], name='x')
    y_ = tf.placeholder(tf.float32, shape=[None, 4], name='y_')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    phase_train = tf.placeholder(tf.bool, name='phase_train')

    conv1 = conv_layer_no_relu(x, shape=[3, 3, 4, 16], pad='SAME')
    conv1_bn = batch_norm(conv1, 16, phase_train)
    conv1_rl = tf.nn.relu(conv1_bn)

    conv2 = conv_layer_no_relu(conv1_rl, shape=[3, 3, 16, 32], pad='SAME')
    conv2_bn = batch_norm(conv2, 32, phase_train)
    conv2_rl = tf.nn.relu(conv2_bn)
    conv2_pool = avg_pool_2x2(conv2_rl, 2, 2)

    conv3 = conv_layer_no_relu(conv2_pool, shape=[3, 3, 32, 64], pad='SAME')
    conv3_bn = batch_norm(conv3, 64, phase_train)
    conv3_rl = tf.nn.relu(conv3_bn)
    conv3_pool = avg_pool_2x2(conv3_rl, 2, 2)

    conv4 = conv_layer_no_relu(conv3_pool, shape=[3, 3, 64, 96], pad='SAME')
    conv4_bn = batch_norm(conv4, 96, phase_train)
    conv4_rl = tf.nn.relu(conv4_bn)

    conv5 = conv_layer_no_relu(conv4_rl, shape=[3, 3, 96, 64], pad='SAME')
    conv5_bn = batch_norm(conv5, 64, phase_train)
    conv5_rl = tf.nn.relu(conv5_bn)
    conv5_pool = avg_pool_2x2(conv5_rl, 2, 2)

    _flat = tf.reshape(conv5_pool, [-1, 3 * 3 * 64])
    _drop1 = tf.nn.dropout(_flat, keep_prob=keep_prob)

    full_1 = tf.nn.relu(full_layer(_drop1, 512))
    _drop2 = tf.nn.dropout(full_1, keep_prob=keep_prob)

    full_2 = tf.nn.relu(full_layer(_drop2, 256))

    full_3 = full_layer(full_2, 4)

    # network output as a softmax tensor, used for prediction at serving time
    pred = tf.nn.softmax(logits=full_3, name='pred')

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=full_3, labels=y_))

    # train_step = tf.train.RMSPropOptimizer(lr, decay, momentum).minimize(cross_entropy)
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(full_3, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                              name='accuracy')

    tf.summary.scalar('loss', cross_entropy)
    tf.summary.scalar('accuracy', accuracy)

    # Setting up for the visualization of the data in Tensorboard
    embedding_size = 256  # size of second to last fc layer
    embedding_input = full_2  # FC2 as input
    # Variable containing the points in visualization
    embedding = tf.Variable(tf.zeros([10000, embedding_size]),
                            name="test_embedding")
    assignment = embedding.assign(
        embedding_input)  # Will be passed in the test session

    merged_sum = tf.summary.merge_all()

    def test(test_sess, assign):
        x_ = dataset.test.images.reshape(10, 10000, 28, 28, 4)
        y = dataset.test.labels.reshape(10, 10000, 4)

        test_acc = np.mean([
            test_sess.run(accuracy,
                          feed_dict={
                              x: x_[im],
                              y_: y[im],
                              keep_prob: 1.0,
                              phase_train: False
                          }) for im in range(10)
        ])

        # Pass through the last 10,000 of the test set for visualization
        test_sess.run([assign],
                      feed_dict={
                          x: x_[9],
                          y_: y[9],
                          keep_prob: 1.0,
                          phase_train: False
                      })
        return test_acc

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # for tensorboard
        sum_writer = tf.summary.FileWriter(os.path.join(log_dir, log_name))
        sum_writer.add_graph(sess.graph)

        # Create a Saver object
        # max_to_keep: number of recent checkpoints to retain; older ones
        # are deleted automatically
        saver = tf.train.Saver(max_to_keep=MODELS_TO_KEEP)

        # setting up Projector
        config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
        embedding_config = config.embeddings.add()
        embedding_config.tensor_name = embedding.name
        embedding_config.metadata_path = LABELS  # labels

        # Specify the width and height of a single thumbnail.
        embedding_config.sprite.image_path = SPRITES
        embedding_config.sprite.single_image_dim.extend([28, 28])
        tf.contrib.tensorboard.plugins.projector.visualize_embeddings(
            sum_writer, config)

        for i in range(STEPS):
            batch = dataset.train.random_batch(BATCH_SIZE)
            # batch = dataset.train.next_batch(BATCH_SIZE)
            batch_x = batch[0]
            batch_y = batch[1]

            # one combined run per iteration: a training step plus summaries
            _, summ = sess.run([train_step, merged_sum],
                               feed_dict={
                                   x: batch_x,
                                   y_: batch_y,
                                   keep_prob: dropoutProb,
                                   phase_train: True
                               })
            sum_writer.add_summary(summ, i)

            if i % ONE_EPOCH == 0:
                ep_print = "\n*****************EPOCH: %d" % (
                    (i / ONE_EPOCH) + 1)
                write_to_file.write(ep_print)
                print(ep_print)
            if i % TEST_INTERVAL == 0:
                acc = test(sess, assignment)
                loss = sess.run(cross_entropy,
                                feed_dict={
                                    x: batch_x,
                                    y_: batch_y,
                                    keep_prob: 1.0,
                                    phase_train: False
                                })
                ep_test_print = "\nEPOCH:%d" % ((i/ONE_EPOCH) + 1) + " Step:" + str(i) + \
                                "|| Minibatch Loss= " + "{:.4f}".format(loss) + \
                                " Accuracy: {:.4}%".format(acc * 100)
                write_to_file.write(ep_test_print)
                print(ep_test_print)
                # Save a checkpoint at each test interval
                saver.save(sess,
                           os.path.join(model_dir, model_name),
                           global_step=i)

        test(sess, assignment)
        sum_writer.close()
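Since cnn_model_trainer() names its tensors ('x', 'keep_prob', 'phase_train', 'pred') and saves checkpoints, a separate script could plausibly restore and run the model as sketched below; model_dir and images are assumptions:

import numpy as np
import tensorflow as tf

def predict_from_checkpoint(images, model_dir):
    # 'images' is assumed to be a float array of shape [n, 28, 28, 4]
    with tf.Session() as sess:
        # restore the newest checkpoint written by cnn_model_trainer()
        ckpt = tf.train.latest_checkpoint(model_dir)
        saver = tf.train.import_meta_graph(ckpt + '.meta')
        saver.restore(sess, ckpt)
        g = tf.get_default_graph()
        x = g.get_tensor_by_name('x:0')
        keep_prob = g.get_tensor_by_name('keep_prob:0')
        phase_train = g.get_tensor_by_name('phase_train:0')
        pred = g.get_tensor_by_name('pred:0')
        probs = sess.run(pred, feed_dict={x: images,
                                          keep_prob: 1.0,
                                          phase_train: False})
        return np.argmax(probs, axis=1)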