Example #1
def run_second_net():

    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    rate = tf.placeholder(tf.float32)
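    # note: tf.nn.dropout's rate argument is the drop probability, i.e. rate = 1 - keep_prob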

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, rate=rate)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, rate=rate)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, rate=rate)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, rate=rate)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_conv,
                                                                              labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], rate: 0.0})
                       for i in range(10)])
        print(f'Accuracy: {acc * 100:.4}%')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], rate: 0.5})

            if i % 50 == 0:
                test(sess)

        test(sess)
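
All of these snippets depend on a small helper module (imported in Example #15 as layers) whose definitions are not shown here. The following is a minimal sketch of what conv_layer, max_pool_2x2, and full_layer typically look like, assuming stride-1 'SAME' convolutions with ReLU and a plain affine full_layer; signatures vary slightly across examples (Example #10, for instance, passes extra pad and pooling-size arguments).

import tensorflow as tf

def weight_variable(shape):
    # small random weights to break symmetry
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv_layer(input, shape):
    # stride-1 'SAME' convolution followed by ReLU
    W = weight_variable(shape)
    b = bias_variable([shape[3]])
    return tf.nn.relu(tf.nn.conv2d(input, W, strides=[1, 1, 1, 1], padding='SAME') + b)

def max_pool_2x2(x):
    # halves each spatial dimension
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def full_layer(input, size):
    # affine layer; callers apply their own nonlinearity
    in_size = int(input.get_shape()[1])
    W = weight_variable([in_size, size])
    b = bias_variable([size])
    return tf.matmul(input, W) + b
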
def run_simple_net():
    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    conv3_pool = max_pool_2x2(conv3)
    # three 2x2 poolings: 32 -> 16 -> 8 -> 4, so the flattened size is 4 * 4 * 128
    conv3_flat = tf.reshape(conv3_pool, [-1, 4 * 4 * 128])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    # feed the dropout output, not the raw flat tensor, into the fully connected layer
    full_1 = tf.nn.relu(full_layer(conv3_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        print("Accuracy: {:.4}%".format(acc * 100))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })

            if i % 500 == 0:
                test(sess)

        test(sess)
Example #3
def run_simple_net(bs, lr):
    # load the data
    cifar = CifarDataManager()
    # init variables
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)
    # 2 conv and 1 max pooling
    conv1 = conv_layer(x, shape=[3, 3, 3, 64])
    conv2 = conv_layer(conv1, shape=[3, 3, 64, 64])
    conv2_pool = max_pool_2x2(conv2)
    # 2 conv and 1 max pooling
    conv3 = conv_layer(conv2_pool, shape=[3, 3, 64, 128])
    conv4 = conv_layer(conv3, shape=[3, 3, 128, 128])
    conv4_pool = max_pool_2x2(conv4)
    # flatten and drop to prevent overfitting
    conv4_flat = tf.reshape(conv4_pool, [-1, 8 * 8 * 128])
    conv4_drop = tf.nn.dropout(conv4_flat, keep_prob=keep_prob)
    # fully connected nn using relu as activation function
    full_0 = tf.nn.relu(full_layer(conv4_drop, 512))
    full0_drop = tf.nn.dropout(full_0, keep_prob=keep_prob)
    full_1 = tf.nn.relu(full_layer(full0_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)
    # loss function
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    # lr is parameterised so results can be collected for the table
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                       for i in range(10)])
        print("Accuracy: {:.4}%".format(acc * 100))
        return acc * 100

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(bs)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

            # periodic testing disabled while collecting the final table results:
            # if i % 500 == 0:
            #     test(sess)
        result = test(sess)
    return result
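
Since this variant returns the final accuracy, a hypothetical driver for filling in the table might sweep batch size and learning rate as below; the grid values are placeholders, and tf.reset_default_graph() clears stale placeholders between runs.

results = {}
for bs in (32, 64, 128):           # hypothetical batch sizes
    for lr in (1e-3, 5e-4, 1e-4):  # hypothetical learning rates
        tf.reset_default_graph()
        results[(bs, lr)] = run_simple_net(bs, lr)
        print(bs, lr, results[(bs, lr)])
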
def get_y_predict(x, keep_prob):
    conv1 = conv_layer(x, shape=[3, 3, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[3, 3, 32, 64])
    conv2_pool = max_pool_2x2(conv2)
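    # assuming 32x32x3 inputs: two 2x2 poolings leave an 8x8 grid with 64 channels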
    conv2_flat = tf.reshape(conv2_pool, [-1, 8 * 8 * 64])

    full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_predict = full_layer(full1_drop, 10)
    return y_predict
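
A hypothetical caller would wire get_y_predict into a graph and loss along these lines:

x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
keep_prob = tf.placeholder(tf.float32)

y_predict = get_y_predict(x, keep_prob)
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_predict, labels=y_))
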
Example #5
def run_simple_net():
    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    conv3_pool = max_pool_2x2(conv3)
    conv3_flat = tf.reshape(conv3_pool, [-1, 4 * 4 * 128])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full_1 = tf.nn.relu(full_layer(conv3_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                       for i in range(10)])
        print("Accuracy: {:.4}%".format(acc * 100))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

            if i % 500 == 0:
                test(sess)

        test(sess)
    def create_graph(self):
        # TODO: try using get_Variable?

        x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

        conv1 = conv_layer(x, shape=[5, 5, 1, 32])
        conv1_pool = max_pool_2x2(conv1)

        conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
        conv2_pool = max_pool_2x2(conv2)

        conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
        full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

        keep_prob = tf.placeholder(tf.float32)
        full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

        y_conv = full_layer(full1_drop, 10)

        return x, keep_prob, y_conv
def build_second_net():
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
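    # an 8x8 max-pool over the remaining 8x8 grid acts as global pooling: one C3-dimensional vector per image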
    conv3_pool = tf.nn.max_pool(conv3_3,
                                ksize=[1, 8, 8, 1],
                                strides=[1, 8, 8, 1],
                                padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)  # noqa

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # noqa
Example #9
def build_second_net():
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)  # noqa

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # noqa
Example #10
def run_simple_net():
    dataset = DeepSatData()

    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 4])
    y_ = tf.placeholder(tf.float32, shape=[None, 6])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[3, 3, 4, 16], pad='SAME')
    conv1_pool = max_pool_2x2(conv1, 2, 2)  #28x28x4->14x14x16

    conv2 = conv_layer(conv1_pool, shape=[3, 3, 16, 32], pad='SAME')
    conv2_pool = max_pool_2x2(conv2, 2, 2)  #14x14x16->7x7x32

    conv3 = conv_layer(conv2_pool, shape=[3, 3, 32, 64], pad='SAME')
    # conv3_pool = max_pool_2x2(conv3) # 7x7x32 ->7x7x64

    conv4 = conv_layer(conv3, shape=[3, 3, 64, 96], pad='SAME')
    # conv4_pool = max_pool_2x2(conv4) # 7x7x64 -> 7x7x96

    conv5 = conv_layer(conv4, shape=[3, 3, 96, 64], pad='SAME')
    #conv5_pool = max_pool_2x2(conv5, 2, 2) # 7x7x96 ->7x7x64

    _flat = tf.reshape(conv5, [-1, 7 * 7 * 64])
    _drop1 = tf.nn.dropout(_flat, keep_prob=keep_prob)

    # full_1 = tf.nn.relu(full_layer(_drop1, 200))
    full_1 = tf.nn.relu(full_layer(_drop1, 512))
    # -- until here
    # classifier:add(nn.Threshold(0, 1e-6))
    _drop2 = tf.nn.dropout(full_1, keep_prob=keep_prob)
    full_2 = tf.nn.relu(full_layer(_drop2, 256))
    # classifier:add(nn.Threshold(0, 1e-6))
    full_3 = full_layer(full_2, 6)

    # cross-entropy loss
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=full_3, labels=y_))

    train_step = tf.train.RMSPropOptimizer(lr, decay,
                                           momentum).minimize(loss)
    # train_step = tf.train.AdamOptimizer(lr)

    correct_prediction = tf.equal(tf.argmax(full_3, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # TENSORBOARD
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('accuracy', accuracy)

    merged_sum = tf.summary.merge_all()
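    # the scalar summaries can be inspected with: tensorboard --logdir logs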

    def test(sess):
        X = dataset.test.images.reshape(10, NUM_TEST_SAMPLES, 28, 28, 4)
        Y = dataset.test.labels.reshape(10, NUM_TEST_SAMPLES, 6)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        return acc

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sum_writer = tf.summary.FileWriter('logs/' + 'v2_sat6')
        sum_writer.add_graph(sess.graph)

        for i in range(STEPS):
            batch = dataset.train.random_batch(BATCH_SIZE)
            #batch = dataset.train.next_batch(BATCH_SIZE)
            batch_x = batch[0]
            batch_y = batch[1]

            # one training step per iteration, fetching the merged summaries alongside it
            _, summ = sess.run([train_step, merged_sum],
                               feed_dict={
                                   x: batch_x,
                                   y_: batch_y,
                                   keep_prob: dropoutProb
                               })
            sum_writer.add_summary(summ, i)

            if i % ONE_EPOCH == 0:
                print("\n*****************EPOCH: %d" % (i // ONE_EPOCH))
            if i % TEST_INTERVAL == 0:
                acc = test(sess)
                loss_val = sess.run(loss,
                                    feed_dict={
                                        x: batch_x,
                                        y_: batch_y,
                                        keep_prob: dropoutProb
                                    })
                print("EPOCH:%d" % (i // ONE_EPOCH) + " Step:" + str(i) +
                      "|| Minibatch Loss= " + "{:.4f}".format(loss_val) +
                      " Accuracy: {:.4}%".format(acc * 100))

        test(sess)
        sum_writer.close()
Example #11
def build_second_net():
    cifar = CifarDataManager()
    print('in build_second_net 111')

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)

    print('in build_second_net 222')

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)

    print('in build_second_net 333')

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3,
                                ksize=[1, 8, 8, 1],
                                strides=[1, 8, 8, 1],
                                padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)  # noqa

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # noqa

    # Plug this into the test procedure as above to continue...
    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        #print("Accuracy: {:.4}%".format(acc * 100))
        return acc

    print('in build_second_net 444')
    iter_list, test_accuracy_list, train_accuracy_list = [], [], []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            #print('i is ', i)
            #batch = cifar.train.next_batch(BATCH_SIZE)
            batch = cifar.train.random_batch(BATCH_SIZE)
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })

            if i % 500 == 0 or i == STEPS - 1:
                iter_list.append(i)
                # evaluate with dropout disabled
                train_acc = sess.run(accuracy,
                                     feed_dict={
                                         x: batch[0],
                                         y_: batch[1],
                                         keep_prob: 1.0
                                     })
                train_accuracy_list.append(train_acc)
                test_acc = test(sess)
                test_accuracy_list.append(test_acc)
                print(
                    'step i:{0:7} train_acc: {1:.6f} test_acc: {2:.6f}'.format(
                        i, train_acc, test_acc))

        test(sess)
        plot_accuracy(iter_list, test_accuracy_list, train_accuracy_list)
Example #12
def run_simple_net():
    print('get into run_simple_net')
    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    print('get into run_simple_net 111')

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    # conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 100])
    conv3_pool = max_pool_2x2(conv3)

    conv4 = conv_layer(conv3_pool, shape=[5, 5, 100, 256])
    conv4_pool = max_pool_2x2(conv4)
    print('conv4_pool.get_shape() is ', conv4_pool.get_shape())
    #conv4_flat = tf.reshape(conv4_pool, [-1, 4 * 4 * 256])
    conv4_flat = tf.reshape(conv4_pool, [-1, 2 * 2 * 256])
    conv4_drop = tf.nn.dropout(conv4_flat, keep_prob=keep_prob)

    print('get into run_simple_net 222')

    full_1 = tf.nn.relu(full_layer(conv4_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        #print("test accuracy: {:.4}%".format(acc * 100))
        return acc

    print('get into run_simple_net 333')
    iter_list, test_accuracy_list, train_accuracy_list = [], [], []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            #batch = cifar.train.next_batch(BATCH_SIZE)
            batch = cifar.train.random_batch(BATCH_SIZE)
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })

            if i % 500 == 0:
                iter_list.append(i)
                # evaluate training accuracy once on the current batch, with dropout disabled
                train_acc = sess.run(accuracy,
                                     feed_dict={
                                         x: batch[0],
                                         y_: batch[1],
                                         keep_prob: 1.0
                                     })
                train_accuracy_list.append(train_acc)
                test_acc = test(sess)
                test_accuracy_list.append(test_acc)

                print(
                    'step i:{0:6} train_acc: {1:.6f} test_acc: {2:.6f}'.format(
                        i, train_acc, test_acc))

        test(sess)
        plot_accuracy(iter_list, test_accuracy_list, train_accuracy_list)
Example #13
def buildModel():
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    x_image = tf.reshape(x, [-1, 28, 28, 1])
    conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
    full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

    keep_prob = tf.placeholder(tf.float32)
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    y_predict = y_conv

    # y_predict = tf.Print(y_conv, [y_conv, tf.shape(y_conv)], "the result is: ", summarize=50)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_predict, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar('cross_entropy', cross_entropy)
    tf.summary.scalar('accuracy', accuracy)
    # y_predict = tf.Print(y_conv, [y_conv], "this is result: ", summarize=10)
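    # 'start' records the last completed step in the checkpoint so training can resume where it left off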
    start = tf.Variable(0, name="start", dtype=tf.int32)
    saver = tf.train.Saver()

    merged = tf.summary.merge_all()

    # draw graph on tensorboard
    # with tf.Session() as sess:
    #     tf.summary.FileWriter("./draft", sess.graph);

    # tf.equal returns a bool tensor; tf.argmax returns the index of the maximum value in a tensor

    batchSize = 100
    # chkp.print_tensors_in_checkpoint_file("./checkpoint/model.ckpt", tensor_name='start', all_tensors=False)
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter('./draft', sess.graph)
        if os.path.isdir('./checkpoint'):
            saver.restore(sess, "./checkpoint/model.ckpt")
        else:
            sess.run(tf.global_variables_initializer())
        for i in range(start.eval() + 1, int(60000 / batchSize)):
            x_train, y_train = next_batch(batchSize, i + 1)
            update_start = start.assign(i)
            batch = sess.run([x_train, y_train])
            if (i + 1) % 10 == 0:
                sess.run(update_start)
                summary, train_accuracy = sess.run([merged, accuracy],
                                                   feed_dict={
                                                       x: batch[0],
                                                       y_: batch[1],
                                                       keep_prob: 1.0
                                                   })
                print("step {}, training accuracy {}".format(
                    int((i + 1) / 10), train_accuracy))
                train_writer.add_summary(summary, i)
                saver.save(sess, "./checkpoint/model.ckpt")
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })
Example #14
def run_simple_net():
    # load the CIFAR-10 data
    cifar = CifarDataManager()

    # placeholder for the input images
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    # placeholder for the ground-truth class labels
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    # first convolutional layer
    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    # pool the result
    conv1_pool = max_pool_2x2(conv1)

    # second convolutional layer
    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    # pool the result
    conv2_pool = max_pool_2x2(conv2)

    # third convolutional layer
    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    # pool the result
    conv3_pool = max_pool_2x2(conv3)

    # flatten the feature maps into one-dimensional vectors
    conv3_flat = tf.reshape(conv3_pool, [-1, 4 * 4 * 128])
    # apply dropout
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    # fully connected layer
    full_1 = tf.nn.relu(full_layer(conv3_drop, 512))
    # apply dropout
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    # output logits
    y_conv = full_layer(full1_drop, 10)

    # loss function: cross-entropy
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    # define the training step (Adam optimizer)
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    # evaluation ops used to measure model accuracy
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                       for i in range(10)])
        print("Accuracy: {:.4}%".format(acc * 100))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            # run one training step
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

            if i % 500 == 0:
                test(sess)

        test(sess)
Example #15
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from layers import conv_layer, max_pool_2x2, full_layer

DATA_DIR = '/tmp/data'
MINIBATCH_SIZE = 50
STEPS = 5000


mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

x_image = tf.reshape(x, [-1, 28, 28, 1])
conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
conv1_pool = max_pool_2x2(conv1)

conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
conv2_pool = max_pool_2x2(conv2)

conv2_flat = tf.reshape(conv2_pool, [-1, 7*7*64])
full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

keep_prob = tf.placeholder(tf.float32)
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

y_conv = full_layer(full1_drop, 10)

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
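
The snippet is cut off here; assuming the same pattern as the CIFAR examples above, the missing accuracy op and training/evaluation loop would plausibly look like this:

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for i in range(STEPS):
        batch = mnist.train.next_batch(MINIBATCH_SIZE)
        sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                             y_: mnist.test.labels,
                                             keep_prob: 1.0})
    print("test accuracy: {:.4}%".format(test_acc * 100))
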
Example #16
# initialise DataGetter object
test_img = DataGetter(test_path, Gray)
train_img = DataGetter(train_path, Gray)
val_img = DataGetter(val_path, Gray)

########################################################################################################################
# tensorflow network architecture
########################################################################################################################

# placeholder for images and labels
x = tf.placeholder(tf.float32, shape=[None, PIXEL, PIXEL, COLOR])
y_ = tf.placeholder(tf.float32, shape=[None, len(position_dict)])

# 1 convolution layer with max pooling
conv1 = conv_layer(x, shape=[CONV, CONV, COLOR, CONV1_DEPTH])
conv1_pool = max_pool_2x2(conv1)

# 2 convolution layer with max pooling
conv2 = conv_layer(conv1_pool,
                   shape=[CONV, CONV, CONV1_DEPTH, CONV1_DEPTH * 2])
conv2_pool = max_pool_2x2(conv2)

new_size = int(PIXEL / 4 * PIXEL / 4 * CONV1_DEPTH * 2)
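# after two 2x2 poolings each spatial dimension is PIXEL / 4, with CONV1_DEPTH * 2 channels from conv2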

if PIXEL % 2 != 0:
    logger.warning('potential issue with the pixel size')

# flatten, then fully connected layer
conv3_flat = tf.reshape(conv2_pool, [-1, new_size])
full_1 = tf.nn.relu(full_layer(conv3_flat, 1024))
def inference(x):
    #conv layer 1
    with tf.name_scope('conv1') as scope:
        w = layers.xavier_weights_variable([3, 3, 3, 128])
        b = layers.bias_variable([128])
        conv1 = tf.nn.relu(layers.conv2d(x, w) + b, name="activations")

    #conv layer 2
    with tf.name_scope('conv2') as scope:
        w = layers.xavier_weights_variable([3, 3, 128, 128])
        b = layers.bias_variable([128])
        conv2 = tf.nn.relu(layers.conv2d(conv1, w) + b, name="activations")

    #maxpool1, images now 32x32
    with tf.name_scope('pool1') as scope:
        pool1 = layers.max_pool_2x2(conv2)

    #conv layer 3
    with tf.name_scope('conv3') as scope:
        w = layers.xavier_weights_variable([3, 3, 128, 128])
        b = layers.bias_variable([128])
        conv3 = tf.nn.relu(layers.conv2d(pool1, w) + b,
                           name="activations")

    #conv layer 4
    with tf.name_scope('conv4') as scope:
        w = layers.xavier_weights_variable([3, 3, 128, 128])
        b = layers.bias_variable([128])
        conv4 = tf.nn.relu(layers.conv2d(conv3, w) + b, name="activations")

    #maxpool2, images now 16x16
    with tf.name_scope('pool2') as scope:
        pool2 = layers.max_pool_2x2(conv4)

    #conv layer 5
    with tf.name_scope('conv5') as scope:
        w = layers.xavier_weights_variable([3, 3, 128, 128])
        b = layers.bias_variable([128])
        conv5 = tf.nn.relu(layers.conv2d(pool2, w) + b, name="activations")

    #maxpool3, images now 8x8
    with tf.name_scope('pool3') as scope:
        pool3 = layers.max_pool_2x2(conv5)

    #fully connected layer 1
    with tf.name_scope('fully_connected1') as scope:
        w = layers.xavier_weights_variable([8 * 8 * 128, 400])
        b = layers.bias_variable([400])
        pool3_flat = tf.reshape(pool3, [-1, 8 * 8 * 128])
        fully_connected1 = tf.nn.relu(tf.matmul(pool3_flat, w) + b,
                                      name="activations")

    #fully connected layer 2
    with tf.name_scope('fully_connected2') as scope:
        w = layers.xavier_weights_variable([400, 400])
        b = layers.bias_variable([400])
        fully_connected2 = tf.nn.relu(tf.matmul(fully_connected1, w) + b,
                                      name="activations")

    #fully connected layer 3
    with tf.name_scope('fully_connected3') as scope:
        w = layers.xavier_weights_variable([400, 200])
        b = layers.bias_variable([200])
        y_pred = tf.matmul(fully_connected2, w) + b
    return y_pred