Example #1
def run_simple_net():
    dataset = DeepSatData()

    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 4])
    y_ = tf.placeholder(tf.float32, shape=[None, 6])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[3, 3, 4, 16], pad='SAME')
    conv1_pool = avg_pool_2x2(conv1, 2, 2)  #28x28x4->14x14x16

    conv2 = conv_layer(conv1_pool, shape=[3, 3, 16, 32], pad='SAME')
    conv2_pool = avg_pool_2x2(conv2, 2, 2)  #14x14x16->7x7x32

    conv3 = conv_layer(conv2_pool, shape=[3, 3, 32, 64], pad='SAME')
    # conv3_pool = max_pool_2x2(conv3) # 7x7x32 ->7x7x64

    conv4 = conv_layer(conv3, shape=[3, 3, 64, 96], pad='SAME')
    # conv4_pool = max_pool_2x2(conv4) # 7x7x64 -> 7x7x96

    conv5 = conv_layer(conv4, shape=[3, 3, 96, 64], pad='SAME')
    conv5_pool = avg_pool_2x2(conv5, 2, 2)  # 7x7x64 -> 3x3x64 (VALID pooling)

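    # Three 2x2 poolings (VALID) take 28x28 -> 14x14 -> 7x7 -> 3x3, hence 3*3*64 flat features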
    _flat = tf.reshape(conv5_pool, [-1, 3 * 3 * 64])
    _drop1 = tf.nn.dropout(_flat, keep_prob=keep_prob)

    # full_1 = tf.nn.relu(full_layer(_drop1, 200))
    full_1 = tf.nn.relu(full_layer(_drop1, 512))
    _drop2 = tf.nn.dropout(full_1, keep_prob=keep_prob)
    full_2 = tf.nn.relu(full_layer(_drop2, 256))
    full_3 = full_layer(full_2, 6)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=full_3, labels=y_))

    # train_step = tf.train.RMSPropOptimizer(lr, decay, momentum).minimize(cross_entropy)
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(full_3, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # TENSORBOARD
    tf.summary.scalar('loss', cross_entropy)
    tf.summary.scalar('accuracy', accuracy)

    merged_sum = tf.summary.merge_all()

    def test(sess):
        X = dataset.test.images.reshape(10, NUM_TEST_SAMPLES, 28, 28, 4)
        Y = dataset.test.labels.reshape(10, NUM_TEST_SAMPLES, 6)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        return acc

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sum_writer = tf.summary.FileWriter('logs/v4_sat6')
        sum_writer.add_graph(sess.graph)

        for i in range(STEPS):
            batch = dataset.train.random_batch(BATCH_SIZE)
            #batch = dataset.train.next_batch(BATCH_SIZE)
            batch_x = batch[0]
            batch_y = batch[1]

            # One training step per iteration; summaries are collected in the same run.
            _, summ = sess.run([train_step, merged_sum],
                               feed_dict={
                                   x: batch_x,
                                   y_: batch_y,
                                   keep_prob: dropoutProb
                               })
            sum_writer.add_summary(summ, i)

            if i % ONE_EPOCH == 0:
                print("\n*****************EPOCH: %d" % (i // ONE_EPOCH))
            if i % TEST_INTERVAL == 0:
                acc = test(sess)
                loss = sess.run(cross_entropy,
                                feed_dict={
                                    x: batch_x,
                                    y_: batch_y,
                                    keep_prob: 1.0
                                })
                print("EPOCH:%d" % (i // ONE_EPOCH) + " Step:" + str(i) +
                      " || Minibatch Loss= " + "{:.4f}".format(loss) +
                      " Accuracy: {:.4}%".format(acc * 100))

        final_acc = test(sess)
        print("Final test accuracy: {:.4}%".format(final_acc * 100))
        sum_writer.close()
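Both examples depend on helper functions (conv_layer, avg_pool_2x2, full_layer) and module-level constants (lr, STEPS, BATCH_SIZE, ONE_EPOCH, TEST_INTERVAL, dropoutProb, NUM_TEST_SAMPLES) that are defined elsewhere in the file. The sketch below shows one plausible set of definitions consistent with how they are called above; the initializers and hyperparameter values are illustrative assumptions, not the original code. Note that avg_pool_2x2 must use VALID padding to produce the 7x7 -> 3x3 reduction implied by the reshape to 3*3*64.

import numpy as np
import tensorflow as tf

# Illustrative hyperparameters (assumed values, not from the original source).
lr = 1e-3
STEPS = 20000
BATCH_SIZE = 64
ONE_EPOCH = 1000         # steps per epoch; depends on the dataset size
TEST_INTERVAL = 100
dropoutProb = 0.5
NUM_TEST_SAMPLES = 1000  # per-chunk test size used in test()

def weight_variable(shape):
    # Truncated-normal initialization is a common TF1 default.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv_layer(input_, shape, pad='SAME'):
    # Stride-1 convolution + bias + ReLU.
    W = weight_variable(shape)
    b = bias_variable([shape[3]])
    return tf.nn.relu(tf.nn.conv2d(input_, W, strides=[1, 1, 1, 1], padding=pad) + b)

def avg_pool_2x2(input_, ksize, stride):
    # VALID padding: 28x28 -> 14x14 -> 7x7 -> 3x3 across the three poolings.
    return tf.nn.avg_pool(input_, ksize=[1, ksize, ksize, 1],
                          strides=[1, stride, stride, 1], padding='VALID')

def full_layer(input_, size):
    # Fully connected layer without activation.
    in_size = int(input_.get_shape()[1])
    W = weight_variable([in_size, size])
    b = bias_variable([size])
    return tf.matmul(input_, W) + b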
Example #2
def cnn_model_trainer():
    dataset = DeepSatData()

    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 4], name='x')
    y_ = tf.placeholder(tf.float32, shape=[None, 4], name='y_')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
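    # phase_train switches batch norm between per-batch statistics (training) and moving averages (inference)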
    phase_train = tf.placeholder(tf.bool, name='phase_train')

    conv1 = conv_layer_no_relu(x, shape=[3, 3, 4, 16], pad='SAME')
    conv1_bn = batch_norm(conv1, 16, phase_train)
    conv1_rl = tf.nn.relu(conv1_bn)

    conv2 = conv_layer_no_relu(conv1_rl, shape=[3, 3, 16, 32], pad='SAME')
    conv2_bn = batch_norm(conv2, 32, phase_train)
    conv2_rl = tf.nn.relu(conv2_bn)
    conv2_pool = avg_pool_2x2(conv2_rl, 2, 2)

    conv3 = conv_layer_no_relu(conv2_pool, shape=[3, 3, 32, 64], pad='SAME')
    conv3_bn = batch_norm(conv3, 64, phase_train)
    conv3_rl = tf.nn.relu(conv3_bn)
    conv3_pool = avg_pool_2x2(conv3_rl, 2, 2)

    conv4 = conv_layer_no_relu(conv3_pool, shape=[3, 3, 64, 96], pad='SAME')
    conv4_bn = batch_norm(conv4, 96, phase_train)
    conv4_rl = tf.nn.relu(conv4_bn)

    conv5 = conv_layer_no_relu(conv4_rl, shape=[3, 3, 96, 64], pad='SAME')
    conv5_bn = batch_norm(conv5, 64, phase_train)
    conv5_rl = tf.nn.relu(conv5_bn)
    conv5_pool = avg_pool_2x2(conv5_rl, 2, 2)

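    # As in Example #1: 28x28 -> 14x14 -> 7x7 -> 3x3 after the three poolings, hence 3*3*64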
    _flat = tf.reshape(conv5_pool, [-1, 3 * 3 * 64])
    _drop1 = tf.nn.dropout(_flat, keep_prob=keep_prob)

    full_1 = tf.nn.relu(full_layer(_drop1, 512))
    _drop2 = tf.nn.dropout(full_1, keep_prob=keep_prob)

    full_2 = tf.nn.relu(full_layer(_drop2, 256))

    full_3 = full_layer(full_2, 4)

    # Network output as a softmax tensor, used for prediction at inference time
    pred = tf.nn.softmax(logits=full_3, name='pred')

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=full_3, labels=y_))

    # train_step = tf.train.RMSPropOptimizer(lr, decay, momentum).minimize(cross_entropy)
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(full_3, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                              name='accuracy')

    tf.summary.scalar('loss', cross_entropy)
    tf.summary.scalar('accuracy', accuracy)

    # Setting up for the visualization of the data in Tensorboard
    embedding_size = 256  # size of second to last fc layer
    embedding_input = full_2  # FC2 as input
    # Variable containing the points in visualization
    embedding = tf.Variable(tf.zeros([10000, embedding_size]),
                            name="test_embedding")
    # The assignment op is run in the test session to populate the embedding variable
    assignment = embedding.assign(embedding_input)

    merged_sum = tf.summary.merge_all()

    def test(test_sess, assign):
        x_ = dataset.test.images.reshape(10, 10000, 28, 28, 4)
        y = dataset.test.labels.reshape(10, 10000, 4)

        test_acc = np.mean([
            test_sess.run(accuracy,
                          feed_dict={
                              x: x_[im],
                              y_: y[im],
                              keep_prob: 1.0,
                              phase_train: False
                          }) for im in range(10)
        ])

        # Pass through the last 10,000 of the test set for visualization
        test_sess.run([assign],
                      feed_dict={
                          x: x_[9],
                          y_: y[9],
                          keep_prob: 1.0,
                          phase_train: False
                      })
        return test_acc

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # for tensorboard
        sum_writer = tf.summary.FileWriter(os.path.join(log_dir, log_name))
        sum_writer.add_graph(sess.graph)

        # Create a Saver object.
        # max_to_keep: number of recent checkpoints to retain; older ones are deleted.
        saver = tf.train.Saver(max_to_keep=MODELS_TO_KEEP)

        # setting up Projector
        config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
        embedding_config = config.embeddings.add()
        embedding_config.tensor_name = embedding.name
        embedding_config.metadata_path = LABELS  # labels

        # Specify the width and height of a single thumbnail.
        embedding_config.sprite.image_path = SPRITES
        embedding_config.sprite.single_image_dim.extend([28, 28])
        tf.contrib.tensorboard.plugins.projector.visualize_embeddings(
            sum_writer, config)

        for i in range(STEPS):
            batch = dataset.train.random_batch(BATCH_SIZE)
            # batch = dataset.train.next_batch(BATCH_SIZE)
            batch_x = batch[0]
            batch_y = batch[1]

            # One training step per iteration; summaries are collected in the same run.
            _, summ = sess.run([train_step, merged_sum],
                               feed_dict={
                                   x: batch_x,
                                   y_: batch_y,
                                   keep_prob: dropoutProb,
                                   phase_train: True
                               })
            sum_writer.add_summary(summ, i)

            if i % ONE_EPOCH == 0:
                ep_print = "\n*****************EPOCH: %d" % (
                    (i // ONE_EPOCH) + 1)
                write_to_file.write(ep_print)
                print(ep_print)
            if i % TEST_INTERVAL == 0:
                acc = test(sess, assignment)
                loss = sess.run(cross_entropy,
                                feed_dict={
                                    x: batch_x,
                                    y_: batch_y,
                                    keep_prob: 1.0,
                                    phase_train: False
                                })
                ep_test_print = "\nEPOCH:%d" % ((i // ONE_EPOCH) + 1) + " Step:" + str(i) + \
                                " || Minibatch Loss= " + "{:.4f}".format(loss) + \
                                " Accuracy: {:.4}%".format(acc * 100)
                write_to_file.write(ep_test_print)
                print(ep_test_print)
                # Save a checkpoint at every test interval
                saver.save(sess,
                           os.path.join(model_dir, model_name),
                           global_step=i)

        final_acc = test(sess, assignment)
        print("Final test accuracy: {:.4}%".format(final_acc * 100))
        sum_writer.close()
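Example #2 additionally calls conv_layer_no_relu and a batch_norm helper gated by phase_train (the constants MODELS_TO_KEEP, log_dir, log_name, model_dir, model_name, LABELS, SPRITES, and the write_to_file handle are likewise defined elsewhere). Below is a minimal sketch following the widely used TF1 moving-average batch-norm recipe, reusing the weight_variable/bias_variable helpers assumed above; treat it as one plausible implementation, not the original code.

def conv_layer_no_relu(input_, shape, pad='SAME'):
    # Like conv_layer, but returns the pre-activation output so batch norm
    # can be inserted before the ReLU.
    W = weight_variable(shape)
    b = bias_variable([shape[3]])
    return tf.nn.conv2d(input_, W, strides=[1, 1, 1, 1], padding=pad) + b

def batch_norm(x, n_out, phase_train):
    # Classic TF1 recipe: use per-batch statistics during training and
    # exponential moving averages at inference, selected via phase_train.
    beta = tf.Variable(tf.zeros([n_out]))
    gamma = tf.Variable(tf.ones([n_out]))
    batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
    ema = tf.train.ExponentialMovingAverage(decay=0.99)

    def mean_var_with_update():
        ema_apply_op = ema.apply([batch_mean, batch_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    mean, var = tf.cond(phase_train,
                        mean_var_with_update,
                        lambda: (ema.average(batch_mean), ema.average(batch_var)))
    return tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)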