Code Example #1

import os

import numpy as np
import tensorflow as tf
def one_hidden_layers(X,
                      n_hidden1=300,
                      n_outputs=10,
                      activation_func=tf.nn.sigmoid):
    """Feed-forward network with a single hidden layer; returns the logits."""
    print("Network with one hidden layer")
    with tf.name_scope("dnn"):
        hidden1 = neuron_layer(X,
                               n_hidden1,
                               name="hidden1",
                               activation=activation_func)
        logits = neuron_layer(hidden1, n_outputs, name="outputs")
    return logits
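
The neuron_layer() helper used above (and in the examples that follow) is not shown in this listing. The sketch below is a plausible reconstruction rather than the definitive version: the fan-in-scaled initialization, the optional lambdaReg L2 term (which the third example passes positionally), and the optional activation argument are assumptions.

def neuron_layer(X, n_neurons, name, lambdaReg=0.0, activation=None):
    """Fully connected layer: X @ W + b, with optional L2 penalty and activation."""
    with tf.name_scope(name):
        n_inputs = int(X.get_shape()[1])
        stddev = 2 / np.sqrt(n_inputs)  # scale the init by fan-in
        init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
        W = tf.Variable(init, name="kernel")
        b = tf.Variable(tf.zeros([n_neurons]), name="bias")
        Z = tf.matmul(X, W) + b
        if lambdaReg > 0.0:
            # stash an L2 penalty so it can be added to the loss if desired
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 lambdaReg * tf.nn.l2_loss(W))
        return activation(Z) if activation is not None else Z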
Code Example #2
def two_hidden_layers(X,
                      n_hidden1=1024,
                      n_hidden2=100,
                      n_outputs=10,
                      activation_func=tf.nn.sigmoid):
    """Two convolutional blocks followed by a fully connected hidden layer."""
    nf1 = 32  # filters in the first convolutional layer
    nf2 = 64  # filters in the second convolutional layer
    input_layer = tf.reshape(X, [-1, 28, 28, 1])  # 28x28 single-channel images
    print("Network with two hidden layers")
    with tf.name_scope("cnn"):
        # Convolutional Layer #1
        conv1 = tf.layers.conv2d(inputs=input_layer,
                                 filters=nf1,
                                 kernel_size=[5, 5],
                                 padding="same",
                                 activation=tf.nn.relu)

        # Pooling Layer #1
        pool1 = tf.layers.max_pooling2d(inputs=conv1,
                                        pool_size=[2, 2],
                                        strides=2)
        # Convolutional Layer #2
        conv2 = tf.layers.conv2d(inputs=pool1,
                                 filters=nf2,
                                 kernel_size=[5, 5],
                                 padding="same",
                                 activation=tf.nn.relu)
        # Pooling Layer #2
        pool2 = tf.layers.max_pooling2d(inputs=conv2,
                                        pool_size=[2, 2],
                                        strides=2)
        # Flatten: two 2x2 poolings reduce 28x28 maps to 7x7
        pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * nf2])

    print("Network with two hidden layers")
    with tf.name_scope("dnn"):
        hidden1 = neuron_layer(pool2_flat,
                               n_hidden1,
                               name="hidden1",
                               activation=activation_func)
        # Uncomment to add a second hidden layer (this is where n_hidden2 is used):
        # hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2", activation=activation_func)
        logits = neuron_layer(hidden1, n_outputs, name="outputs")

    return logits
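
The training function below leans on three more helpers that are not shown: reset_graph(), log_dir(), and shuffle_batch(). It also reads X_train, y_train, X_valid, y_valid, X_test, and y_test from the enclosing scope. These are minimal sketches assuming the usual TF1 idioms; the tf_logs directory name and the seed are assumptions.

from datetime import datetime

def reset_graph(seed=42):
    # start from an empty default graph with reproducible randomness
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

def log_dir(prefix=""):
    # one TensorBoard run directory per call, e.g. tf_logs/forestbook_dnn-run-...
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    name = (prefix + "-" if prefix else "") + "run-" + now
    return "{}/{}".format("tf_logs", name)

def shuffle_batch(X, y, batch_size):
    # yield random mini-batches covering the whole training set once per epoch
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        yield X[batch_idx], y[batch_idx]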
def two_hlayers(learning_rate, batch_size, activation_fnc, n_hidden1,
                n_hidden2, n_epochs, trainoptimizer):
    """Train a two-hidden-layer DNN with dropout, checkpointing and early stopping."""
    n_inputs = 54  # number of input features
    n_outputs = 7  # number of classes
    dropout_rate = 0.5
    lambdaReg = 0.0  # L2 regularization strength (0.0 disables it)

    # Reset the default graph before building it; the FileWriter is created
    # after construction (below) so that it records the finished graph.
    reset_graph()
    logdir = log_dir("forestbook_dnn")

    X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
    y = tf.placeholder(tf.int32, shape=(None,), name="y")

    # `training` switches dropout on for training runs and off for evaluation.
    training = tf.placeholder_with_default(False, shape=(), name='training')
    X_drop = tf.layers.dropout(X, dropout_rate, training=training)

    with tf.name_scope("dnn"):
        with tf.variable_scope("layer1"):
            hidden1 = neuron_layer(X_drop,
                                   n_hidden1,
                                   "hidden1",
                                   lambdaReg,
                                   activation=activation_fnc)
            hidden1_drop = tf.layers.dropout(hidden1,
                                             dropout_rate,
                                             training=training)
        with tf.variable_scope("layer2"):
            hidden2 = neuron_layer(hidden1_drop,
                                   n_hidden2,
                                   "hidden2",
                                   lambdaReg,
                                   activation=activation_fnc)
            hidden2_drop = tf.layers.dropout(hidden2,
                                             dropout_rate,
                                             training=training)
        with tf.variable_scope("layer3"):
            logits = neuron_layer(hidden2_drop, n_outputs, "outputs",
                                  lambdaReg)

    with tf.name_scope("loss"):
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y, logits=logits)
        loss = tf.reduce_mean(xentropy, name="loss")
        loss_summary = tf.summary.scalar('log_loss', loss)

    with tf.name_scope("train"):
        optimizer = trainoptimizer(learning_rate)
        training_op = optimizer.minimize(loss)

    with tf.name_scope("eval"):
        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        accuracy_summary = tf.summary.scalar('accuracy', accuracy)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

    checkpoint_path = "/tmp/my_deep_forest_model.ckpt"
    checkpoint_epoch_path = checkpoint_path + ".epoch"
    final_model_path = '/tmp/forest-{comb}-{LR}-{epoch}-{batch}.ckpt'.format(
        comb="3",
        LR=str(learning_rate),
        epoch=str(n_epochs),
        batch=str(batch_size))

    best_loss = np.infty
    epochs_without_progress = 0
    max_epochs_without_progress = 50

    with tf.Session() as sess:
        if os.path.isfile(checkpoint_epoch_path):
            # if the checkpoint file exists, restore the model and load the epoch number
            with open(checkpoint_epoch_path, "rb") as f:
                start_epoch = int(f.read())
            print("Training was interrupted. Continuing at epoch", start_epoch)
            saver.restore(sess, checkpoint_path)
        else:
            start_epoch = 0
            sess.run(init)

        for epoch in range(start_epoch, n_epochs):
            for X_batch, y_batch in shuffle_batch(X_train, y_train,
                                                  batch_size):
                sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            accuracy_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})  # last batch of the epoch
            accuracy_val, loss_val, accuracy_summary_str, loss_summary_str = sess.run(
                [accuracy, loss, accuracy_summary, loss_summary],
                feed_dict={
                    X: X_valid,
                    y: y_valid
                })
            file_writer.add_summary(accuracy_summary_str, epoch)
            file_writer.add_summary(loss_summary_str, epoch)
            if epoch % 10 == 0:
                print("Epoch:", epoch,
                      "\tBatch accuracy: {:.3f}%".format(accuracy_batch * 100),
                      "\tValidation accuracy: {:.3f}%".format(accuracy_val * 100),
                      "\tValidation loss: {:.5f}".format(loss_val))
                saver.save(sess, checkpoint_path)
                with open(checkpoint_epoch_path, "wb") as f:
                    f.write(b"%d" % (epoch + 1))
                if loss_val < best_loss:
                    saver.save(sess, final_model_path)
                    best_loss = loss_val
                else:
                    # this check runs every 10 epochs, so count 10 at a time
                    epochs_without_progress += 10
                    if epochs_without_progress > max_epochs_without_progress:
                        print("Early stopping")
                        break

    os.remove(checkpoint_epoch_path)

    with tf.Session() as sess:
        saver.restore(sess, final_model_path)
        accuracy_test = accuracy.eval(feed_dict={X: X_test, y: y_test})

    print("\tTest accuracy: {:.3f}%".format(accuracy_test * 100))