def runner_with_results_output(y_bundle):
    """Append synthetic loss/accuracy curves (one point per batch index) to y_bundle."""
    test_losses = []
    test_accuracies = []

    for i in range(batch_size):
        test_losses.append(3.5 - 1.6 * sigmoid(i / 10))
        test_accuracies.append(0.5 + 0.4 * sigmoid(i / 10))

    y = YaxisBundle(test_losses, "loss", "b")
    y_bundle.append(y)

    y = YaxisBundle(test_accuracies, "accuracy", "g")
    y_bundle.append(y)
    return y_bundle
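
A minimal usage sketch for the helper above; PlotUtil's constructor and show_plot() are assumptions inferred from the commented-out calls in Example #2 below, and batch_size, sigmoid and YaxisBundle are taken from the surrounding module.

indep_test_axis = [i for i in range(batch_size)]
p = PlotUtil("synthetic test curves", indep_test_axis, "batch index", "value")
p.show_plot(runner_with_results_output([]))
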
Example #2
def run_with_config(config):  # X_train, y_train, X_test, y_test are read from module scope
    tf.reset_default_graph()  # Reset the graph so this function can be called repeatedly in a loop
    config.print_config()

    if config.matplot_lib_enabled:
        # Keep track of test performance across epochs (for plotting)
        test_losses = []
        test_accuracies = []
        indep_test_axis = []

    config.W = {
        'hidden':
        tf.Variable(tf.random_normal([config.n_inputs, config.n_hidden])),
        'output':
        tf.Variable(tf.random_normal([config.n_hidden, config.n_classes]))
    }
    config.biases = {
        'hidden': tf.Variable(tf.random_normal([config.n_hidden], mean=1.0)),
        'output': tf.Variable(tf.random_normal([config.n_classes]))
    }
    #-----------------------------------
    # Define parameters for model
    #-----------------------------------

    print(
        "Some useful info to get an insight on the dataset's shape and normalisation:"
    )
    print(
        "features shape, labels shape, each feature's mean, each feature's standard deviation"
    )
    print(X_test.shape, y_test.shape, np.mean(X_test), np.std(X_test))
    print("The dataset is therefore properly normalised, as expected.")

    # ------------------------------------------------------
    # step3: Let's get serious and build the neural network
    # ------------------------------------------------------
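    # X: input sequences of shape (batch, n_steps, n_inputs); Y: one-hot labels of shape (batch, n_classes)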
    X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
    Y = tf.placeholder(tf.float32, [None, config.n_classes])

    pred_Y = LSTM_Network(X, config)

    print "Unregularised variables:"
    for unreg in [
            tf_var.name for tf_var in tf.trainable_variables()
            if ("noreg" in tf_var.name or "Bias" in tf_var.name)
    ]:
        print unreg

    # Loss, optimizer, evaluation
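    # L2 term: lambda * sum of l2_loss over every trainable variable; the
    # "unregularised" variables listed above are only reported, not excluded here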
    l2 = config.lambda_loss_amount * \
         sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
    # Softmax loss and L2
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred_Y, labels=Y)) + l2
    optimizer = tf.train.AdamOptimizer(
        learning_rate=config.learning_rate).minimize(cost)

    correct_pred = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))
    # ------------------------------------------------------
    # step3.5 : Tensorboard stuff here
    # ------------------------------------------------------
    if config.tensor_board_logging_enabled:
        tf.summary.scalar("loss", cost)
        tf.summary.scalar("accuracy", accuracy)
        merged_summary_op = tf.summary.merge_all()

    # --------------------------------------------
    # step4: Hooray, now train the neural network
    # --------------------------------------------
    # Note that log_device_placement can be turned ON but will cause console spam.
    sess = tf.InteractiveSession(config=tf.ConfigProto(
        log_device_placement=False))
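    # InteractiveSession installs itself as the default session, so the
    # initializer below can be run with .run() and no explicit session argument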
    tf.global_variables_initializer().run()

    if config.tensor_board_logging_enabled:
        # op to write logs to Tensorboard
        summary_writer = tf.summary.FileWriter(config.logs_path,
                                               graph=tf.get_default_graph())

    best_accuracy = 0.0
    # Start training for each batch and loop epochs
    for i in range(config.training_epochs):
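        # zip() yields (start, end) index pairs for each full batch; a trailing partial batch is skipped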
        for start, end in zip(
                range(0, config.train_count, config.batch_size),
                range(config.batch_size, config.train_count + 1,
                      config.batch_size)):
            if config.tensor_board_logging_enabled:
                _, summary = sess.run([optimizer, merged_summary_op],
                                      feed_dict={
                                          X: X_train[start:end],
                                          Y: y_train[start:end]
                                      })
            else:
                sess.run(optimizer,
                         feed_dict={
                             X: X_train[start:end],
                             Y: y_train[start:end]
                         })

        if config.tensor_board_logging_enabled:
            # Write logs at every iteration
            summary_writer.add_summary(summary, i)

        # Test completely at every epoch: calculate accuracy
        pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost],
                                                    feed_dict={
                                                        X: X_test,
                                                        Y: y_test
                                                    })

        if config.matplot_lib_enabled:
            indep_test_axis.append(i)
            test_losses.append(loss_out)
            test_accuracies.append(accuracy_out)

        print("traing iter: {},".format(i) + \
              " test accuracy : {},".format(accuracy_out) + \
              " loss : {}".format(loss_out))
        best_accuracy = max(best_accuracy, accuracy_out)

    print("")
    print("final test accuracy: {}".format(accuracy_out))
    print("best epoch's test accuracy: {}".format(best_accuracy))
    print("")

    if config.tensor_board_logging_enabled:
        print("Run the command line:\n")
        print(config.tensorboard_cmd)
        print("\nThen open http://0.0.0.0:6006/ into your web browser")

    print(config.model_desc_attched_string)

    if config.matplot_lib_enabled:

        #for i in range(config.batch_size):
        #   indep_test_axis.append(i)
        #indep_test_axis = [i for i in range(config.batch_size)]
        #indep_test_axis = np.array(indep_test_axis)

        #p = PlotUtil("title", indep_test_axis, "x_label", "y_label")
        y_bundle = []
        test_losses = np.array(test_losses)
        test_accuracies = np.array(test_accuracies)

        y = YaxisBundle(test_losses, "loss", "b")
        y_bundle.append(y)

        y = YaxisBundle(test_accuracies, "accuracy", "g")
        y_bundle.append(y)

        #p.show_plot(y_bundle)

        if config.matplot_lib_for_single_ybundle:
            if config.matplot_lib_for_accuracy:
                return y_bundle[1]
            else:
                return y_bundle[0]
        return y_bundle
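
A minimal driver sketch for the example above; the Config() constructor, PlotUtil, and the module-level X_train/y_train/X_test/y_test this function relies on are assumptions about the surrounding project, not part of this example.

config = Config()  # assumed: exposes the attributes accessed in run_with_config
config.matplot_lib_enabled = True
config.matplot_lib_for_single_ybundle = False
config.tensor_board_logging_enabled = False

y_bundle = run_with_config(config)
if y_bundle:
    p = PlotUtil("LSTM test metrics", list(range(config.training_epochs)), "epoch", "value")
    p.show_plot(y_bundle)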