Example #1
# Assumed context, not part of the original snippet: the fragment runs
# inside a TF1 session, and `shuffle` is presumed to come from sklearn.
import tensorflow as tf
from sklearn.utils import shuffle

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)

    # Start a logged run and keep the returned run id
    run_id = log.dl_run_start(dl_run, dl_network, dl_model_file_path, dl_data, hyper_dict)

    print()
    for i in range(EPOCHS):
        # Reshuffle the training data each epoch, then train in mini-batches
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})

        # Report accuracy on the validation set after each epoch
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()

    # Persist the trained weights and close out the logged run with the
    # final validation accuracy
    saver.save(sess, 'lenet')
    print("Model saved")
    log.dl_run_end(run_id, validation_accuracy)

# Evaluate the model with test data
dl_data = ('TF_MNIST', 'Test')

with tf.Session() as sess:
    # Restore the most recently saved checkpoint from the current directory
    saver.restore(sess, tf.train.latest_checkpoint('.'))

    # Start a separate logged run for the test-set evaluation
    run_id = log.dl_run_start(dl_run, dl_network, dl_model_file_path, dl_data, hyper_dict)

    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))

    # Close the run, recording the test accuracy as its final metric
    log.dl_run_end(run_id, test_accuracy)
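
Both parts of this example call an evaluate() helper that is defined elsewhere in the source file. As a rough sketch only, assuming the usual TF1 setup with an accuracy_operation tensor and the x/y placeholders used during training (these names are illustrative, not taken from the snippet), such a helper could look like this:

# Sketch of the evaluate() helper assumed by the example above; the names
# accuracy_operation, x, y, and BATCH_SIZE come from the elided model code.
def evaluate(X_data, y_data):
    num_examples = len(X_data)
    total_accuracy = 0.0
    sess = tf.get_default_session()
    # Accumulate batch accuracies weighted by batch size, then average
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x = X_data[offset:offset + BATCH_SIZE]
        batch_y = y_data[offset:offset + BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += accuracy * len(batch_x)
    return total_accuracy / num_examples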
Example #2
def log_run_end(id_run, accuracy, training_loss):
    """
    Log the end of a neural network run, recording its final accuracy and training loss
    """
    log.dl_run_end(id_run, accuracy, training_loss)
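
As a hypothetical call site (not taken from the source), this wrapper would be paired with the dl_run_start call shown in Example #1, passing back the run id together with the final metrics; final_accuracy and final_loss below are placeholder names produced by the caller's own training loop:

# Hypothetical usage: close out a run once training finishes
run_id = log.dl_run_start(dl_run, dl_network, dl_model_file_path, dl_data, hyper_dict)
# ... training loop that produces final_accuracy and final_loss ...
log_run_end(run_id, final_accuracy, final_loss)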