Code example #1
import json
import random

import numpy as np

# mnist_loader and network2 are modules from the surrounding project;
# LEARNING_RATES and NUM_EPOCHS are constants defined elsewhere in this file.
def run_networks():
    """Train networks using three different values for the learning rate,
    and store the cost curves in the file ``multiple_eta.json``, where
    they can later be used by ``make_plot``.

    """
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    results = []
    for eta in LEARNING_RATES:
        print("\nTrain a network using eta = " + str(eta))
        net = network2.Network([784, 30, 10])
        results.append(
            net.SGD(training_data,
                    NUM_EPOCHS,
                    10,
                    eta,
                    lmbda=5.0,
                    evaluation_data=validation_data,
                    monitor_training_cost=True))
    f = open("multiple_eta.json", "w")
    json.dump(results, f)
    f.close()
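The docstring above refers to a companion make_plot that is not shown. A minimal sketch of what it might look like, assuming network2's SGD return order (evaluation_cost, evaluation_accuracy, training_cost, training_accuracy) and a hypothetical LEARNING_RATES value:

import json

import matplotlib.pyplot as plt

# Hypothetical values; the real LEARNING_RATES constant lives in the module
# above and must match the one used when multiple_eta.json was written.
LEARNING_RATES = [0.025, 0.25, 2.5]

def make_plot():
    # One training-cost curve per learning rate, epochs on the x axis.
    with open("multiple_eta.json") as f:
        results = json.load(f)
    for eta, result in zip(LEARNING_RATES, results):
        training_cost = result[2]  # third element of the SGD return tuple
        plt.plot(training_cost, label="eta = %s" % eta)
    plt.xlabel("Epoch")
    plt.ylabel("Training cost")
    plt.legend()
    plt.show()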
Code example #2
def load_data():
    # loader and DATA_PATH are assumed to come from the surrounding module.
    train_data, valid_data, test_data = loader.load_data_wrapper(DATA_PATH)
    print('Number of training examples: {}'.format(len(train_data[0])))
    print('Number of validation examples: {}'.format(len(valid_data[0])))
    print('Number of testing examples: {}'.format(len(test_data[0])))
    #print(test_data[0][0] , test_data[1][0])
    return train_data, valid_data, test_data
Code example #3
import json
import random

import numpy as np

# mnist_loader and network2 are modules from the surrounding project.
def run_network(filename, num_epochs, training_set_size=1000, lmbda=0.0):
    """Train the network for ``num_epochs`` on ``training_set_size``
    images, and store the results in ``filename``.  Those results can
    later be used by ``make_plots``.  Note that the results are stored
    to disk in large part because it's convenient not to have to
    ``run_network`` each time we want to make a plot (it's slow).

    """
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    net.large_weight_initializer()
    test_cost, test_accuracy, training_cost, training_accuracy \
        = net.SGD(training_data[:training_set_size], num_epochs, 10, 0.5,
                  evaluation_data=test_data, lmbda=lmbda,
                  monitor_evaluation_cost=True,
                  monitor_evaluation_accuracy=True,
                  monitor_training_cost=True,
                  monitor_training_accuracy=True)
    with open(filename, "w") as f:
        json.dump([test_cost, test_accuracy, training_cost, training_accuracy], f)
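A hedged usage sketch; the filenames, epoch count, and lmbda value below are illustrative, not taken from the original project:

# Train on 1,000 images for 400 epochs, first without and then with weight
# decay, storing both result sets for later plotting by make_plots.
run_network("overfitting.json", 400)
run_network("regularized.json", 400, lmbda=0.1)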
Code example #4
def main():
    # Network and load_data_wrapper are assumed to be imported from the
    # surrounding project's network and mnist_loader modules.
    training_data, validation_data, test_data = load_data_wrapper()
    network = Network([784, 15, 10])
    network.SGD(training_data=training_data,
                epochs=30,
                mini_batch_size=64,
                eta=0.5,
                test_data=test_data)
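If Network follows the classic network.py API, held-out accuracy can also be checked explicitly after training; a small sketch under that assumption (and assuming load_data_wrapper returns indexable lists), appended to the end of main():

    # evaluate returns how many test inputs the trained network classifies
    # correctly (argmax of the output activations versus the label).
    correct = network.evaluate(test_data)
    print("Test accuracy: {} / {}".format(correct, len(test_data)))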
Code example #5
def main():
    training_set, validation_set, test_set = mnist_loader.load_data_wrapper()

    # get_modified_training_set2 is a project-specific preprocessing step.
    training_set = get_modified_training_set2(training_set)
    print(len(training_set))
    net = src.network.Network([784, 30, 10])
    net.SGD(training_set, 10, 10, 3.0, test_data=test_set)
Code example #6
import json
import random

import numpy as np

# mnist_loader, network2, and the SIZES constant come from the surrounding
# module.
def run_networks():
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    accuracies = []
    for size in SIZES:
        print("\n\nTraining network with data set size %s" % size)
        net.large_weight_initializer()
        # Keep the total number of training examples seen roughly constant;
        # integer division since SGD expects a whole number of epochs.
        num_epochs = 1500000 // size
        net.SGD(training_data[:size], num_epochs, 10, 0.5, lmbda=size * 0.0001)
        accuracy = net.accuracy(validation_data) / 100.0
        print("Accuracy was %s percent" % accuracy)
        accuracies.append(accuracy)
    with open("more_data.json", "w") as f:
        json.dump(accuracies, f)
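The stored accuracies are naturally visualized against the training-set sizes; a minimal plotting sketch, assuming a SIZES constant like the hypothetical one below:

import json

import matplotlib.pyplot as plt

# Hypothetical sizes; the real SIZES constant is defined in the module above.
SIZES = [100, 200, 500, 1000, 5000, 10000, 50000]

with open("more_data.json") as f:
    accuracies = json.load(f)

plt.plot(SIZES, accuracies, marker="o")
plt.xscale("log")
plt.xlabel("Training set size")
plt.ylabel("Validation accuracy (%)")
plt.show()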
Code example #7
def main():
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

    architecture = [{
        'size': 784,
        'activation': 'sigmoid'
    }, {
        'size': 30,
        'activation': 'sigmoid'
    }, {
        'size': 10,
        'activation': 'sigmoid'
    }]

    net = Network(architecture, 'mse', seed=1)

    train_X = np.array([pair[0] for pair in training_data]).reshape(50000, 784)
    train_Y = np.array([pair[1] for pair in training_data]).reshape(50000, 10)
    test_X = np.array([pair[0] for pair in test_data]).reshape(10000, 784)
    test_y = np.array([pair[1] for pair in test_data]).reshape(10000)

    net.fit(train_X, train_Y, 30, 30, 80.0, test_X, test_y)
Code example #8
def main():
    # initial_norms, get_average_gradient, training, and plot_training are
    # helpers defined elsewhere in this module.
    # Load the data
    full_td, _, _ = mnist_loader.load_data_wrapper()
    td = full_td[:1000]  # Just use the first 1000 items of training data
    epochs = 500  # Number of epochs to train for

    print("\nTwo hidden layers:")
    net = network2.Network([784, 30, 30, 10])
    initial_norms(td, net)
    abbreviated_gradient = [
        ag[:6] for ag in get_average_gradient(net, td)[:-1]]
    print("Saving the averaged gradient for the top six neurons in each " + \
          "layer.\nWARNING: This will affect the look of the book, so be " + \
          "sure to check the\nrelevant material (early chapter 5).")
    f = open("initial_gradient.json", "w")
    json.dump(abbreviated_gradient, f)
    f.close()
    shutil.copy("initial_gradient.json", "../../js/initial_gradient.json")
    training(td, net, epochs, "norms_during_training_2_layers.json")
    plot_training(epochs, "norms_during_training_2_layers.json", 2)

    print("\nThree hidden layers:")
    net = network2.Network([784, 30, 30, 30, 10])
    initial_norms(td, net)
    training(td, net, epochs, "norms_during_training_3_layers.json")
    plot_training(epochs, "norms_during_training_3_layers.json", 3)

    print("\nFour hidden layers:")
    net = network2.Network([784, 30, 30, 30, 30, 10])
    initial_norms(td, net)
    training(td, net, epochs, "norms_during_training_4_layers.json")
    plot_training(epochs, "norms_during_training_4_layers.json", 4)
Code example #9
from src import mnist_loader
from src import network

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

net = network.Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
Code example #10
from __future__ import absolute_import
import src.network as network
import src.network_batch_backprop as network_batch_b
import src.mnist_loader as mnist_loader
import src.cnn_network as cnn_network
import src.graph_plot as graph
# import src.mnist_data
x_train, y_train, x_validation, y_validation, x_test, y_test = mnist_loader.load_data_wrapper()

# Train the model with mini-batch backpropagation
print("*" * 100)
print("Training Mini Batch BackPropagation")
print("*" * 100)
net = network.Network([784, 20, 40, 10])
net.SGD(x_train, y_train, 50, 10, 3.0, x_test, y_test)

# Train the network with full-batch backpropagation
print("*" * 100)
print("Training Batch BackPropagation")
print("*" * 100)
net = network_batch_b.Network_batch([784, 20, 40, 10])
net.SGD(x_train, y_train, 500, 3.0, x_test, y_test)
print("*" * 100)

# Train the CNN encoder, adding some noise to the training data.
print("*" * 100)
print("Training CNN")
print("*" * 100)
# Parameters: epochs, batch_size, n_factor (the percentage of noise to add
# to the data). Named cnn_net so it does not shadow the imported network
# module.
cnn_net = cnn_network.CNN_network(30, 128, 10)
print("Accuracy on Test Data : ", cnn_net.save_weights()[1] * 100)
Code example #11
def trainNetwork():
    # Network and load_data_wrapper are assumed to be imported from the
    # surrounding project.
    training_data, validation_data, test_data = load_data_wrapper()
    net = Network([784, 60, 30, 10])
    net.SGD(training_data, 50, 10, 3.0, test_data=test_data)
    return net
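A hedged usage sketch, assuming the classic network.py feedforward method and a loader whose test entries are (image, integer label) pairs in an indexable list:

import numpy as np

net = trainNetwork()
_, _, test_data = load_data_wrapper()

# The predicted digit is the index of the most activated output neuron.
x, label = test_data[0]
print("Predicted {}, actual {}".format(np.argmax(net.feedforward(x)), label))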
Code example #12
import os
import sys
import pickle  # cPickle in the original Python 2 code
import random

from src import mnist_loader
from src import network
from src.network import Network
from user_network import analyzer

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
del validation_data

mode = 'Test'

# create analyzer
my_analyzer = analyzer.Analyzer()
# set training's data
basic_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))

# list_of_dicts = ['sobel', 'laplacian', 'canny', 'harris', 'basic2']
list_of_dicts = ['basic2']
norm = lambda x: x / float(sum(x))

for dic_name in list_of_dicts:
    print "Start prediciotn on " + dic_name
    training_directory = os.path.join(basic_dir, "outputs/NN_" + dic_name)
    my_analyzer.update_trainings_results(training_directory)

    # compare_nets = lambda net : net.num_of_training_data
    nets = my_analyzer.nets
    predictions = {}
Code example #13
from src.mnist_loader import load_data_wrapper
from src.network import Network

if __name__ == '__main__':
    train, valid, test = load_data_wrapper()
    net = Network([784, 30, 10])
    net.SGD(train, 30, 10, 3.0, test_data=test)
Code example #14
File: network.py  Project: myWsq/assignment
# Fragment: tail of a Network method from earlier in this file.
#     return [output_unit.output(middle_output) for output_unit in self._output]


if __name__ == '__main__':
    INPUT_SIGNAL = 784
    OUTPUT_SIGNAL = 10
    MIDDLE_SIGNAL = 3


    def get_max_index(arr):
        # Index of the largest element, i.e. the most activated output unit.
        tmp = list(arr)
        return tmp.index(max(tmp))


    n = Network(INPUT_SIGNAL, OUTPUT_SIGNAL, MIDDLE_SIGNAL)
    training_data, validation_data, test_data = load_data_wrapper()

    # while True:
    #     flag = False  # no misclassified example seen yet
    #     counter = 0
    #     for each in training_data:
    #         counter += 1
    #         result = n.output(each[0])
    #         if not get_max_index(result) == get_max_index(each[1]):
    #             n.train(each[1])
    #             flag = True
    #             break
    #         else:
    #             print(counter)
    #     if not flag:
    #         break
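If NumPy is in scope, get_max_index reduces to a one-liner; an equivalent sketch, assuming arr is array-like:

import numpy as np

def get_max_index(arr):
    # Same result as the list-based version above.
    return int(np.argmax(arr))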
Code example #15
import numpy as np
from matplotlib import pyplot as plt

from src.mnist_loader import load_data_wrapper
from src.utils import *  # assumed to provide clamp and sigmoid


def noise_img(arr, noise_coefficient):
    # Shift every pixel by noise_coefficient, clamping each value to [0, 1].
    new_arr = []
    len_column = len(arr)
    for i in range(len_column):
        new_iter = clamp(arr[i] + noise_coefficient, 0, 1)
        new_arr.append(new_iter)
    return new_arr


tr_data, test_data, size_picture = load_data_wrapper()

# Weights of a single output neuron, one per pixel.
weights = np.zeros(size_picture)

lr = 0.1

epochs = 500000

for e in range(epochs):
    for data, target in tr_data:
        # Relabel: digit 0 becomes the positive class (1), everything else 0.
        if target == 0:
            target = 1
        else:
            target = 0
        x = np.sum(weights * data)  # weighted input of the single neuron
        y = sigmoid(x)              # sigmoid activation
        E = -(target - y)           # error signal, i.e. y - target
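The excerpt stops at the error signal. For a single sigmoid neuron trained with cross-entropy loss, the weight gradient is (y - target) * data, and E already equals y - target; one plausible continuation of the inner loop (an assumption, not the original author's code):

        # Assumed continuation: cross-entropy gradient step for one neuron,
        # with data as a flat pixel array.
        weights -= lr * E * data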