Example #1
def run_network(filename, num_epochs, training_set_size=1000, lmbda=0.0):
    """Train the network for ``num_epochs`` on ``training_set_size``
    images, and store the results in ``filename``.  Those results can
    later be used by ``make_plots``.  Note that the results are stored
    to disk in large part because it's convenient not to have to
    ``run_network`` each time we want to make a plot (it's slow).

    """
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    net.large_weight_initializer()
    test_cost, test_accuracy, training_cost, training_accuracy \
        = net.SGD(training_data[:training_set_size], num_epochs, 10, 0.5,
                  evaluation_data=test_data, lmbda=lmbda,
                  monitor_evaluation_cost=True,
                  monitor_evaluation_accuracy=True,
                  monitor_training_cost=True,
                  monitor_training_accuracy=True)
    with open(filename, "w") as f:
        json.dump([test_cost, test_accuracy, training_cost, training_accuracy],
                  f)
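The docstring above points out that the saved results are meant to be reloaded later by make_plots. A minimal usage sketch, assuming the function above is in scope and using an illustrative file name:

# Hypothetical usage: train once, then reload the stored results.
# The file name "overfitting_results.json" is illustrative only.
import json

run_network("overfitting_results.json", num_epochs=30,
            training_set_size=1000, lmbda=0.0)
with open("overfitting_results.json") as f:
    test_cost, test_accuracy, training_cost, training_accuracy = json.load(f)
print("Last recorded test-set accuracy:", test_accuracy[-1])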
Example #2
def run_network(training_size, num_epochs, regularization_type, file_name):
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10],
                           cost=network2.CrossEntropyCost(),
                           regularization=regularization_type)

    print("\n\nTraining network with data set size %s" % (training_size))
    net.large_weight_initializer()

    test_cost, test_accuracy, training_cost, training_accuracy \
        = net.SGD(training_data[:training_size], num_epochs,
                  10, 0.5, lmbda=training_size*0.0001,
                  evaluation_data=test_data,
                  monitor_evaluation_cost=True,
                  monitor_evaluation_accuracy=True,
                  monitor_training_cost=True,
                  monitor_training_accuracy=True)

    accuracy = net.accuracy(validation_data) / 100.0
    print("Accuracy on validation data was %s percent" % accuracy)

    with open(file_name, "w") as f:
        json.dump([test_cost, test_accuracy, training_cost, training_accuracy],
                  f)
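A hedged sketch of how this variant might be called; the "L2" label and the file name are assumptions, since the regularization argument only exists in a modified network2:

# Hypothetical call; the accepted regularization labels depend on the
# modified network2.Network, and the file name is illustrative only.
run_network(training_size=1000, num_epochs=30,
            regularization_type="L2", file_name="regularized_results.json")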
Example #3
def run_network(filename, num_epoches, training_set_size=1000, lmbda=0.0):
    """
    Train the network for 'num_epoches' on 'training_set_size' images, and store the results in 'filename'.
    Those results can later be used by 'make_plots'.
    """
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    net.large_weight_initializer()
    test_cost, test_accuracy, training_cost, training_accuracy = net.SGD(
        training_data[:training_set_size],
        num_epoches,
        10,
        0.5,
        evaluation_data=test_data,
        lmbda=lmbda,
        monitor_evaluation_accuracy=True,
        monitor_evaluation_cost=True,
        monitor_training_cost=True,
        monitor_training_accuracy=True)
    with open(filename, "w") as f:
        json.dump([test_cost, test_accuracy, training_cost, training_accuracy],
                  f)
Example #4
def run_networks():
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    accuracies = []
    # SIZES is a module-level list of training-set sizes defined elsewhere
    # in the original script.
    for size in SIZES:
        print("\n\nTraining network with data set size %s" % size)
        net.large_weight_initializer()
        num_epochs = 1500000 // size
        net.SGD(training_data[:size], num_epochs, 10, 0.5, lmbda=size*0.0001)
        accuracy = net.accuracy(validation_data) / 100.0
        print("Accuracy was %s percent" % accuracy)
        accuracies.append(accuracy)
    with open("more_data.json", "w") as f:
        json.dump(accuracies, f)
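Since run_networks stores only the list of validation accuracies, a short sketch of reading more_data.json back and plotting accuracy against training-set size; the matplotlib plotting is an assumption, not part of the original script:

# Sketch: reload the accuracies written by run_networks and plot them.
# Assumes SIZES is the same list of training-set sizes used above.
import json
import matplotlib.pyplot as plt

with open("more_data.json") as f:
    accuracies = json.load(f)
plt.plot(SIZES, accuracies, "o-")
plt.xscale("log")
plt.xlabel("Training set size")
plt.ylabel("Validation accuracy (%)")
plt.show()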
Example #5
def run_network(filename, num_epochs, training_set_size=1000, lmbda=0.0):
    """Train the network for ``num_epochs`` on ``training_set_size``
    images, and store the results in ``filename``.  Those results can
    later be used by ``make_plots``.  Note that the results are stored
    to disk in large part because it's convenient not to have to
    ``run_network`` each time we want to make a plot (it's slow).
    """
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    test_cost, test_accuracy, training_cost, training_accuracy = \
        net.SGD(training_data[:training_set_size], num_epochs,
                mini_batch_size=100, eta=0.5, lmbda=lmbda,
                evaluation_data=test_data,
                monitor_evaluation_cost=True,
                monitor_evaluation_accuracy=True,
                monitor_training_cost=True,
                monitor_training_accuracy=True)
    with open(filename, 'w') as fp:
        json.dump([test_cost, test_accuracy, training_cost, training_accuracy],
                  fp)
Example #6
def run_network(filename, n, eta):
    """Train the network using both the default and the large starting
    weights, and store the results in ``filename``, where they can later
    be used by ``make_plots``.  ``n`` is the number of neurons in the
    hidden layer, and ``eta`` is the learning rate.
    """
    # Make results more easily reproducible
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, n, 10], cost=network2.CrossEntropyCost())
    print("Train the network with the default starting weights.")
    default_vc, default_va, default_tc, default_ta = net.SGD(
        training_data,
        30,
        10,
        eta,
        evaluation_data=validation_data,
        lmbda=5.0,
        monitor_evaluation_accuracy=True)
    print("Train the network with the large starting weights.")
    net.large_weight_initializer()
    large_vc, large_va, large_tc, large_ta = net.SGD(
        training_data,
        30,
        10,
        eta,
        evaluation_data=validation_data,
        lmbda=5.0,
        monitor_evaluation_accuracy=True)
    with open(filename, "w") as f:
        json.dump(
            {
                "default_weight_initialization":
                [default_vc, default_va, default_tc, default_ta],
                "large_weight_initialization":
                [large_vc, large_va, large_tc, large_ta]
            }, f)
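A usage sketch for this comparison; the file name, n, and eta values are illustrative, and the indexing follows the list order written by json.dump above (the validation-accuracy entries sit at position 1):

# Hypothetical follow-up: run the comparison, then print the monitored
# validation accuracies for the two weight initializations, per epoch.
import json

run_network("weight_initialization.json", n=100, eta=0.1)
with open("weight_initialization.json") as f:
    results = json.load(f)
default_va = results["default_weight_initialization"][1]
large_va = results["large_weight_initialization"][1]
for epoch, (d, l) in enumerate(zip(default_va, large_va)):
    print("Epoch %d: default %s, large %s" % (epoch, d, l))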
Example #7
def run_network(filename, num_epochs, training_set_size=1000, lmbda=0.0):
    """Treina a rede para ``num_epochs`` em ``training_set_size``
     imagens e armazene os resultados em ``filename``. Esses resultados podem
     mais tarde ser usado por ``make_plots``. Note que os resultados são armazenados
     no disco em grande parte porque é conveniente não ter que
     ``run_network`` toda vez que quisermos fazer um gráfico (é lento).
    """
    # Torne os resultados mais facilmente reproduzíveis
    random.seed(12345678)
    np.random.seed(12345678)
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost())
    net.large_weight_initializer()
    test_cost, test_accuracy, training_cost, training_accuracy \
        = net.SGD(training_data[:training_set_size], num_epochs, 10, 0.5,
                  evaluation_data=test_data, lmbda=lmbda,
                  monitor_evaluation_cost=True,
                  monitor_evaluation_accuracy=True,
                  monitor_training_cost=True,
                  monitor_training_accuracy=True)
    with open(filename, "w") as f:
        json.dump([test_cost, test_accuracy, training_cost, training_accuracy],
                  f)
Example #8
import mnist_loader
import network2

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

input_size = 28 * 28
hidden_size = 30
hidden_layer_num = 1
output_size = 10

Neural_structure = [input_size]
for l in range(hidden_layer_num):
    Neural_structure.append(hidden_size)
Neural_structure.append(output_size)

net = network2.Network(Neural_structure, cost=network2.CrossEntropyCost())

print('Layer # = %d' % (hidden_layer_num))
print('NN structure:', Neural_structure)

epochs = 30
mini_batch_size = 10
eta = 0.5

net.SGD(training_data[:100],
        epochs,
        mini_batch_size,
        eta,
        lmbda=5.0,
        evaluation_data=validation_data,
        monitor_evaluation_accuracy=True)
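The Neural_structure loop above generalizes to deeper networks; a small illustration (not part of the original script) of the list produced for two hidden layers:

# Example only: two hidden layers of 30 neurons each.
structure = [28 * 28]
for _ in range(2):
    structure.append(30)
structure.append(10)
print(structure)  # [784, 30, 30, 10]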
Example #9
import mnist_loader
import network2 as network

# Training, validation and test sets
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
print(type(training_data))

# Run stochastic gradient descent with the cross-entropy cost
net = network.Network([784, 30, 10], cost=network.CrossEntropyCost())
net.large_weight_initializer()
# monitor_evaluation_accuracy lets us track the learning curve
net.SGD(training_data,
        30,
        20,
        3.0,
        evaluation_data=test_data,
        monitor_evaluation_accuracy=True)
Example #10
# SH-I

import loader
import network2
import analysis
import overfitting

split = 0  # size of the test set
num_epochs = 100  # number of epochs
plot = False
protein = 'DC_SIGN'

training_data, test_data = loader.load_encoded(protein, split=split)

net = network2.Network([920, 100, 3], cost=network2.CrossEntropyCost())

data = net.SGD(training_data,
               num_epochs,
               10,
               0.5,
               evaluation_data=test_data,
               lmbda=0,
               monitor_evaluation_cost=True,
               monitor_evaluation_accuracy=True,
               monitor_training_cost=True,
               monitor_training_accuracy=True)

if plot:
    overfitting.make_plots(data, num_epochs, training_set_size=606 - split)