Example #1
from Mnist import Mnist
from Network import Network


m = Mnist('train-images-idx3-ubyte', 'train-labels-idx1-ubyte',
              't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
net = Network([784, 30, 10])
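# Train for 30 epochs with mini-batch size 10 and learning rate 3.0; passing
# test_data makes SGD report accuracy after each epoch (argument order assumed
# from the usual Network.SGD signature, as sketched at the end of this page).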
net.SGD(m.train_data, 30, 10, 3.0, test_data=m.test_data)
Example #2
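# Partial snippet: network, training_data, test_data, mini_batch_size,
# learning_rate, epochs and NetDraw are assumed to be defined earlier in the
# same script.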
print("We now have a network with random assignments for weights and biases.")
print("It probably won't do so well... Let's see.")
input(
    "Before we train, let's get a baseline. Here is how the network performs on a set of testing images. (Press Enter)."
)

network.evaluate(test_data)
print(
    "Probably not so great. Let's train the network to recognize these digits."
)
print("Watch the weights and biases change as the network is trained.")
input("Press enter to begin training.")

network.SGD(training_data,
            mini_batch_size,
            learning_rate,
            epochs,
            updates=True)

print("Now that the network is trained, let's see how it does with"
      "images it has never seen before!")

wrong = network.evaluate(test_data)

input("Probably a lot better!\n")

input(
    "Let's see what it got wrong. Press Enter to scroll through the network's guesses."
)

nd = NetDraw()
Example #3
File: test.py  Project: fsong666/nn
from Network import Network
from MNIST import MNIST
import numpy as np

if __name__ == '__main__':
    mnist = MNIST()
    training_data, validation_data, test_data = mnist.load_mnist_wrapper(
        factor=0.8, num_class=10)
    # training_data, validation_data, test_data = load_data_wrapper()
    net = Network(sizes=[784, 30, 20, 10],
                  training_data=training_data,
                  test_data=test_data,
                  validation_data=validation_data,
                  learning_rate=2.5,
                  mini_batch_size=16,
                  epochs=30)
    net.SGD()
Example #4
        print ("Running with a CPU.  If this is not desired, then the modify "+\
            "network3.py to set\nthe GPU flag to True.")

    training_data, validation_data, test_data = load_data_shared()

    mini_batch_size = 10
    from Network import Network
    from FullyConnectedLayer import FullyConnectedLayer
    from SoftmaxLayer import SoftmaxLayer
    from ConvPoolLayer import ConvPoolLayer
    

    net = Network([
            FullyConnectedLayer(n_in=784, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)
        ], mini_batch_size)

    net.SGD(training_data, 1, mini_batch_size, 0.1, validation_data, test_data)

    # add a convolutional layer: 
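    # A 5x5 filter over a 28x28 image yields 24x24 feature maps, and 2x2
    # pooling halves that to 12x12, hence n_in=20*12*12 for the next layer.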
    
    net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                        filter_shape=(20, 1, 5, 5),
                        poolsize=(2, 2)),
            FullyConnectedLayer(n_in=20*12*12, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)], 
                mini_batch_size)

    net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
    expanded_training_data, _, _ = load_data_shared("../../neural-networks-and-deep-learning/data/mnist_expanded.pkl.gz")
Example #5
#coding:utf-8

from Network import Network
import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

net = Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

exit(0)
Example #6
import load_data
from Convolutional_Network import CNNetwork
from Network import Network
import numpy as np

training_data, patch_data, validation_data, test_data = load_data.load_data_wrapper(
)

netff0 = Network([784, 100, 25, 10])
netff0.SGD(training_data[:10000], 10, 10, .5, test_data=test_data[:100])
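# The same 10k-image subset through a convolutional variant; judging by the
# keywords, it clusters 8x8 patches into 16 groups and pools over 5x5 regions.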
netcff0 = CNNetwork([100, 25, 10],
                    patch_data,
                    n_clusters=16,
                    patch_size=(8, 8),
                    pool_size=(5, 5))
netcff0.SGD(training_data[:10000],
            10,
            10,
            .5,
            test_data=test_data[:100],
            convolve=True)

np.random.seed(seed=0)
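# Rebuild the training and test subsets from randomly perturbed copies of the
# same images (the fixed seed keeps the perturbations reproducible).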
altered_training_data = [(load_data.random_maniputlate_image(img), key)
                         for img, key in training_data[:10000]]
altered_test_data = [(load_data.random_maniputlate_image(img), key)
                     for img, key in test_data[:100]]

netcff1 = CNNetwork([100, 25, 10],
                    patch_data,
                    n_clusters=16,
Example #7
import numpy as np
import idx2numpy
import matplotlib.pyplot as plt
from Network import Network

epochs = 30
network_architecture = [784, 20, 10]

train_images_file = 'train-images.idx3-ubyte'
train_images_array = idx2numpy.convert_from_file(train_images_file)
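# Flatten each 28x28 image to a 784-vector and scale its 0-255 pixel values down.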
input_matrix = np.reshape(train_images_array, (60000, 28 * 28)) / 500

train_labels_file = 'train-labels.idx1-ubyte'
train_labels_array = idx2numpy.convert_from_file(train_labels_file)

test_images_file = 't10k-images.idx3-ubyte'
test_images_array = idx2numpy.convert_from_file(test_images_file)
input_matrix_test = np.reshape(test_images_array, (10000, 28 * 28)) / 500

test_labels_file = 't10k-labels.idx1-ubyte'
test_labels_array = idx2numpy.convert_from_file(test_labels_file)

network1 = Network(network_architecture)

network1.SGD(input_matrix, train_labels_array, epochs, input_matrix_test,
             test_labels_array)
Example #8
#coding=utf-8
from Network import Network
from mnist_loader import *

model = Network([784, 20, 20, 10])
train, val, test = load_data_wrapper('./data/mnist.pkl.gz')
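# Presumably 50 epochs, mini-batch size 2000 and learning rate 0.8, with the
# validation split passed in for per-epoch evaluation.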
model.SGD(train, 50, 2000, 0.8, val)
Example #9
from Mnist import Mnist
from Network import Network


def start():
    m = Mnist('train-images-idx3-ubyte', 'train-labels-idx1-ubyte',
              't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
    net = Network([784, 30, 10])
    net.SGD(m.train_data, 30, 10, 3.0, test_data=m.test_data)
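
All of these examples drive a Network class in the spirit of network.py from the Neural Networks and Deep Learning book. A minimal sketch of such a class, assuming the SGD(training_data, epochs, mini_batch_size, eta, test_data=None) signature used above and training pairs of column vectors (x, y), might look like the following; it is an illustration, not the code of any of the projects listed here.

import random
import numpy as np


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_prime(z):
    return sigmoid(z) * (1 - sigmoid(z))


class Network:
    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
        training_data = list(training_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size]
                            for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {}: {} / {}".format(
                    j, self.evaluate(test_data), len(test_data)))

    def update_mini_batch(self, mini_batch, eta):
        # average the gradients over the mini-batch, then take one step
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_b, delta_w = self.backprop(x, y)
            nabla_b = [nb + db for nb, db in zip(nabla_b, delta_b)]
            nabla_w = [nw + dw for nw, dw in zip(nabla_w, delta_w)]
        self.biases = [b - (eta / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]
        self.weights = [w - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]

    def backprop(self, x, y):
        # gradients of the quadratic cost with respect to biases and weights
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        activation, activations, zs = x, [x], []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        delta = (activations[-1] - y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for layer in range(2, self.num_layers):
            delta = (np.dot(self.weights[-layer + 1].transpose(), delta)
                     * sigmoid_prime(zs[-layer]))
            nabla_b[-layer] = delta
            nabla_w[-layer] = np.dot(delta, activations[-layer - 1].transpose())
        return nabla_b, nabla_w

    def evaluate(self, test_data):
        # test_data is assumed to hold (x, label) pairs with integer labels
        results = [(np.argmax(self.feedforward(x)), y) for x, y in test_data]
        return sum(int(pred == y) for pred, y in results)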