Example 1
def test_mnist():
    images = utility.read_idx_images(
        '../../mnist_data/train-images.idx3-ubyte')
    labels = utility.read_idx_labels(
        '../../mnist_data/train-labels.idx1-ubyte')
    labels = utility.make_onehot(labels, np.arange(10))
    return train.train_regular(nn.NeuralNet((784, 30, 10)), images, labels)
Example 2
def test_make_onehot(self):
    int_labels = xr.DataArray(np.array([[0, 0], [1, 2]]),
                              dims=('batches', nn.DIM_CASE))
    int_symbols = [0, 1]
    int_onehot = util.make_onehot(int_labels, int_symbols)
    int_expected = np.array([[[1, 0], [1, 0]], [[0, 1], [0, 0]]])
    self.assertDictEqual(
        dict(int_onehot.sizes), {
            'batches': int_labels.sizes['batches'],
            nn.DIM_CASE: int_labels.sizes[nn.DIM_CASE],
            nn.DIM_LABEL: len(int_symbols)
        })
    for i, j in zip(int_onehot, int_expected):
        np.testing.assert_array_equal(i, j)
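The test pins down the contract of make_onehot: a new nn.DIM_LABEL dimension of size len(symbols) is appended, labels found in symbols become a 1 in the matching slot, and anything else (like the 2 above) produces an all-zero row. A minimal sketch with that behavior, written against the public xarray API only (an assumption, not the library's actual implementation; the name make_onehot_sketch is hypothetical):

import xarray as xr
from littlenet import neural_net as nn

def make_onehot_sketch(labels, symbols):  # hypothetical stand-in for util.make_onehot
    # one boolean plane per symbol, stacked along a new label dimension
    onehot = xr.concat([labels == s for s in symbols], dim=nn.DIM_LABEL)
    # put the label dimension last and turn the booleans into 0/1
    return onehot.transpose(*labels.dims, nn.DIM_LABEL).astype(int)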
Example 3
def test_train(self):
    net = nn.NeuralNet(LAYER_SIZES, func_fill=np.ones)
    net2 = nn.NeuralNet(LAYER_SIZES, func_fill=np.ones)
    self.assert_nn_equal(net, net2)
    num_batches = 4
    inputs = xr.DataArray(
        np.zeros((num_batches * NUM_CASES, INPUT_SIZE)),
        dims=[nn.DIM_CASE, nn.DIM_IN])
    labels = utility.make_onehot(
        xr.DataArray(np.zeros((num_batches * NUM_CASES,)), dims=[nn.DIM_CASE]),
        np.zeros(NUM_LABELS))
    trained = net.train(inputs, labels, batch_size=NUM_CASES)
    self.assert_dimensions(trained.matrices, nn.KEY_WEIGHT, {
        nn.DIM_IN: LAYER_SIZES[:-1], nn.DIM_OUT: LAYER_SIZES[1:]})
    self.assert_dimensions(trained.matrices, nn.KEY_BIAS, {
        nn.DIM_OUT: LAYER_SIZES[1:]})
    self.assertTrue(self.nn_not_equal(net, trained))
Example 4
def empty_labels(inputs, dim=nn.DIM_CASE, symbols=10):
    """Creates a onehot array that is entirely zeros.

    Arguments:
        inputs {xarray} -- array of inputs to make labels for,
            used only to reference the dimensions and sizes.

    Keyword Arguments:
        dim {str} -- dimension along which to create labels (default: {nn.DIM_CASE})
        symbols {int} -- number of unique symbols, which is the size
            of the onehot dimension (default: {10})

    Returns:
        xarray -- same output format as utility.make_onehot()
    """
    # None matches none of the symbols, so every onehot row comes out all zeros
    labels = xr.DataArray(np.full((inputs.sizes[dim],), None), dims=(dim,))
    return utility.make_onehot(labels, np.arange(symbols))
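A hypothetical usage sketch (the sizes are illustrative, not from the original file): inputs shaped like the MNIST arrays in the other examples yield an all-zero label array with a onehot dimension of 10.

inputs = xr.DataArray(np.zeros((100, 784)), dims=(nn.DIM_CASE, nn.DIM_IN))
labels = empty_labels(inputs)
# labels has dims (nn.DIM_CASE, nn.DIM_LABEL), sizes (100, 10), all zeros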
Example 5
def test_pass_back(self):
    net = nn.NeuralNet(LAYER_SIZES, func_fill=np.ones)
    activations = {}
    for i, l_size in zip(range(NUM_LAYERS + 1), LAYER_SIZES):
        activations[nn.mkey(i, nn.KEY_OUT_PRE)] = xr.DataArray(
            np.zeros((NUM_CASES, l_size)), dims=(nn.DIM_CASE, nn.DIM_IN))
        activations[nn.mkey(i, nn.KEY_OUT_POST)] = xr.DataArray(
            np.ones((NUM_CASES, l_size)), dims=(nn.DIM_CASE, nn.DIM_IN))
    inputs = xr.DataArray(
        np.ones((NUM_CASES, INPUT_SIZE)), dims=(nn.DIM_CASE, nn.DIM_IN))
    activations = net.pass_forward(inputs)
    labels = utility.make_onehot(
        xr.DataArray(np.arange(NUM_CASES), dims=(nn.DIM_CASE,)),
        np.arange(NUM_LABELS))  # labels are 0 to n
    gradients = net.pass_back(activations, labels)
    self.assert_dimensions(gradients, nn.KEY_WEIGHT, {
        nn.DIM_CASE: [NUM_CASES] * NUM_LAYERS,
        nn.DIM_IN: LAYER_SIZES[:-1],
        nn.DIM_OUT: LAYER_SIZES[1:]})
    self.assert_dimensions(gradients, nn.KEY_BIAS, {
        nn.DIM_CASE: [NUM_CASES] * NUM_LAYERS,
        nn.DIM_OUT: LAYER_SIZES[1:]})
Example 6
a non-parametric probability distribution of the activations. Let's train a simple neural
network with 1 hidden layer of 30 neurons on the MNIST dataset, and generate histograms
for its output neurons:

"""
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
from littlenet import apd, utility, neural_net as nn, train

net = nn.NeuralNet((784, 30, 10))
utility.write_object(net, 'rand_net', directory='./models/init-apd')
# always write to file to avoid re-generating and re-training networks

rand_net = utility.read_object('./models/init-apd/rand_net.pyc')
train_inputs, train_labels, test_inputs, test_labels = utility.training_and_test_inputs(
    is_onehot=False)
train_onehot = utility.make_onehot(train_labels, range(10))
test_onehot = utility.make_onehot(test_labels, range(10))
trained_net, progress = train.train_nn(net,
                                       train_inputs,
                                       train_onehot,
                                       test_inputs,
                                       test_onehot,
                                       sample_rate=500,
                                       hyperparams={
                                           'batch': 10,
                                           'rate': 3.0
                                       })
train.write_nn(trained_net,
               progress,
               name='rand_net',
               save_dir='./models/init-apd')
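
# Hypothetical continuation, sketching the histograms the docstring promises.
# It assumes pass_forward() keys its activations the same way as in the
# test_pass_back example above (nn.mkey(layer, nn.KEY_OUT_POST)), with layer 2
# being the output of this (784, 30, 10) net; check against the real API.
activations = trained_net.pass_forward(test_inputs)
outputs = activations[nn.mkey(2, nn.KEY_OUT_POST)]
fig, axes = plt.subplots(2, 5, figsize=(12, 5))
for neuron, ax in enumerate(axes.flat):
    # distribution of one output neuron's activation over all test cases
    ax.hist(np.asarray(outputs.isel({nn.DIM_IN: neuron})), bins=30)
    ax.set_title('output neuron {}'.format(neuron))
plt.tight_layout()
plt.show()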