예제 #1
0
 def setUp(self):
     """
     Build a FullLayer(2, 3) fixture with deterministic weights and bias.
     """
     fixture = FullLayer(2, 3)
     # W is the transpose of a 2x3 row-major grid, i.e. shape (3, 2).
     fixture.W = np.array(((1, 2, 3), (4, 5, 6)), 'float64').T
     fixture.b = np.array(((2, 4, 6),), 'float64')
     self.layer = fixture
예제 #2
0
    def setUp(self):
        """
        Assemble a small 5 -> 10 -> 2 fully-connected network with a
        cross-entropy loss for the tests in this case.
        """
        self.layer1 = FullLayer(5, 10)
        self.relu1 = ReluLayer()
        self.layer2 = FullLayer(10, 2)
        self.softmax = SoftMaxLayer()
        self.loss = CrossEntropyLayer()

        # Wire the layers into a sequential model.
        stack = (self.layer1, self.relu1, self.layer2, self.softmax)
        self.model = Sequential(stack, self.loss)
예제 #3
0
class TestMultiLayer(unittest.TestCase):
    """
    Test a multilayer network: the sequential model must agree with the
    hand-chained layers on the forward pass, and its analytic input
    gradient must match a finite-difference estimate.
    """
    def setUp(self):
        """
        Set up a small 5 -> 10 -> 2 sequential network with a
        cross-entropy loss.
        """
        self.layer1 = FullLayer(5, 10)
        self.relu1 = ReluLayer()
        self.layer2 = FullLayer(10, 2)
        self.softmax = SoftMaxLayer()
        self.loss = CrossEntropyLayer()

        self.model = Sequential(
            (self.layer1,
             self.relu1,
             self.layer2,
             self.softmax),
            self.loss
            )

    def test_forward(self):
        """
        Test that the model's forward pass equals chaining each layer
        by hand on the same fake input.
        """
        # Fixed fake batch: two 5-dim inputs with one-hot labels.
        x = np.array([[0.5, 0.2, 0.1, 0.3, 0.7],
                      [0.3, 0.1, 0.05, 0.8, 0.9]])
        y = np.array([[0, 1],
                      [1, 0]])

        # Chain the layers manually.
        y1 = self.layer1.forward(x)
        relu = self.relu1.forward(y1)
        y2 = self.layer2.forward(relu)
        soft = self.softmax.forward(y2)
        loss = self.loss.forward(soft, y)

        # The sequential model should produce the same loss value.
        y_model = self.model.forward(x, y)

        # assertAlmostEquals is a deprecated alias; use the canonical
        # assertAlmostEqual.
        self.assertAlmostEqual(loss, y_model)

    def test_backward(self):
        """
        Test the backward function by comparing the analytic input
        gradient with a one-sided finite-difference estimate.
        """
        # Same fake batch as test_forward.
        x = np.array([[0.5, 0.2, 0.1, 0.3, 0.7],
                      [0.3, 0.1, 0.05, 0.8, 0.9],
                  ])
        y = np.array([[0, 1],
                      [1, 0]])

        out = self.model.forward(x, y)
        grads = self.model.backward()

        # Perturb each input element by h and compare the loss delta
        # against the corresponding analytic gradient entry.
        h = 0.001
        for i in range(x.shape[0]):
            for j in range(x.shape[1]):
                new_x = np.copy(x)
                new_x[i, j] += h

                out2 = self.model.forward(new_x, y)

                diff = (out2 - out) / h
                # Python 3 print() — the original used Python 2 print
                # statements, unlike the rest of the file.
                print("######")
                print(diff)
                print(grads[i, j])
                print("######")
                self.assertTrue(np.abs(diff - grads[i, j]) < 0.001)
from layers.flatten import FlattenLayer
from layers.maxpool import MaxPoolLayer

# Load the train/test split with a fixed seed for reproducibility.
(x_train, y_train), (x_test, y_test) = cifar100(seed=1213351124)

# Instantiate each stage of the network.
layer1 = ConvLayer(3, 16, 3)
relu1 = ReluLayer()
maxpool1 = MaxPoolLayer()
layer2 = ConvLayer(16, 32, 3)
relu2 = ReluLayer()
maxpool2 = MaxPoolLayer()
loss1 = CrossEntropyLayer()
flatten = FlattenLayer()
layer3 = FullLayer(2048, 3)
softmax1 = SoftMaxLayer()

# Chain the stages: conv/relu/pool twice, then flatten -> dense -> softmax.
stack = (layer1, relu1, maxpool1,
         layer2, relu2, maxpool2,
         flatten, layer3, softmax1)
model = Sequential(stack, loss1)
예제 #5
0
from layers.maxpool import MaxPoolLayer
from layers.flatten import FlattenLayer

import numpy as np
import matplotlib.pyplot as plt

# Fixed-seed dataset split.
(x_train, y_train), (x_test, y_test) = cifar100(1213076538)

# Network stages: two conv/relu/pool groups, then flatten -> dense(4).
layer1 = ConvLayer(3, 16, 3)
relu1 = ReluLayer()
layer2 = MaxPoolLayer(2)
layer3 = ConvLayer(16, 32, 3)
relu2 = ReluLayer()
layer4 = MaxPoolLayer(2)
layer5 = FlattenLayer()
layer6 = FullLayer(32 * 8 * 8, 4)
softmax = SoftMaxLayer()
loss = CrossEntropyLayer()


# Training hyper-parameters.
lr = 0.1
scores = []
n_epochs = 5
batch_size = 128

# Make sure every split is an ndarray.
x_train, y_train = np.array(x_train), np.array(y_train)
x_test, y_test = np.array(x_test), np.array(y_test)

model = Sequential((layer1, relu1, layer2, layer3, relu2, layer4,
                    layer5, layer6, softmax), loss)
예제 #6
0
# One loss row per fold, one column per epoch.
# NOTE(review): `k`, `epochs`, `kf`, `myX`, `y`, `lr`, `counter`,
# `errork` and `batch_size` are defined outside this snippet.
loss = np.zeros(shape=(k, epochs))
# K-fold cross-validation over the first 533 samples.
for train_index, test_index in kf.split(myX[np.arange(533), :, :, :]):
    train_x, test_x = myX[train_index, :, :, :], myX[test_index, :, :, :]
    train_y, test_y = y[train_index], y[test_index]
    #training
    print('Creating model with lr = ' + str(lr))
    # Fresh model per fold: two conv/relu/pool stages, then a dense
    # 6-way softmax classifier.
    myNet = Sequential(
        layers=(
            ConvLayer(n_i=3, n_o=16, h=3),
            ReluLayer(),
            MaxPoolLayer(size=2),
            ConvLayer(n_i=16, n_o=32, h=3),
            ReluLayer(),
            MaxPoolLayer(size=2),
            FlattenLayer(),
            FullLayer(n_i=12 * 12 * 32, n_o=6),  # no neutral class:/
            SoftMaxLayer()),
        loss=CrossEntropyLayer())

    print("Initiating training")
    # Record this fold's per-epoch training loss.
    loss[counter, :] = myNet.fit(x=train_x,
                                 y=train_y,
                                 epochs=epochs,
                                 lr=lr,
                                 batch_size=batch_size)
    myNet.save()
    pred = myNet.predict(test_x)
    accuracy = np.mean(pred == test_y)
    # Store the fold's misclassification rate.
    # NOTE(review): `counter` is never incremented inside this visible
    # loop — confirm it is updated elsewhere, otherwise every fold
    # overwrites row 0.
    errork[counter] = 1 - accuracy
    print('At fold = ' + str(counter + 1))
    print('Accuracy of Convolutional Neural Network = ' + str(accuracy))
예제 #7
0

#Import and process the input
(train_x, train_y), (val_x,val_y), (test_x, test_y) = fer2013()

# Hyper-parameters; with a pre-trained model loaded below, lr is only
# echoed in the final print.  NOTE(review): `epochs` and `batch_size`
# are unused in this visible snippet — confirm they are needed later.
lr = 0.1
epochs = 100
batch_size = 128
# CNN: two conv/relu/pool stages, then flatten -> dense(7) -> softmax.
myNet = Sequential(layers=(ConvLayer(n_i=1,n_o=16,h=3),
                           ReluLayer(),
                           MaxPoolLayer(size=2),
                           ConvLayer(n_i=16,n_o=32,h=3),
                           ReluLayer(),
                           MaxPoolLayer(size=2),
                           FlattenLayer(),
                           FullLayer(n_i=12*12*32,n_o=7),
                           SoftMaxLayer()),
                   loss=CrossEntropyLayer())

# Restore previously-trained weights instead of fitting.
myNet.load()
"""
pred = myNet.predict(val_x)
accuracy = np.mean(pred == val_y)
print('At learning rate = '+str(lr))
print('Validation Accuracy of Convolutional Neural Network = '+str(accuracy))
"""

# Evaluate on the held-out test split.
# NOTE(review): `forw` is unused in this visible snippet — possibly
# consumed by lines past the end of this chunk.
forw = myNet.forward(test_x)
pred = myNet.predict(test_x)
accuracy = np.mean(pred == test_y)
print('At learning rate = '+str(lr))
예제 #8
0
from __future__ import print_function
from layers.full import FullLayer
from layers.softmax import SoftMaxLayer
from layers.cross_entropy import CrossEntropyLayer
from layers.sequential import Sequential
from layers.relu import ReluLayer
from layers.dataset import cifar100
from sklearn.metrics import accuracy_score
import numpy as np

# Deterministic dataset split.
(x_train, y_train), (x_test, y_test) = cifar100(1213391684)

# Three-layer MLP: 3072 -> 2052 -> 680 -> 4 with ReLU activations.
layer1 = FullLayer(3072, 2052)
relu1 = ReluLayer()
layer2 = FullLayer(2052, 680)
relu2 = ReluLayer()
layer3 = FullLayer(680, 4)
softmax = SoftMaxLayer()
loss = CrossEntropyLayer()
stack = (layer1, relu1, layer2, relu2, layer3, softmax)
model = Sequential(stack, loss)

# Train, then evaluate on the test split.
model.fit(x_train, y_train, epochs=25, lr=0.06)
out = model.predict(x_test)
# Reshape labels to a column so they line up with the predictions.
y = np.reshape(y_test, (x_test.shape[0], 1))
print("y_test.shape:", y.shape)
print("out.shape:", out.shape)
t = accuracy_score(y, out)
print(y_test, "Predicted Val", out)
print('accuracy score', t)
# Same metric computed by hand as a sanity check.
score = np.mean(out == y)
print("score", score)