Example #1
    def setUp(self):
        """
        Set up a small sequential network
        """
        self.layer1 = FullLayer(5, 10)
        self.relu1 = ReluLayer()
        self.layer2 = FullLayer(10, 2)
        self.softmax = SoftMaxLayer()
        self.loss = CrossEntropyLayer()

        self.model = Sequential(
            (self.layer1, self.relu1, self.layer2, self.softmax), self.loss)
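Examples #1 and #3 drive a Sequential(layers, loss) container whose forward(x, y) returns the scalar loss and whose backward() returns the gradient at the input. A minimal sketch consistent with that usage (the internals here are an assumption, not the source's code):

class Sequential(object):
    def __init__(self, layers, loss):
        self.layers = layers
        self.loss = loss

    def forward(self, x, y):
        # run the input through each layer in order, then score it
        for layer in self.layers:
            x = layer.forward(x)
        return self.loss.forward(x, y)

    def backward(self):
        # assumes the loss layer cached what it needs during forward()
        grad = self.loss.backward()
        for layer in reversed(self.layers):
            grad = layer.backward(grad)
        return grad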
Example #2
import struct

def deserialize_network(input_filename):
	# read the whole serialized blob into memory
	with open(input_filename, mode='rb') as input_file:
		serialized_network = input_file.read()

	# header: little-endian magic ('NN') followed by the layer count
	magic, num_layers = struct.unpack("<HH", serialized_network[:4])
	if magic != 0x4E4E:
		return None

	network = []
	# map each layer's one-byte type tag to its deserializer
	layer_dict = {
		0xC1: Conv1DLayer.from_serialized,
		0xFC: FullyConnectedLayer.from_serialized,
		0x10: ReluLayer.from_serialized,
		0xCC: TwoDConvolution.from_serialized,
		0x55: SigmoidLayer.from_serialized,
	}
	for i in range(num_layers):
		# each table entry is a pair of little-endian uint32 file offsets
		table_offset = i * 8 + 4
		start, end = struct.unpack("<II", serialized_network[table_offset:table_offset + 8])
		serialized_layer = serialized_network[start:end]
		# the first byte of each layer blob is its type tag
		network.append(layer_dict[serialized_layer[0]](serialized_layer))

	return network
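A writer matching this reader follows from the layout above: a 4-byte header (the 'NN' magic 0x4E4E plus a layer count), an offset table of (start, end) uint32 pairs, then the layer blobs, each beginning with its type tag. The serialize_network below is a hypothetical sketch; it assumes each layer exposes a serialize() method returning such a blob, which the source does not confirm:

import struct

def serialize_network(network, output_filename):
	# hypothetical counterpart to deserialize_network (assumed layer API)
	blobs = [layer.serialize() for layer in network]
	header = struct.pack("<HH", 0x4E4E, len(blobs))
	offset = len(header) + 8 * len(blobs)  # first blob starts after the table
	table = b""
	for blob in blobs:
		table += struct.pack("<II", offset, offset + len(blob))
		offset += len(blob)
	with open(output_filename, "wb") as output_file:
		output_file.write(header + table + b"".join(blobs))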
Example #3
import unittest

import numpy as np

from layers.cross_entropy import CrossEntropyLayer
from layers.full import FullLayer
from layers.relu import ReluLayer
from layers.sequential import Sequential
from layers.softmax import SoftMaxLayer


class TestMultiLayer(unittest.TestCase):
    """
    test a multilayer network (check gradients)
    """
    def setUp(self):
        """
        Set up a small sequential network
        """
        self.layer1 = FullLayer(5, 10)
        self.relu1 = ReluLayer()
        self.layer2 = FullLayer(10, 2)
        self.softmax = SoftMaxLayer()
        self.loss = CrossEntropyLayer()

        self.model = Sequential(
            (self.layer1,
             self.relu1,
             self.layer2,
             self.softmax),
            self.loss
            )

    def test_forward(self):
        """
        Test forward pass with some fake input
        """
        # make some fake input
        x = np.array([[0.5, 0.2, 0.1, 0.3, 0.7],
                      [0.3, 0.1, 0.05, 0.8, 0.9]])
        y = np.array([[0, 1],
                      [1, 0]])

        # test layers individually
        y1 = self.layer1.forward(x)
        relu = self.relu1.forward(y1)
        y2 = self.layer2.forward(relu)
        soft = self.softmax.forward(y2)
        loss = self.loss.forward(soft, y)

        # test sequential model
        y_model = self.model.forward(x, y)

        self.assertAlmostEqual(loss, y_model)

    def test_backward(self):
        """
        Test the backward function using the numerical gradient
        """
        # make some fake input
        x = np.array([[0.5, 0.2, 0.1, 0.3, 0.7],
                      [0.3, 0.1, 0.05, 0.8, 0.9]])
        y = np.array([[0, 1],
                      [1, 0]])

        out = self.model.forward(x, y)
        grads = self.model.backward()

        # test some gradients at the input
        h = 0.001
        for i in range(x.shape[0]):
            for j in range(x.shape[1]):
                new_x = np.copy(x)
                new_x[i, j] += h

                out2 = self.model.forward(new_x, y)

                # numerical gradient via forward difference
                diff = (out2 - out) / h
                print("######")
                print(diff)
                print(grads[i, j])
                print("######")
                self.assertTrue(np.abs(diff - grads[i, j]) < 0.001)
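The check above uses a one-sided difference, whose truncation error is O(h); a central difference cuts this to O(h^2) at the cost of a second forward pass per coordinate. A reusable helper along those lines, assuming the same scalar model.forward(x, y) interface (the helper itself is illustrative):

import numpy as np

def numeric_gradient(model, x, y, h=1e-4):
    # central-difference gradient of the scalar loss with respect to x
    grad = np.zeros_like(x, dtype=float)
    for idx in np.ndindex(*x.shape):
        x_plus, x_minus = np.copy(x), np.copy(x)
        x_plus[idx] += h
        x_minus[idx] -= h
        grad[idx] = (model.forward(x_plus, y) - model.forward(x_minus, y)) / (2 * h)
    return grad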
Example #4
from layers.dataset import cifar100
from layers.full import FullLayer
from layers.softmax import SoftMaxLayer
from layers.cross_entropy import CrossEntropyLayer
from layers.sequential import Sequential
from layers.relu import ReluLayer
from layers.conv import ConvLayer
from layers.flatten import FlattenLayer
from layers.maxpool import MaxPoolLayer

# load the training and test data
(x_train, y_train), (x_test, y_test) = cifar100(seed=1213351124)

# initialize each layer of the model
layer1 = ConvLayer(3, 16, 3)
relu1 = ReluLayer()
maxpool1 = MaxPoolLayer()
layer2 = ConvLayer(16, 32, 3)
relu2 = ReluLayer()
maxpool2 = MaxPoolLayer()
loss1 = CrossEntropyLayer()
flatten = FlattenLayer()
layer3 = FullLayer(2048, 3)
softmax1 = SoftMaxLayer()
model = Sequential(
    (
        layer1,
        relu1,
        maxpool1,
        layer2,
        relu2,
        maxpool2,
        flatten,
        layer3,
        softmax1,
    ),
    loss1)
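Example #4 ends before training starts; given the fit() API that Example #9 uses on the same Sequential class, training would presumably continue along these lines (the hyperparameter values are illustrative, not from the source):

losses = model.fit(x=x_train, y=y_train, epochs=20, lr=0.1, batch_size=32)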
Example #5
    def test_forward(self):
        layer = ReluLayer(3, 3)
        output = layer.forward(np.array([-2, 1, 2]))
        numpy.testing.assert_array_equal(output, np.array([0, 1, 2]))
Example #6
    def test_backward(self):
        layer = ReluLayer(3, 3)
        output = layer.forward(np.array([-2, 1, 2]))
        backward_results = layer.backward(np.array([1, 1, 1]))
        numpy.testing.assert_array_equal(backward_results[0],
                                         np.array([0, 1, 1]))
Example #7
    def setUp(self):
        self.layer = ReluLayer()
Example #8
import unittest

import numpy as np

from layers.relu import ReluLayer


class TestRelu(unittest.TestCase):
    def setUp(self):
        self.layer = ReluLayer()

    def test_forward(self):
        """
        Test the forward function with some values
        """
        x = np.array([[-1, 1, 0, 3]]).T
        y = self.layer.forward(x)
        should_be = np.array([[0, 1, 0, 3]]).T

        self.assertTrue(np.allclose(y, should_be))

    def test_backward(self):
        """
        Test the backward function with some values
        """
        x = np.array([[-1, 1, 0, 3]]).T
        y = self.layer.forward(x)

        z = np.ones((4, 1))
        x_grad = self.layer.backward(z)

        should_be = np.array([[0, 1, 0, 1]]).T

        self.assertTrue(np.allclose(x_grad, should_be))

    def test_backward2(self):
        """
        Test the backward function using the numerical gradient
        """
        x = np.array([[-1, 1, 0, 3]]).T
        out1 = self.layer.forward(x)

        z = np.ones((4, 1))
        x_grad = self.layer.backward(z)

        h = 0.0001
        x2 = x + np.array([[h, 0, 0, 0]]).T
        out2 = self.layer.forward(x2)
        diff = (out2 - out1) / h
        self.assertTrue(np.allclose(np.sum(diff), x_grad[0]))

        h = 0.0001
        x2 = x + np.array([[0, h, 0, 0]]).T
        out2 = self.layer.forward(x2)
        diff = (out2 - out1) / h
        self.assertTrue(np.allclose(np.sum(diff), x_grad[1]))

        h = 0.0001
        # step by -h here: at the ReLU kink (x = 0) the difference quotient
        # approaches from the left, matching the expected subgradient of 0
        x2 = x + np.array([[0, 0, -h, 0]]).T
        out2 = self.layer.forward(x2)
        diff = (out1 - out2) / h
        self.assertTrue(np.allclose(np.sum(diff), x_grad[2]))

        h = 0.0001
        x2 = x + np.array([[0, 0, 0, h]]).T
        out2 = self.layer.forward(x2)
        diff = (out2 - out1) / h
        self.assertTrue(np.allclose(np.sum(diff), x_grad[3]))
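For context, a minimal ReluLayer consistent with the tests above clamps negatives to zero on the forward pass and masks the incoming gradient by where the cached input was strictly positive, treating the subgradient at exactly 0 as 0. This is a sketch of the assumed behavior, not the repository's implementation:

import numpy as np

class ReluLayer(object):
    def forward(self, x):
        # cache the input so backward() can rebuild the mask
        self.x = x
        return np.maximum(x, 0)

    def backward(self, y_grad):
        # pass gradient through only where the input was positive
        return y_grad * (self.x > 0)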
Example #9
lr = 0.3
epochs = 60
batch_size = 32

counter = 0
errork = np.zeros(k)
loss = np.zeros(shape=(k, epochs))
for train_index, test_index in kf.split(myX[np.arange(533), :, :, :]):
    train_x, test_x = myX[train_index, :, :, :], myX[test_index, :, :, :]
    train_y, test_y = y[train_index], y[test_index]
    # training
    print('Creating model with lr = ' + str(lr))
    myNet = Sequential(
        layers=(
            ConvLayer(n_i=3, n_o=16, h=3),
            ReluLayer(),
            MaxPoolLayer(size=2),
            ConvLayer(n_i=16, n_o=32, h=3),
            ReluLayer(),
            MaxPoolLayer(size=2),
            FlattenLayer(),
            FullLayer(n_i=12 * 12 * 32, n_o=6),  # no neutral class:/
            SoftMaxLayer()),
        loss=CrossEntropyLayer())

    print("Initiating training")
    loss[counter, :] = myNet.fit(x=train_x,
                                 y=train_y,
                                 epochs=epochs,
                                 lr=lr,
                                 batch_size=batch_size)
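The snippet assumes k, kf, myX, and y were defined earlier; kf.split with this calling convention matches scikit-learn's KFold, so the missing setup presumably resembles this sketch (the fold count and shapes are illustrative):

import numpy as np
from sklearn.model_selection import KFold

k = 5  # illustrative number of folds
kf = KFold(n_splits=k, shuffle=True, random_state=0)
# myX: image tensor of shape (533, 3, H, W); y: integer class labels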
Example #10
def MLP():
    np.random.seed(13141)

    debug_mode = False
    dbg = Debug(debug_mode)

    parser = argparse.ArgumentParser(
        description='Train and test neural network on cifar dataset.')
    parser.add_argument('experiment_name',
                        help='used for outputting log files')
    parser.add_argument('--num_hidden_units',
                        type=int,
                        help='number of hidden units')
    parser.add_argument('--learning_rate',
                        type=float,
                        help='learning rate for solver')
    parser.add_argument('--momentum_mu',
                        type=float,
                        help='mu for momentum solver')
    parser.add_argument('--mini_batch_size', type=int, help='mini batch size')
    parser.add_argument('--num_epoch', type=int, help='number of epochs')
    args = parser.parse_args()

    experiment_name = args.experiment_name
    iter_log_file = "logs/{0}_iter_log.txt".format(experiment_name)
    epoch_log_file = "logs/{0}_epoch_log.txt".format(experiment_name)
    print()
    timer.begin("dataset")
    DATASET_PATH = 'cifar-2class-py2/cifar_2class_py2.p'
    data = CifarDataset()
    data.load(DATASET_PATH)

    num_training = data.get_num_train()
    num_test = data.get_num_test()
    input_dim = data.get_data_dim()

    num_hidden_units = 50 if args.num_hidden_units is None else args.num_hidden_units
    learning_rate = 0.01 if args.learning_rate is None else args.learning_rate
    momentum_mu = 0.6 if args.momentum_mu is None else args.momentum_mu
    mini_batch_size = 64 if args.mini_batch_size is None else args.mini_batch_size
    if args.num_epoch is not None:
        num_epoch = args.num_epoch
    else:
        num_epoch = 1 if debug_mode else 500

    print("num_hidden_units: {0}".format(num_hidden_units))
    print("learning_rate: {0}".format(learning_rate))
    print("momentum_mu: {0}".format(momentum_mu))
    print("mini_batch_size: {0}".format(mini_batch_size))
    print("num_epoch: {0}".format(num_epoch))

    net = Sequential(debug=debug_mode)
    net.add(LinearLayer(input_dim, num_hidden_units))
    net.add(ReluLayer())
    net.add(LinearLayer(num_hidden_units, 2))
    net.add(SoftMaxLayer())

    print("{0}\n".format(net))

    loss = CrossEntropyLoss()

    training_objective = Objective(loss)
    test_objective = Objective(loss)
    errorRate = ErrorRate()

    print("Loss function: {0}\n".format(loss))

    solver = MomentumSolver(lr=learning_rate, mu=momentum_mu)

    monitor = Monitor()
    monitor.createSession(iter_log_file, epoch_log_file)
    cum_iter = 0
    for epoch in range(num_epoch):
        print("Training epoch {0}...".format(epoch))
        timer.begin("epoch")
        for iter, batch in enumerate(data.get_train_batches(
                mini_batch_size)):  # batches are formed here
            if iter > 1 and debug_mode:
                break

            timer.begin("iter")

            (x, target) = batch
            batch_size = x.shape[2]

            z = net.forward(x)
            dbg.disp("\toutput: {0}".format(z))
            dbg.disp("\toutput shape: {0}".format(z.shape))

            if debug_mode:
                l = loss.forward(z, target)
                dbg.disp("\tloss: {0}".format(l))
                dbg.disp("\tloss shape: {0}".format(l.shape))

            gradients = loss.backward(z, target)
            dbg.disp("\tgradients: {0}".format(gradients))
            dbg.disp("\tgradients shape: {0}".format(gradients.shape))

            grad_x = net.backward(x, gradients)
            dbg.disp("\tgrad_x: {0}".format(grad_x))
            dbg.disp("\tgrad_x: {0}".format(grad_x.shape))

            net.updateParams(solver)

            loss_avg = training_objective.compute(z, target)
            elapsed = timer.getElapsed("iter")

            print("\t[iter {0}]\tloss: {1}\telapsed: {2}".format(
                iter, loss_avg, elapsed))
            monitor.recordIteration(cum_iter, loss_avg, elapsed)

            cum_iter += 1

        target = data.get_test_labels()
        x = data.get_test_data()
        output = net.forward(x)  # forward pass over the test set
        loss_avg_test = test_objective.compute(output, target)
        error_rate_test = errorRate.compute(output, target)  # error rate = 100% - accuracy

        target = data.get_train_labels()
        x = data.get_train_data()
        output = net.forward(x)  # forward pass over the training set
        loss_avg_train = training_objective.compute(output, target)
        error_rate_train = errorRate.compute(output, target)  # error rate = 100% - accuracy

        elapsed = timer.getElapsed("epoch")

        print(
            "End of epoch:\ttest objective: {0}\ttrain objective: {1}".format(
                loss_avg_test, loss_avg_train))
        print("\t\ttest error rate: {0}\ttrain error rate: {1}".format(
            error_rate_test, error_rate_train))
        print("Finished epoch {1} in {0:2f}s.\n".format(elapsed, epoch))
        monitor.recordEpoch(epoch, loss_avg_train, loss_avg_test,
                            error_rate_train, error_rate_test, elapsed)

    monitor.finishSession()
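MomentumSolver(lr=..., mu=...) is consumed by net.updateParams(solver) but never shown. Classical momentum keeps one velocity buffer per parameter, v <- mu*v - lr*grad, then w <- w + v; a minimal sketch under that assumption (the update() interface here is hypothetical, not the source's):

import numpy as np

class MomentumSolver(object):
    def __init__(self, lr, mu):
        self.lr = lr        # learning rate
        self.mu = mu        # momentum coefficient
        self.velocity = {}  # one buffer per parameter name

    def update(self, name, param, grad):
        # v <- mu * v - lr * grad; the parameter then moves by v
        v = self.mu * self.velocity.get(name, np.zeros_like(param)) - self.lr * grad
        self.velocity[name] = v
        return param + v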