Example 1
def __build_model(self, n_features, n_classes):
    # Create the layers of the network
    layers = []
    for l in range(self.n_layers):
        # First layer: input features -> hidden width
        if l == 0:
            layers.append(ReluLayer(n_features, self.neurons_per_layer,
                                    self.learning_rate, self.reg_lambda))
        # Last layer: hidden width -> output classes
        elif l == self.n_layers - 1:
            layers.append(SoftmaxLayer(self.neurons_per_layer, n_classes,
                                       self.learning_rate, self.reg_lambda))
        # Middle layers: hidden -> hidden
        else:
            layers.append(ReluLayer(self.neurons_per_layer, self.neurons_per_layer,
                                    self.learning_rate, self.reg_lambda))

    return layers
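
A list built this way is typically consumed by chaining each layer's forward computation. Here is a minimal sketch, assuming each layer exposes a forward(x) method (an assumed API, not shown in the source):

def predict(layers, X):
    # Pass the input through every layer in order; each layer's output
    # becomes the next layer's input (forward() is an assumed method name).
    out = X
    for layer in layers:
        out = layer.forward(out)
    return out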
Example 2
def __init__(self,
             layers,
             l2_decay=0.001,
             debug=False,
             learning_rate=0.001):
    super(Network, self).__init__()
    mapping = {
        "input": lambda x: InputLayer(x),
        "fc": lambda x: FullyConnectedLayer(x),
        "convolution": lambda x: ConvLayer(x),
        "pool": lambda x: MaxPoolLayer(x),
        "squaredloss": lambda x: SquaredLossLayer(x),
        "softmax": lambda x: SoftmaxLayer(x),
        "relu": lambda x: ReLULayer(x),
        "dropout": lambda x: DropoutLayer(x)
    }
    self.layers = []
    self.l2_decay = l2_decay
    self.debug = debug
    self.learning_rate = learning_rate
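
A side note on the mapping dictionary above: wrapping each constructor in lambda x: ... is redundant, because a Python class is itself callable with the same signature. An equivalent, leaner form:

mapping = {
    "input": InputLayer,
    "fc": FullyConnectedLayer,
    "convolution": ConvLayer,
    "pool": MaxPoolLayer,
    "squaredloss": SquaredLossLayer,
    "softmax": SoftmaxLayer,
    "relu": ReLULayer,
    "dropout": DropoutLayer,
}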
Example 3
def __init__(self, layer_types, layer_shapes, conv_layer_types=None, layers=None):
    self.layer_types = layer_types
    self.layer_shapes = layer_shapes
    self.num_genes = 0
    self.conv_layer_types = conv_layer_types
    if layers is not None:
        self.layers = layers
        for typ, shpe in zip(layer_types, layer_shapes):
            if typ == "conv":
                self.num_genes += shpe[1][0]
            elif typ in ("dense", "soft"):
                self.num_genes += shpe[0][0]
    else:
        self.layers = []
        cntr = 0
        n_conv_layer_types = 0
        if conv_layer_types is not None:
            n_conv_layer_types = len(conv_layer_types)
        for typ, shpe in zip(layer_types, layer_shapes):
            if typ == "conv":
                # "<" rather than the original "<=", which indexed one past
                # the end of conv_layer_types
                if cntr < n_conv_layer_types:
                    self.layers.append(ConvLayer(image_shape=shpe[0],
                                                 filter_shape=shpe[1],
                                                 filter_method=conv_layer_types[cntr][0],
                                                 zero_padding=conv_layer_types[cntr][1]))
                    cntr += 1
                else:
                    self.layers.append(ConvLayer(image_shape=shpe[0],
                                                 filter_shape=shpe[1]))
                self.num_genes += shpe[1][0]
            elif typ == "dense":
                self.layers.append(DenseLayer(layer_shape=shpe[0]))
                self.num_genes += shpe[0][0]
            elif typ == "soft":
                self.layers.append(SoftmaxLayer(layer_shape=shpe[0]))
                self.num_genes += shpe[0][0]
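
Reading the branches above, layer_shapes lines up with layer_types entry by entry: a "conv" layer carries [image_shape, filter_shape], while "dense" and "soft" layers carry [(neurons, inputs)]. A hypothetical construction (the concrete numbers are illustrative only, not from the source):

layer_types = ["conv", "dense", "soft"]
layer_shapes = [
    [(1, 28, 28), (8, 1, 3, 3)],  # conv: image_shape, filter_shape
    [(32, 1352)],                 # dense: (neurons, inputs)
    [(10, 32)],                   # soft: (classes, inputs)
]
individual = Individual(layer_types, layer_shapes)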
Example 4
import random
from copy import deepcopy


def crossover(father, mother, alpha=0.5):
    layers = []
    for lt, ls, fl, ml in zip(father.get_layer_types(),
                              father.get_layer_shapes(), father.get_layers(),
                              mother.get_layers()):
        if lt == "conv":
            lyr = ConvLayer(image_shape=ls[0], filter_shape=ls[1])
            # Loop for each filter
            for i in range(ls[1][0]):
                parent = ml
                # Take the father's gene with probability alpha, else the mother's
                if random.random() < alpha:
                    parent = fl
                lyr.set_filter(index=i, filtr=deepcopy(parent.get_filter(i)))
            layers.append(lyr)
        elif lt == "dense" or lt == "soft":
            weights = []
            biases = []
            lyr = DenseLayer(layer_shape=ls[0])
            if lt == "soft":
                lyr = SoftmaxLayer(layer_shape=ls[0])

            # Loop for each neuron
            for i in range(ls[0][0]):
                parent = ml
                # Take the father's gene with probability alpha, else the mother's
                if random.random() < alpha:
                    parent = fl
                weights.append(deepcopy(parent.get_weights(i)))
                biases.append(deepcopy(parent.get_biases(i)))
            lyr.set_weights_biases(weights, biases)
            layers.append(lyr)
    child = Individual(deepcopy(father.get_layer_types()),
                       deepcopy(father.get_layer_shapes()),
                       deepcopy(father.get_conv_layer_types()), layers)
    child = mutate_individual(child)
    return child
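
A sketch of how crossover might drive a full breeding step, assuming a list of selected Individual parents (next_generation is a hypothetical helper, not part of the source):

import random

def next_generation(parents, size, alpha=0.5):
    # Repeatedly cross two distinct, randomly chosen parents until the
    # offspring list reaches the requested population size.
    children = []
    for _ in range(size):
        father, mother = random.sample(parents, 2)
        children.append(crossover(father, mother, alpha=alpha))
    return children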
Example 5
import mnist
import numpy as np
from conv_layer import ConvLayer
from max_pool_layer import MaxPoolLayer
from softmax_layer import SoftmaxLayer

train_images = mnist.train_images()[:1000]
train_labels = mnist.train_labels()[:1000]
test_images = mnist.test_images()[:1000]
test_labels = mnist.test_labels()[:1000]

conv = ConvLayer(8)
pool = MaxPoolLayer()
softmax = SoftmaxLayer(13 * 13 * 8, 10)


def check(image):
    out = conv.forward((image / 255) - 0.5)
    out = pool.forward(out)
    out = softmax.forward(out)

    return np.argmax(out)


def forward(image, label):
    '''
    Completes a forward pass of the CNN and calculates the accuracy and
    cross-entropy loss.
    - image is a 2d numpy array
    - label is a digit
    '''
    # Body reconstructed from the docstring and check() above (assumed).
    out = conv.forward((image / 255) - 0.5)
    out = pool.forward(out)
    out = softmax.forward(out)

    loss = -np.log(out[label])  # cross-entropy for the true class
    acc = 1 if np.argmax(out) == label else 0
    return out, loss, acc
Example 6
import numpy as np


def test_softmax_activation():
    # Simple case
    lay = SoftmaxLayer(1, 1)
    input_arr = np.array([[7]])
    expected_arr = np.array([[1]])
    actual_arr = lay.activation(input_arr)
    np.testing.assert_array_equal(expected_arr, actual_arr)

    # Simple case with batches
    lay = SoftmaxLayer(1, 1)
    input_arr = np.array([[2], [7]])
    expected_arr = np.array([[1], [1]])
    actual_arr = lay.activation(input_arr)
    np.testing.assert_array_equal(expected_arr, actual_arr)

    # Multiple inputs
    lay = SoftmaxLayer(4, 4)
    input_arr = np.array([[1, 2, 3, 4]])
    exp_sum = np.exp(1) + np.exp(2) + np.exp(3) + np.exp(4)
    expected_arr = np.exp(np.array([[1, 2, 3, 4]])) / exp_sum
    actual_arr = lay.activation(input_arr)

    # Round to 10 decimal places to absorb floating-point error
    expected_arr = np.round(expected_arr, 10)
    actual_arr = np.round(actual_arr, 10)
    np.testing.assert_array_equal(expected_arr, actual_arr)

    # Multiple inputs with batches
    lay = SoftmaxLayer(4, 4)
    input_arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])

    exp_sum = np.exp(1) + np.exp(2) + np.exp(3) + np.exp(4)
    expected_arr1 = np.exp(np.array([1, 2, 3, 4])) / exp_sum

    exp_sum = np.exp(5) + np.exp(6) + np.exp(7) + np.exp(8)
    expected_arr2 = np.exp(np.array([5, 6, 7, 8])) / exp_sum

    expected_arr = np.array([expected_arr1, expected_arr2])

    actual_arr = lay.activation(input_arr)

    # Round to 10 decimal places to absorb floating-point error
    expected_arr = np.round(expected_arr, 10)
    actual_arr = np.round(actual_arr, 10)
    np.testing.assert_array_equal(expected_arr, actual_arr)
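
These tests pin down the activation's expected values. One implementation that satisfies them is the standard numerically stable softmax, sketched below under that assumption (not necessarily the project's actual SoftmaxLayer.activation):

import numpy as np

def softmax(x):
    # Subtracting the row-wise maximum before exponentiating prevents
    # overflow and leaves the result unchanged, since softmax is
    # invariant to shifting all inputs by a constant.
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)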
Example 7
import numpy as np


def load_population(file_name):
    counter = 0
    # Read the file line by line
    with open(file_name, 'r') as f:
        arr = f.read().splitlines()

    pop_size = int(arr[counter])
    counter += 1
    initial_pop = []
    clt = []

    for _ in range(pop_size):
        layer_types = []
        layer_shapes = []
        layers = []
        conv_layer_types = []

        num_layers = int(arr[counter])
        counter += 1

        for _ in range(num_layers):
            # "layer_type" rather than "type", to avoid shadowing the builtin
            layer_type = arr[counter]
            layer_types.append(layer_type)
            counter += 1

            if layer_type == "conv":
                filter_method = arr[counter]
                counter += 1
                zero_padding = int(arr[counter])
                counter += 1

                conv_layer_types.append((filter_method, zero_padding))

                image_shape = (int(arr[counter]), int(arr[counter + 1]),
                               int(arr[counter + 2]))
                counter += 3

                filter_shape = (int(arr[counter]), int(arr[counter + 1]),
                                int(arr[counter + 2]), int(arr[counter + 3]))
                counter += 4

                layer_shapes.append([image_shape, filter_shape])

                filters = []

                # Distinct loop variables; the original reused i/j/k from the
                # enclosing loops, which obscured the indexing.
                for _ in range(filter_shape[0]):
                    weights = np.zeros(filter_shape[1:])

                    for d in range(filter_shape[1]):
                        for h in range(filter_shape[2]):
                            for w in range(filter_shape[3]):
                                weights[d][h][w] = float(arr[counter])
                                counter += 1
                    bias = float(arr[counter])
                    counter += 1

                    filters.append(Filter(filter_shape[1:], weights, bias))

                # Remember the conv settings so the Population below can reuse them
                clt = conv_layer_types
                layers.append(
                    ConvLayer(image_shape, filter_shape, filter_method,
                              zero_padding, filters))

            elif layer_type == "dense" or layer_type == "soft":
                shpe = (int(arr[counter]), int(arr[counter + 1]))
                layer_shapes.append([shpe])
                counter += 2

                weights = np.zeros(shpe)
                biases = np.zeros(shpe[0])

                for cl in range(shpe[0]):
                    for pl in range(shpe[1]):
                        weights[cl][pl] = float(arr[counter])
                        counter += 1

                for cl in range(shpe[0]):
                    biases[cl] = float(arr[counter])
                    counter += 1

                if layer_type == "dense":
                    layers.append(DenseLayer(shpe, weights, biases))
                elif layer_type == "soft":
                    layers.append(SoftmaxLayer(shpe, weights, biases))

        initial_pop.append(Individual(layer_types, layer_shapes, clt, layers))
    population = Population(pop_size, initial_pop[0].get_layer_types(),
                            initial_pop[0].get_layer_shapes(), clt,
                            initial_pop)
    return population
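
For reference, the line-oriented file layout implied by the reads above (inferred from the parser, not documented in the source):

# pop_size
# for each individual:
#     num_layers
#     for each layer:
#         layer type: "conv", "dense", or "soft"
#         conv: filter_method, zero_padding,
#               image_shape (3 ints), filter_shape (4 ints),
#               then per filter: depth*height*width weights, one bias
#         dense/soft: shape (2 ints), rows*cols weights, rows biases
# Every value sits on its own line.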
Example 8
import numpy as np
from unittest import TestCase


class TestSoftmaxLayer(TestCase):

    def setUp(self):
        self.layer = SoftmaxLayer()
        self.layer.set_input_shape((4, ))

        self.predicted = np.array([0.2, 0.4, 0.4], dtype=np.float64)
        self.true = np.array([0, 1, 0], dtype=np.float64)

    def test_forward_prop_two_equal(self):
        input_arr = np.array([43265, 0, 0, 43265], dtype=np.float64)
        output = self.layer.forward_prop(input_arr)
        expected_output = np.array([0.5, 0, 0, 0.5], dtype=np.float64)
        np.testing.assert_array_equal(output, expected_output)

    def test_forward_prop_one_large(self):
        input_arr = np.array([43265, 2, 54, 21], dtype=np.float64)
        output = self.layer.forward_prop(input_arr)
        expected_output = np.array([1, 0, 0, 0], dtype=np.float64)
        np.testing.assert_array_equal(output, expected_output)

    def test_back_prop(self):
        out_grad = np.array([3, 3, 3, 3], dtype=np.float64)
        with self.assertRaises(NotImplementedError):
            self.layer.back_prop(out_grad)

    def test_initial_gradient(self):
        expected_gradient = np.array([0.2, -0.6, 0.4], dtype=np.float64)
        gradient = self.layer.initial_gradient(self.predicted, self.true)
        np.testing.assert_array_equal(expected_gradient, gradient)

    def test_loss(self):
        expected_loss = 0.9162907
        loss = self.layer.loss(self.predicted, self.true)
        self.assertAlmostEqual(expected_loss, loss)

    def test_get_output_shape(self):
        self.layer.set_input_shape((5, ))
        self.assertEqual(self.layer.get_output_shape(), (5, ))
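
Two of the expected values in this suite follow directly from the standard softmax/cross-entropy formulas: the loss is -sum(true * log(pred)) = -log(0.4) ≈ 0.9162907, and the initial gradient is pred - true. Note also that test_forward_prop_two_equal can only pass if forward_prop shifts its input by the maximum before exponentiating, since np.exp(43265) overflows. A quick check of the two formulas:

import numpy as np

pred = np.array([0.2, 0.4, 0.4])
true = np.array([0, 1, 0])
print(-np.sum(true * np.log(pred)))  # 0.9162907..., matches test_loss
print(pred - true)                   # [ 0.2 -0.6  0.4], matches test_initial_gradient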