    def test_adjust(self):
        weights_per_neuron_count = 2
        before_neuron_count = 3
        current_neuron_count = 5
        bias = 1
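        # Topology under test: the "before" layer has 3 neurons with 2 weights
        # each, and the current layer has 5 neurons with 3 weights each, one
        # per before-layer output; both are generated with a bias of 1.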

        before_layer = l.Layer(
            weights=utils.generate_one_layer_weights(
                weights_per_neuron_count, before_neuron_count, bias),
            activation_functions=utils.generate_layer_activation_functions(
                before_neuron_count),
            activation_function_derivatives=utils.generate_layer_activation_function_derivatives(
                before_neuron_count))

        for neuron in before_layer.get_neurons():
            neuron.set_output(1)

        current_layer = l.Layer(
            weights=utils.generate_one_layer_weights(
                before_neuron_count, current_neuron_count, bias),
            activation_functions=utils.generate_layer_activation_functions(
                current_neuron_count),
            activation_function_derivatives=utils.generate_layer_activation_function_derivatives(
                current_neuron_count))

        for neuron in current_layer.get_neurons():
            neuron.set_delta(1)

        current_layer.adjust(1, before_layer)
    def generate_layers(self):
        """
        Generate the network layers.

        Each layer of the network is constructed from the weights, the
        activation functions and their derivatives; the number and size of the
        layers follow the structure of the weights.
        """
        layers = []

        # If we only need to forward propagate, without tuning the network
        # through backpropagation, the activation function derivatives are not
        # necessary and can be passed as None. To avoid changing the structure
        # of the code for this case, instead of working with a single None
        # value we work with a list of None entries, which keeps the
        # list-based logic of the implementation intact.
        if self.__activation_functions_derivatives is None:
            self.__activation_functions_derivatives = \
                [None] * len(self.__activation_functions)

        self.__prepare_layers_generation(len(self.__weights))

        for w, a, af in zip(self.__weights, self.__activation_functions,
                            self.__activation_functions_derivatives):
            layer = lay.Layer(w, a, af)
            layers.append(layer)

        if len(layers) == 0:
            raise Exception("No layer has been created.")

        return layers
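    # Illustration (not part of the original class): the three attributes
    # zipped above are parallel, per-layer lists. A hypothetical two-layer
    # configuration could look like this, mirroring how Layer is built in the
    # tests:
    #
    #     self.__weights = [weights_layer_1, weights_layer_2]
    #     self.__activation_functions = [functions_layer_1, functions_layer_2]
    #     self.__activation_functions_derivatives = None   # forward-only use
    #
    # where weights_layer_1 holds one weight list per neuron and
    # functions_layer_1 holds one activation function per neuron. With the
    # derivatives left as None, generate_layers() substitutes [None, None] and
    # still yields one Layer per entry of self.__weights.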
    def adjust(self, learning_rate, input):
        """
        Adjust weights for each layer after backpropagation.

        The adjustment logic is implemented at the neuron level. Here we only
        pass the command to each layer, and each layer passes it on to each of
        its neurons.
        """
        input_neurons_weights = [[1]] * len(input)
        # since we only need to adjust the weights, we do not care about the
        # activation functions at this step. Therefore we can pass a list of
        # None values.
        input_layer = lay.Layer(input_neurons_weights, [None] * len(input),
                                None)
        input_layer_neurons = input_layer.get_neurons()

        if len(input_layer_neurons) != len(input):
            raise Exception("Cannot adjust weights. The number of input layer"
                            "neurons is different than the number of inputs.")

        for n, i in zip(input_layer_neurons, input):
            n.set_output(i)

        for layer in self.__layers:
            layer.adjust(learning_rate, input_layer)
            input_layer = layer
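    # Illustration (the neuron-level rule is an assumption, since that code is
    # not shown here): under the usual gradient-descent update, each neuron
    # would shift every weight by learning_rate * its delta * the matching
    # output of the layer received as input_layer. The synthetic input layer
    # built above exists only to expose the raw network inputs through the
    # same get_neurons()/get_output() interface, so the first real layer can
    # be adjusted through the same code path as every other layer.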
    def test_backward_propagate_hidden(self):
        # generate the before layer, which will be asked for its errors (deltas)
        before_layer = l.Layer(
            weights=utils.generate_one_layer_weights(3, 3, 1),
            activation_functions=utils.generate_layer_activation_functions(3),
            activation_function_derivatives=utils.generate_layer_activation_function_derivatives(3))

        for neuron in before_layer.get_neurons():
            neuron.set_delta(1)

        # generate the current hidden layer we are working with
        layer = l.Layer(
            weights=utils.generate_one_layer_weights(1, 3, 1),
            activation_functions=utils.generate_layer_activation_functions(3),
            activation_function_derivatives=utils.generate_layer_activation_function_derivatives(3))

        for neuron in layer.get_neurons():
            neuron.set_output(1)

        layer.backward_propagate_hidden(before_layer)
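        # Expected delta per neuron, assuming the usual hidden-layer rule
        # delta = sum(before_weight * before_delta) * derivative(output) and
        # that the generated weights and derivative values are all 1 (as the
        # other tests suggest): 3 * (1 * 1) * 1 = 3.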

        for neuron in layer.get_neurons():
            self.assertEqual(3, neuron.get_delta())
    def test_backward_propagate_output(self):
        layer = l.Layer(
            weights=utils.generate_one_layer_weights(3, 3, 314),
            activation_functions=range(3),
            activation_function_derivatives=utils.generate_layer_activation_function_derivatives(3))
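        # range(3) stands in for the per-neuron activation functions here;
        # presumably backward_propagate_output only needs the derivatives and
        # never calls the activation functions themselves.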
        actual_outputs = []

        for neuron in layer.get_neurons():
            neuron.set_output(1)
            actual_outputs.append(2)

        layer.backward_propagate_output(actual_outputs)
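        # Expected delta per neuron, assuming the usual output-layer rule
        # delta = (actual - output) * derivative(output) with outputs of 1,
        # targets of 2 and derivatives that evaluate to 1: (2 - 1) * 1 = 1.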

        for neuron in layer.get_neurons():
            self.assertEqual(1, neuron.get_delta())
    def test_compute_output_different_neurons_weights_count(self):
        weights_count = [10, 5]
        neuron_count = [10, 10]
        bias = 1

        for wc, nc in zip(weights_count, neuron_count):
            inputs = utils.generate_array(wc)
            weights = utils.generate_one_layer_weights(wc, nc, bias)
            activation_functions = [utils.activation_function] * nc

            layer = l.Layer(weights, activation_functions)

            layer_output = layer.forward_propagate(inputs)
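            # Each neuron sees wc inputs of 1; assuming the generated weights
            # are all 1 and utils.activation_function passes the weighted sum
            # through unchanged, every one of the nc neurons should output
            # wc + bias.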
            expected_output = [wc + bias] * nc

            self.assertEqual(expected_output, layer_output)