Esempio n. 1
0
def half_adder_train(inputs, user_iteration, learning_rate):
    """Train a randomly initialised 2x2 network on the half-adder table
    and print the resulting errors, outputs, weights, and biases."""
    # Expected (carry, sum) pairs for inputs (0,0), (0,1), (1,0), (1,1).
    targets = [[0, 0], [0, 1], [0, 1], [1, 0]]

    def random_neuron(name):
        # Two random weights plus a random bias, each drawn from {-1, 0, 1}.
        return Neuron(name, [randint(-1, 1), randint(-1, 1)], randint(-1, 1))

    half_adder = NeuronNetwork([
        NeuronLayer([random_neuron("1"), random_neuron("2")]),
        NeuronLayer([random_neuron("3"), random_neuron("4")]),
    ])

    iterations, errors, outputs = half_adder.train(
        inputs, targets, learning_rate, user_iteration)

    print(
        f"============ | Half adder | ============\n"
        f"After {iterations} iterations:\n"
        f"Errors: {errors}\n"
        f"Inputs: {inputs}\n"
        f"Outputs: {outputs}\n"
        f"Targets: {targets}\n"
        f"Weights:",
        end=' ')
    # Report only the final (output) layer's parameters.
    final_layer = half_adder.layers[-1]
    print([unit.weights for unit in final_layer.neurons])
    print(f"Bias: {[unit.bias for unit in final_layer.neurons]}\n\n")
Esempio n. 2
0
def xor_port_train(inputs, user_iteration, learning_rate):
    """Train a randomly initialised 2-layer network on the XOR truth table
    and print the resulting errors, outputs, weights, and biases."""
    # XOR targets for inputs (0,0), (0,1), (1,0), (1,1).
    targets = [0, 1, 1, 0]

    def random_neuron(name):
        # Two random weights plus a random bias, each drawn from {-1, 0, 1}.
        return Neuron(name, [randint(-1, 1), randint(-1, 1)], randint(-1, 1))

    xor_port = NeuronNetwork([
        NeuronLayer([random_neuron("Nor gate"), random_neuron("And gate")]),
        NeuronLayer([random_neuron("Nor gate")]),
    ])

    iterations, errors, outputs = xor_port.train(
        inputs, targets, learning_rate, user_iteration)

    print(
        f"============ | Xor gate | ============\n"
        f"After {iterations} iterations:\n"
        f"Errors: {errors}\n"
        f"Inputs: {inputs}\n"
        f"Outputs: {outputs}\n"
        f"Targets: {targets}\n"
        f"Weights:",
        end=' ')
    # Report only the final (output) layer's parameters.
    final_layer = xor_port.layers[-1]
    print([unit.weights for unit in final_layer.neurons])
    print(f"Bias: {[unit.bias for unit in final_layer.neurons]}\n\n")
 def __init__(self, num_inputs, num_hidden, num_outputs, multiply = None,
              hidden_layer_weights = None, hidden_layer_bias = None,
              output_layer_weights = None, output_layer_bias = None):
     """Build the hidden and output layers, then initialise their weights.

     Weight/bias arguments default to None, in which case the two
     init methods fall back to their own (random) initialisation.
     """
     self.num_inputs = num_inputs
     # Hidden layer is an instance of the project's NeuronLayer class.
     self.hidden_layer = NeuronLayer(num_hidden, hidden_layer_bias)
     # ``multiply`` flags that a multiplication problem is being addressed.
     self.output_layer = NeuronLayer(num_outputs, output_layer_bias, multiply)
     # Separate initialisers for input->hidden and hidden->output weights.
     self.init_weights_from_inputs_to_hidden_layer_neurons(hidden_layer_weights)
     self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(output_layer_weights)
Esempio n. 4
0
 def __init__(self, numOfInputs, numOfOutputs, numOfHiddenLayers,
              neuronsPerHiddenLayer):
     """Build a fully connected network as a list of NeuronLayer objects.

     Layer stack: one input->hidden layer, (numOfHiddenLayers - 1)
     hidden->hidden layers, and a final hidden->output layer.
     """
     self.numOfInputs = numOfInputs
     self.numOfOutputs = numOfOutputs
     self.numOfHiddenLayers = numOfHiddenLayers
     self.neuronsPerHiddenLayer = neuronsPerHiddenLayer
     # First hidden layer is fed directly by the network inputs.
     self.layers = [NeuronLayer(neuronsPerHiddenLayer, numOfInputs)]
     # BUG FIX: ``xrange`` exists only in Python 2 and raises NameError on
     # Python 3; ``range`` iterates identically here on both versions.
     for _ in range(1, numOfHiddenLayers):
         self.layers.append(
             NeuronLayer(neuronsPerHiddenLayer, neuronsPerHiddenLayer))
     self.layers.append(NeuronLayer(numOfOutputs, neuronsPerHiddenLayer))
Esempio n. 5
0
    def __init__(self,
                 num_hidden_layer,
                 total_inputs,
                 total_outputs,
                 bias_hidden_layer=None,
                 bias_output_layer=None,
                 learning_rate=None):
        """Create the hidden and output layers and initialise their weights.

        ``learning_rate`` falls back to 0.5 only when omitted (None);
        an explicitly supplied rate of 0 or 0.0 is respected.
        """
        self.total_inputs = total_inputs
        # BUG FIX: the original ``learning_rate if learning_rate else 0.5``
        # silently replaced an explicit rate of 0 / 0.0 with the default;
        # test against None instead of truthiness.
        self.learning_rate = 0.5 if learning_rate is None else learning_rate

        self.hidden_layer = NeuronLayer(num_hidden_layer, bias_hidden_layer)
        self.output_layer = NeuronLayer(total_outputs, bias_output_layer)

        self.init_weight_hidden_layer()
        self.init_output_hidden_layer()
Esempio n. 6
0
from neuron import Neuron
from neuronLayer import NeuronLayer
from neuronNetwork import NeuronNetwork

# All four 2-bit input patterns.
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]

# HALF ADDER built from hand-picked weights: layer 1 computes NOR and AND
# of the inputs, layer 2 combines those signals into (carry, sum).
half_adder = NeuronNetwork([
    NeuronLayer([
        Neuron("1", [-100, -100], 50),  # NOR of the two inputs
        Neuron("2", [75, 75], -100),    # AND of the two inputs
    ]),
    NeuronLayer([
        Neuron("3", [0, 150], -100),
        Neuron("4", [-100, -100], 50),  # NOR of the layer-1 signals
    ]),
])

print("Half adder with rounded values:")
print("        Carry, Sum")
for pattern in inputs:
    rounded = [round(out) for out in half_adder.feed_forward(pattern)]
    print(f"{pattern} --> {rounded}")

print("\n\nHalf adder without rounded values:")
print("        Carry, Sum")
for pattern in inputs:
    print(f"{pattern} --> {half_adder.feed_forward(pattern)}")
class NeuralNetwork:
    """Feed-forward network with one hidden layer and one output layer,
    trained with online backpropagation (weights updated after each case).

    NOTE(review): relies on ``NeuronLayer`` and the ``random`` module being
    available at module level — confirm against the surrounding file.
    """

    # publicly accessed and altered later so starting learning rate doesn't really matter
    LEARNING_RATE = 0.05

    def __init__(self, num_inputs, num_hidden, num_outputs, multiply = None, hidden_layer_weights = None, hidden_layer_bias = None, output_layer_weights = None, output_layer_bias = None):
        """Build both layers and initialise all connection weights.

        Explicit weight lists are flat and consumed neuron-major (all of
        neuron 0's weights first); when omitted, weights are random.
        """
        self.num_inputs = num_inputs
        # create neuron layer from the NeuronLayer class
        self.hidden_layer = NeuronLayer(num_hidden, hidden_layer_bias)
        # the multiply parameter signifies that a multiplication problem is being addressed
        self.output_layer = NeuronLayer(num_outputs, output_layer_bias, multiply)
        # two different methods for hidden layer weights and output layer weights
        self.init_weights_from_inputs_to_hidden_layer_neurons(hidden_layer_weights)
        self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(output_layer_weights)

    def init_weights_from_inputs_to_hidden_layer_neurons(self, hidden_layer_weights):
        """Append one weight per (hidden neuron, network input) connection.

        NOTE(review): ``if not hidden_layer_weights`` also randomises when
        an explicitly *empty* list is passed — confirm that is intended.
        """
        weight_num = 0
        for h in range(len(self.hidden_layer.neurons)):
            for i in range(self.num_inputs):
                if not hidden_layer_weights:
                    self.hidden_layer.neurons[h].weights.append(random.random())
                else:
                    self.hidden_layer.neurons[h].weights.append(hidden_layer_weights[weight_num])
                weight_num += 1

    def init_weights_from_hidden_layer_neurons_to_output_layer_neurons(self, output_layer_weights):
        """Append one weight per (output neuron, hidden neuron) connection."""
        weight_num = 0
        for o in range(len(self.output_layer.neurons)):
            for h in range(len(self.hidden_layer.neurons)):
                if not output_layer_weights:
                    self.output_layer.neurons[o].weights.append(random.random())
                else:
                    self.output_layer.neurons[o].weights.append(output_layer_weights[weight_num])
                weight_num += 1

    # print current information in the net
    def inspect(self):
        """Dump the network structure: input count, then each layer."""
        print('------')
        print('* Inputs: {}'.format(self.num_inputs))
        print('------')
        print('Hidden Layer')
        self.hidden_layer.inspect()
        print('------')
        print('* Output Layer')
        self.output_layer.inspect()
        print('------')

    def feed_forward(self, inputs):
        """Return the output-layer activations for *inputs*
        (hidden layer feeds the output layer)."""
        hidden_layer_outputs = self.hidden_layer.feed_forward(inputs)
        return self.output_layer.feed_forward(hidden_layer_outputs)

    # use online learning to update the weights after each training case
    def train(self, training_inputs, training_outputs):
        """One backpropagation step for a single training case.

        All deltas are computed first, then weights are updated — the
        statement order matters because the hidden deltas read the
        *pre-update* output-layer weights.
        """
        self.feed_forward(training_inputs)

        # output neuron deltas
        pd_errors_wrt_output_neuron_total_net_input = [0] * len(self.output_layer.neurons)
        for o in range(len(self.output_layer.neurons)):

            # ∂E/∂zⱼ
            pd_errors_wrt_output_neuron_total_net_input[o] = self.output_layer.neurons[o].calculate_pd_error_wrt_total_net_input(training_outputs[o])

        # hidden neuron deltas
        pd_errors_wrt_hidden_neuron_total_net_input = [0] * len(self.hidden_layer.neurons)
        for h in range(len(self.hidden_layer.neurons)):

            # need to calculate the derivative of the error with respect to the output of each hidden layer neuron
            # dE/dyⱼ = Σ ∂E/∂zⱼ * ∂z/∂yⱼ = Σ ∂E/∂zⱼ * wᵢⱼ
            d_error_wrt_hidden_neuron_output = 0
            for o in range(len(self.output_layer.neurons)):
                d_error_wrt_hidden_neuron_output += pd_errors_wrt_output_neuron_total_net_input[o] * self.output_layer.neurons[o].weights[h]

            # ∂E/∂zⱼ = dE/dyⱼ * ∂yⱼ/∂zⱼ
            pd_errors_wrt_hidden_neuron_total_net_input[h] = d_error_wrt_hidden_neuron_output * self.hidden_layer.neurons[h].calculate_pd_total_net_input_wrt_input()

        # update output neuron weights
        for o in range(len(self.output_layer.neurons)):
            for w_ho in range(len(self.output_layer.neurons[o].weights)):

                # ∂Eⱼ/∂wᵢⱼ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢⱼ
                pd_error_wrt_weight = pd_errors_wrt_output_neuron_total_net_input[o] * self.output_layer.neurons[o].calculate_pd_total_net_input_wrt_weight(w_ho)

                # Δw = α * ∂Eⱼ/∂wᵢ
                self.output_layer.neurons[o].weights[w_ho] -= self.LEARNING_RATE * pd_error_wrt_weight

        # update hidden neuron weights
        for h in range(len(self.hidden_layer.neurons)):
            for w_ih in range(len(self.hidden_layer.neurons[h].weights)):

                # ∂Eⱼ/∂wᵢ = ∂E/∂zⱼ * ∂zⱼ/∂wᵢ
                pd_error_wrt_weight = pd_errors_wrt_hidden_neuron_total_net_input[h] * self.hidden_layer.neurons[h].calculate_pd_total_net_input_wrt_weight(w_ih)

                # Δw = α * ∂Eⱼ/∂wᵢ
                self.hidden_layer.neurons[h].weights[w_ih] -= self.LEARNING_RATE * pd_error_wrt_weight

    # calculate the total error so we can test and try to minimize it
    def calculate_total_error(self, training_sets):
        """Sum the per-output-neuron errors over all (inputs, outputs) pairs
        in *training_sets* (each element unpacks into inputs and targets)."""
        total_error = 0
        for t in range(len(training_sets)):
            training_inputs, training_outputs = training_sets[t]
            self.feed_forward(training_inputs)
            for o in range(len(training_outputs)):
                total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
        return total_error
Esempio n. 8
0
class NeuralNetwork:
    """Fully connected network with one hidden layer and one output layer.

    Provides random weight initialisation, a forward pass, and one step of
    online gradient training.  Depends on the project's ``NeuronLayer``
    class and on the ``random`` module.
    """

    def __init__(self,
                 num_hidden_layer,
                 total_inputs,
                 total_outputs,
                 bias_hidden_layer=None,
                 bias_output_layer=None,
                 learning_rate=None):
        """Create both layers and randomly initialise every weight.

        ``learning_rate`` defaults to 0.5 only when omitted (None); an
        explicit 0 or 0.0 is respected.
        """
        self.total_inputs = total_inputs
        # BUG FIX: the original ``learning_rate if learning_rate else 0.5``
        # silently replaced an explicit rate of 0 / 0.0 with the default.
        self.learning_rate = 0.5 if learning_rate is None else learning_rate

        self.hidden_layer = NeuronLayer(num_hidden_layer, bias_hidden_layer)
        self.output_layer = NeuronLayer(total_outputs, bias_output_layer)

        self.init_weight_hidden_layer()
        self.init_output_hidden_layer()

    def init_weight_hidden_layer(self):
        """Give every hidden neuron one random weight per network input."""
        for neuron in self.hidden_layer.neurons:
            for _ in range(self.total_inputs):
                neuron.weights.append(random.random())

    def init_output_hidden_layer(self):
        """Give every output neuron one random weight per hidden neuron."""
        fan_in = len(self.hidden_layer.neurons)
        for neuron in self.output_layer.neurons:
            for _ in range(fan_in):
                neuron.weights.append(random.random())

    def feed_forward(self, inputs):
        """Run *inputs* through the hidden layer, then the output layer."""
        hidden_layer_outputs = self.hidden_layer.feed_forward(inputs)
        return self.output_layer.feed_forward(hidden_layer_outputs)

    def training(self, training_inputs, training_outputs):
        """One forward pass plus one weight update for a single case."""
        self.feed_forward(training_inputs)

        error_output_layer = self.find_error_output_layer(training_outputs)
        error_hidden_layer = self.find_error_hidden_layer(error_output_layer)

        self.update_output_layer_weights(error_output_layer)
        self.update_hidden_layer_weights(error_hidden_layer)

    def find_error_output_layer(self, training_outputs):
        """Return each output neuron's error against its expected output."""
        return [
            neuron.get_error_from_expected_output(training_outputs[i])
            for i, neuron in enumerate(self.output_layer.neurons)
        ]

    def find_error_hidden_layer(self, errors_output_layer):
        """Back-propagate the output-layer errors onto each hidden neuron."""
        errors_hidden_layer = []
        for h, hidden_neuron in enumerate(self.hidden_layer.neurons):
            # Each output error, weighted by the connection h -> output.
            downstream_error = sum(
                error * out_neuron.weights[h]
                for error, out_neuron in zip(errors_output_layer,
                                             self.output_layer.neurons))
            errors_hidden_layer.append(
                downstream_error * hidden_neuron.calculate_o())
        return errors_hidden_layer

    # NOTE(review): parameter name kept as ``error_output_layer`` for
    # interface compatibility, but ``training`` actually passes the
    # *hidden*-layer errors here — the original name is misleading.
    def update_hidden_layer_weights(self, error_output_layer):
        """Apply one gradient step to every hidden-layer weight."""
        for index, neuron in enumerate(self.hidden_layer.neurons):
            delta = error_output_layer[index]
            for w_index in range(len(neuron.weights)):
                gradient = delta * neuron.get_input_by_index(w_index)
                neuron.weights[w_index] -= self.learning_rate * gradient

    # NOTE(review): parameter name kept as ``error_hidden_layer`` for
    # interface compatibility, but ``training`` actually passes the
    # *output*-layer errors here — the original name is misleading.
    def update_output_layer_weights(self, error_hidden_layer):
        """Apply one gradient step to every output-layer weight."""
        for index, neuron in enumerate(self.output_layer.neurons):
            delta = error_hidden_layer[index]
            for w_index in range(len(neuron.weights)):
                gradient = delta * neuron.get_input_by_index(w_index)
                neuron.weights[w_index] -= self.learning_rate * gradient