예제 #1
0
def main():
    """Build a 3-layer network, train it on MNIST data, and profile the run.

    Layer shapes: 28x28 input, 10x10 hidden, 10-way output.  The boolean
    flags on NeuronLayer presumably mark the input / output layers --
    TODO confirm against the NeuronLayer constructor.
    """
    input_layer = NeuronLayer((28, 28), True, False)
    hidden_layer = NeuronLayer((10, 10))
    output_layer = NeuronLayer((10,), False, True)

    network = NeuralNetwork()
    network.add_layer(input_layer)
    network.add_layer(hidden_layer)
    network.add_layer(output_layer)
    network.connect_layers()

    # Profile data loading and training.
    pr = cProfile.Profile()
    pr.enable()

    def _data_path(filename):
        # Resolve a file in the sibling "data" directory to an absolute path.
        return os.path.abspath(
            os.path.join(MAIN_MODULE_PATH, "..", "data", filename))

    network.load_data(_data_path("train-images.idx3-ubyte"),
                      _data_path("train-labels.idx1-ubyte"))

    # BUGFIX: the test-data paths previously skipped os.path.abspath(),
    # unlike the training-data paths above; resolve both the same way.
    network.load_test_data(_data_path("t10k-images.idx3-ubyte"),
                           _data_path("t10k-labels.idx1-ubyte"))

    network.SGD(0.1, 0.1, 30, 10)

    pr.disable()
    pr.print_stats(sort="cumtime")
예제 #2
0
    def test_training(self):
        """Verify weights and biases after one training pass on fixtures.

        Expected values are exact floats recorded from a reference run,
        so plain equality (not approximate) comparison is intended.
        """
        # Case 1: one neuron per layer, one step over input [1, 1],
        # target [1], learning rate 1.
        hidden = Neuron([0.4, 0.3], 0.5, 0.5)
        out = Neuron([0.3], 0.4, 0.5)
        net = NeuronNetwork([NeuronLayer([hidden]), NeuronLayer([out])])
        net.training([[1, 1]], [[1]], 1)
        assert hidden.getBias() == 0.502101508999489
        assert hidden.getWeight() == [0.40210150899948904, 0.302101508999489]
        assert out.getBias() == 0.43937745312797394
        assert out.getWeight() == [0.33026254863991883]

        # Case 2: two neurons per layer, same procedure, target [1, 1].
        h1 = Neuron([0.7, 0.3], 0.5, 0.5)
        h2 = Neuron([0.3, 0.7], 0.4, 0.5)
        o1 = Neuron([0.2, 0.3], 0.3, 0.5)
        o2 = Neuron([0.4, 0.2], 0.6, 0.5)
        net2 = NeuronNetwork([NeuronLayer([h1, h2]), NeuronLayer([o1, o2])])
        net2.training([[1, 1]], [[1, 1]], 1)
        assert h1.getWeight() == [0.7025104485493278, 0.3025104485493278]
        assert h1.getBias() == 0.5025104485493278
        assert h2.getWeight() == [0.30249801135748333, 0.7024980113574834]
        assert h2.getBias() == 0.40249801135748337
        assert o1.getWeight() == [0.22994737881955657, 0.32938362863950127]
        assert o1.getBias() == 0.3366295422515899
        assert o2.getWeight() == [0.41943005652646226, 0.21906429169838573]
        assert o2.getBias() == 0.6237654881509048
예제 #3
0
    def setUp(self):
        """Prepare a half-adder NeuronNetwork fixture."""
        # First layer: three neurons over the two binary inputs.
        first = NeuronLayer([
            Neuron([12, 12], Sigmoid().activate, bias=-18),
            Neuron([-12, -12], Sigmoid().activate, bias=6),
            Neuron([12, 12], Sigmoid().activate, bias=-18),
        ])
        # Second layer: two output neurons over the three hidden activations.
        second = NeuronLayer([
            Neuron([-12, -12, 0], Sigmoid().activate, bias=6),
            Neuron([0, 0, 12], Sigmoid().activate, bias=-6),
        ])
        self.ntwrk = NeuronNetwork([first, second])
예제 #4
0
	def train(self, trainSet, labelSet):
		"""Fit the network with an ELM-style closed-form output solve.

		Builds fresh hidden/output layers sized from the data, feeds the
		training set forward, then sets the output weights to the
		least-squares solution pinv(H) @ labels instead of iterating.

		:param trainSet: sequence of input vectors, one per sample
		:param labelSet: sequence of target vectors, one per sample
		"""
		# Layer sizes are derived from the first sample of each set.
		self.nInNeurons = len(trainSet[0])
		self.hiddenLayer = NeuronLayer(self.nHiddenNeurons, self.nInNeurons) 
		self.nOutNeurons = len(labelSet[0])
		self.outLayer = NeuronLayer(self.nOutNeurons, self.nHiddenNeurons)

		# Hidden-layer activation matrix H -- presumably shaped
		# (samples, hidden neurons); feedForward is defined elsewhere,
		# TODO confirm its return shape.
		hiddenNeuronActivation = self.feedForward(trainSet)	
		
		# Closed-form output weights: pinv(H) @ labels -> (hidden, out).
		outWeights = np.dot(np.linalg.pinv(hiddenNeuronActivation), np.array(labelSet))
		
		# Copy the solved matrix into the per-neuron weight lists.
		for hNeuron in range(self.nHiddenNeurons):
			for outNeuron in range(self.nOutNeurons):
				self.outLayer.neurons[outNeuron].weight[hNeuron] = outWeights[hNeuron][outNeuron]
예제 #5
0
 def addLayer(self, n):
     """Append a NeuronLayer of n neurons, wired to the current output layer."""
     if len(self.layers) > 0:
         # Stack on top of the existing output layer.
         prev = self.getOutputLayer()
         nInputs = prev.getN()
     else:
         # First layer: fed directly by the network inputs.
         prev = None
         nInputs = self.nInputs
     self.layers.append(NeuronLayer(n, nInputs, prev))
예제 #6
0
    def __init__(self,
                 num_inputs,
                 num_hidden,
                 num_outputs,
                 hidden_layer_weights=None,
                 hidden_layer_bias=None,
                 output_layer_weights=None,
                 output_layer_bias=None):
        """Build a one-hidden-layer network and initialize its weights.

        Args:
            num_inputs: number of input features.
            num_hidden: number of hidden-layer neurons.
            num_outputs: number of output-layer neurons.
            hidden_layer_weights: optional input->hidden weights, handed
                to the init helper below.
            hidden_layer_bias: optional hidden-layer bias, passed straight
                to NeuronLayer (None handling is presumably inside
                NeuronLayer -- TODO confirm).
            output_layer_weights: optional hidden->output weights.
            output_layer_bias: optional output-layer bias.
        """
        self.num_inputs = num_inputs

        self.hidden_layer = NeuronLayer(num_hidden, hidden_layer_bias)
        self.output_layer = NeuronLayer(num_outputs, output_layer_bias)

        # Weight setup is delegated to helpers defined elsewhere in the class.
        self.init_weights_from_inputs_to_hidden_layer_neurons(
            hidden_layer_weights)
        self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(
            output_layer_weights)
예제 #7
0
    def __init__(self,
                 inputs_num,
                 hidden_nums,
                 outputs_num,
                 hidden_layers_weights=None,
                 hidden_layers_bias=None,
                 output_layer_weights=None,
                 output_layer_bias=None,
                 softmax_enabled=0):
        """Build a multi-hidden-layer network.

        Args:
            inputs_num: number of input features.
            hidden_nums: sequence of hidden-layer sizes, one per layer.
            outputs_num: number of output neurons.
            hidden_layers_weights: optional per-layer weight lists.
            hidden_layers_bias: optional per-layer bias values.
            output_layer_weights: optional flat hidden->output weights.
            output_layer_bias: optional output-layer bias.
            softmax_enabled: 1 to use the softmax output path.
        """
        self.inputs_num = inputs_num
        self.softmax_enabled = softmax_enabled

        self.hidden_layers = []
        for i in range(len(hidden_nums)):
            # BUGFIX: the default hidden_layers_bias=None used to raise a
            # TypeError on hidden_layers_bias[i]; fall back to a None bias
            # per layer when no bias list is supplied.
            bias = hidden_layers_bias[i] if hidden_layers_bias else None
            self.hidden_layers.append(NeuronLayer(hidden_nums[i], bias))
        self.output_layer = NeuronLayer(outputs_num, output_layer_bias)

        self.init_weights_from_inputs_to_hidden_layers_neurons(
            hidden_layers_weights)
        self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(
            output_layer_weights)
예제 #8
0
    def __init__(self, f, n_n_porcapa, n_capas, o):
        """
        class constructor
        :param f: number of inputs
        :param n_n_porcapa: number of neurons per hidden layer
        :param n_capas: number of hidden layers
        :param o: number of outputs
        :raises ValueError: if len(n_n_porcapa) != n_capas
        """
        # BUGFIX: input validation used `assert`, which is silently
        # stripped when Python runs with -O; raise a real exception.
        if len(n_n_porcapa) != n_capas:
            raise ValueError("Wrong input for the Neural Network")

        # First hidden layer reads the raw inputs ...
        layers = [NeuronLayer(n=n_n_porcapa[0], ni=f)]
        # ... each later hidden layer reads the previous layer's outputs ...
        for n, ni in zip(n_n_porcapa[1:], n_n_porcapa):
            layers.append(NeuronLayer(n=n, ni=ni))
        # ... and the output layer reads the last hidden layer.
        layers.append(NeuronLayer(n=o, ni=n_n_porcapa[-1]))

        self.f = f
        self.n_layers = n_capas + 1  # hidden layers plus the output layer
        self.layers = np.asarray(layers)

        self.set_last_layer()
        self.set_sibling_layers()
예제 #9
0
class BPNetwork:
    """Fully-connected network trained by backpropagation.

    Supports any number of hidden layers and an optional
    softmax + cross-entropy output path (softmax_enabled=1).
    """

    # Gradient-descent step size shared by all updates.
    LEARNING_RATE = 0.01

    def __init__(self,
                 inputs_num,
                 hidden_nums,
                 outputs_num,
                 hidden_layers_weights=None,
                 hidden_layers_bias=None,
                 output_layer_weights=None,
                 output_layer_bias=None,
                 softmax_enabled=0):
        """Build the layers and initialize all connection weights.

        Args:
            inputs_num: number of input features.
            hidden_nums: sequence of hidden-layer sizes.
            outputs_num: number of output neurons.
            hidden_layers_weights: optional per-layer flat weight lists.
            hidden_layers_bias: optional per-layer biases.
            output_layer_weights: optional flat hidden->output weights.
            output_layer_bias: optional output-layer bias.
            softmax_enabled: 1 enables the softmax/cross-entropy path.
        """
        self.inputs_num = inputs_num
        self.softmax_enabled = softmax_enabled

        self.hidden_layers = []
        for i in range(len(hidden_nums)):
            # BUGFIX: with the default hidden_layers_bias=None the original
            # crashed on hidden_layers_bias[i]; fall back to None per layer.
            bias = hidden_layers_bias[i] if hidden_layers_bias else None
            self.hidden_layers.append(NeuronLayer(hidden_nums[i], bias))
        self.output_layer = NeuronLayer(outputs_num, output_layer_bias)

        self.init_weights_from_inputs_to_hidden_layers_neurons(
            hidden_layers_weights)
        self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(
            output_layer_weights)

    # Initialize input->hidden (and hidden->hidden) weights.
    def init_weights_from_inputs_to_hidden_layers_neurons(
            self, hidden_layers_weights):
        # NOTE(review): every hidden layer gets `inputs_num` weights per
        # neuron, even layers past the first whose fan-in is really the
        # previous layer's size -- only correct when all layer sizes equal
        # inputs_num.  Kept as-is to preserve the expected weight layout.
        for hl in range(len(self.hidden_layers)):
            weight_num = 0
            for h in range(len(self.hidden_layers[hl].neurons)):
                for i in range(self.inputs_num):
                    self.hidden_layers[hl].neurons[h].weights.append(
                        hidden_layers_weights[hl][weight_num])
                    weight_num += 1

    # Initialize last-hidden->output weights from a flat list.
    def init_weights_from_hidden_layer_neurons_to_output_layer_neurons(
            self, output_layer_weights):
        weight_num = 0
        for o in range(len(self.output_layer.neurons)):
            for h in range(len(self.hidden_layers[-1].neurons)):
                self.output_layer.neurons[o].weights.append(
                    output_layer_weights[weight_num])
                weight_num += 1

    # Print a summary of the network.
    def inspect(self):
        print('------')
        print('* Inputs: {}'.format(self.inputs_num))
        print('------')
        print('* Output Layer')
        self.output_layer.inspect()
        print('------')

    def softmax_feed_forward(self, inputs):
        """Propagate inputs through all hidden layers; softmax the output."""
        last_hidden_layer_outputs = inputs
        for hl in range(len(self.hidden_layers)):
            last_hidden_layer_outputs = self.hidden_layers[hl].feed_forward(
                last_hidden_layer_outputs)
        return self.output_layer.softmax_output_forward(
            last_hidden_layer_outputs)

    def feed_forward(self, inputs):
        """Propagate inputs through all hidden layers to the output layer."""
        last_hidden_layer_outputs = inputs
        for hl in range(len(self.hidden_layers)):
            last_hidden_layer_outputs = self.hidden_layers[hl].feed_forward(
                last_hidden_layer_outputs)
        return self.output_layer.output_forward(last_hidden_layer_outputs)

    def train(self, training_inputs, training_outputs):
        """Run one online-learning step: forward pass, then weight update."""
        if self.softmax_enabled == 1:
            self.softmax_feed_forward(training_inputs)
        else:
            self.feed_forward(training_inputs)

        # --- Output layer deltas: dE/d(net input) per output neuron. ---
        pd_errors_wrt_output_neuron_total_net_input = [0] * len(
            self.output_layer.neurons)
        if self.softmax_enabled == 1:
            # dE/dS_i for cross-entropy over the softmax outputs ...
            pd_cross_error_wrt_softmax_output = \
                self.output_layer.calculate_pd_cross_error_wrt_softmax_output(
                    training_outputs)
            # ... chained through dS_i/d(net input).
            pd_errors_wrt_output_neuron_total_net_input = \
                self.output_layer.calculate_pd_cross_error_wrt_total_net_input(
                    pd_cross_error_wrt_softmax_output)
        else:
            for o in range(len(self.output_layer.neurons)):
                pd_errors_wrt_output_neuron_total_net_input[
                    o] = self.output_layer.neurons[
                        o].calculate_test_pd_error_wrt_total_net_input(
                            training_outputs[o])

        # --- Hidden-layer deltas, walking layers from last to first. ---
        # BUGFIX: the original indexed hidden layers with -hl, which on the
        # first iteration is 0 (the FIRST layer) even though the deltas
        # being propagated come from the output layer.  -(hl + 1) walks
        # -1, -2, ... as intended; identical for a single hidden layer.
        pd_errors_wrt_hidden_neuron_total_net_input_array = [[]] * len(
            self.hidden_layers)
        last_layer_pd = pd_errors_wrt_output_neuron_total_net_input
        last_layer = self.output_layer
        for hl in range(len(self.hidden_layers)):
            layer_idx = -(hl + 1)
            layer = self.hidden_layers[layer_idx]
            pd_errors_wrt_hidden_neuron_total_net_input = [0] * len(
                layer.neurons)
            for h in range(len(layer.neurons)):
                # dE/dh_out = sum over downstream neurons of delta * weight.
                d_error_wrt_hidden_neuron_output = 0
                for o in range(len(last_layer.neurons)):
                    d_error_wrt_hidden_neuron_output += (
                        last_layer_pd[o] * last_layer.neurons[o].weights[h])
                # dE/dh_in = dE/dh_out * dh_out/dh_in.
                pd_errors_wrt_hidden_neuron_total_net_input[h] = (
                    d_error_wrt_hidden_neuron_output *
                    layer.neurons[h].calculate_pd_total_net_input_wrt_input())
            pd_errors_wrt_hidden_neuron_total_net_input_array[
                layer_idx] = pd_errors_wrt_hidden_neuron_total_net_input
            last_layer = layer
            last_layer_pd = pd_errors_wrt_hidden_neuron_total_net_input

        # --- Update output-layer weights and bias. ---
        for o in range(len(self.output_layer.neurons)):
            for w_ho in range(len(self.output_layer.neurons[o].weights)):
                # dE/dw = delta * d(net input)/dw.
                pd_error_wrt_weight = (
                    pd_errors_wrt_output_neuron_total_net_input[o] *
                    self.output_layer.neurons[
                        o].calculate_pd_total_net_input_wrt_weight(w_ho))
                self.output_layer.neurons[o].weights[
                    w_ho] -= self.LEARNING_RATE * pd_error_wrt_weight
            # d(net input)/d(bias) is 1, so dE/d(bias) is just the delta.
            self.output_layer.bias -= (
                self.LEARNING_RATE *
                pd_errors_wrt_output_neuron_total_net_input[o])

        # --- Update hidden-layer weights and biases (last to first). ---
        for hl in range(len(self.hidden_layers)):
            layer_idx = -(hl + 1)
            layer = self.hidden_layers[layer_idx]
            deltas = pd_errors_wrt_hidden_neuron_total_net_input_array[
                layer_idx]
            # BUGFIX: the bias accumulator was initialized once outside the
            # layer loop, so each layer's bias update also absorbed every
            # previous layer's contribution; reset it per layer.
            pd_error_wrt_hidden_layer_bias = 0
            for h in range(len(layer.neurons)):
                for w_hh in range(len(layer.neurons[h].weights)):
                    pd_error_wrt_weight = (
                        deltas[h] *
                        layer.neurons[
                            h].calculate_pd_total_net_input_wrt_weight(w_hh))
                    layer.neurons[h].weights[
                        w_hh] -= self.LEARNING_RATE * pd_error_wrt_weight
                    # Bias derivative as defined by the original author:
                    # sum of delta * (already-updated) weight.
                    pd_error_wrt_hidden_layer_bias += (
                        deltas[h] * layer.neurons[h].weights[w_hh])
            layer.bias -= self.LEARNING_RATE * pd_error_wrt_hidden_layer_bias

    def calculate_total_error(self, training_sets):
        """Sum each sample's per-output error over the whole training set."""
        total_error = 0
        for t in range(len(training_sets)):
            training_inputs, training_outputs = training_sets[t]
            self.feed_forward(training_inputs)
            for o in range(len(training_outputs)):
                total_error += self.output_layer.neurons[o].calculate_error(
                    training_outputs[o])
        return total_error
예제 #10
0
def TestNeuronLayer(shape1, shape2):
    """Smoke-test NeuronLayer construction and wiring; True on success."""
    input_layer = NeuronLayer(shape1, True, False)
    shapes_ok = (input_layer.get_shape() == shape1
                 and input_layer.get_activations().shape == shape1
                 and input_layer.get_biases().shape == shape1)
    if not shapes_ok:
        return False

    output_layer = NeuronLayer(shape2, False, True)

    # Wire the two layers together in both directions.
    input_layer.set_output(output_layer)
    output_layer.set_input(input_layer)

    # The weight matrix must connect every neuron pair.
    expected = input_layer.get_num_neurons() * output_layer.get_num_neurons()
    return output_layer.get_weights().size == expected
예제 #11
0
 def test_get_activation(self):
     """A freshly built layer should default to a Sigmoid activation."""
     layer = NeuronLayer(n=4, ni=5)
     # BUGFIX: the assertion previously checked self.layer1 (the fixture)
     # instead of the layer constructed here, leaving `layer` unused.
     self.assertEqual(Sigmoid().__class__,
                      layer.get_activation().__class__)
예제 #12
0
class NeuralNetwork:
    """Single-hidden-layer network trained with online backpropagation.

    Weight lists are flat (consumed in order, per destination neuron);
    when a weight list is omitted, uniform random weights are generated.
    """

    # Gradient-descent step size shared by all weight updates.
    LEARNING_RATE = 0.5

    def __init__(self,
                 num_inputs,
                 num_hidden,
                 num_outputs,
                 hidden_layer_weights=None,
                 hidden_layer_bias=None,
                 output_layer_weights=None,
                 output_layer_bias=None):
        """Create the hidden and output layers and wire up their weights.

        Args:
            num_inputs: number of input features.
            num_hidden: number of hidden neurons.
            num_outputs: number of output neurons.
            hidden_layer_weights: optional flat input->hidden weights.
            hidden_layer_bias: optional hidden-layer bias (passed through
                to NeuronLayer; None handling is presumably inside
                NeuronLayer -- TODO confirm).
            output_layer_weights: optional flat hidden->output weights.
            output_layer_bias: optional output-layer bias.
        """
        self.num_inputs = num_inputs

        self.hidden_layer = NeuronLayer(num_hidden, hidden_layer_bias)
        self.output_layer = NeuronLayer(num_outputs, output_layer_bias)

        self.init_weights_from_inputs_to_hidden_layer_neurons(
            hidden_layer_weights)
        self.init_weights_from_hidden_layer_neurons_to_output_layer_neurons(
            output_layer_weights)

    def init_weights_from_inputs_to_hidden_layer_neurons(
            self, hidden_layer_weights):
        """Give each hidden neuron one weight per input.

        Uses the supplied flat list when given (consumed in order),
        otherwise draws uniform random weights.
        """
        weight_num = 0
        for h in range(len(self.hidden_layer.neurons)):
            for i in range(self.num_inputs):
                if not hidden_layer_weights:
                    self.hidden_layer.neurons[h].weights.append(
                        random.random())
                else:
                    self.hidden_layer.neurons[h].weights.append(
                        hidden_layer_weights[weight_num])
                weight_num += 1

    def init_weights_from_hidden_layer_neurons_to_output_layer_neurons(
            self, output_layer_weights):
        """Give each output neuron one weight per hidden neuron.

        Mirrors the input->hidden initializer: flat list when provided,
        random otherwise.
        """
        weight_num = 0
        for o in range(len(self.output_layer.neurons)):
            for h in range(len(self.hidden_layer.neurons)):
                if not output_layer_weights:
                    self.output_layer.neurons[o].weights.append(
                        random.random())
                else:
                    self.output_layer.neurons[o].weights.append(
                        output_layer_weights[weight_num])
                weight_num += 1

    def inspect(self):
        """Print a human-readable dump of the network structure."""
        print('------')
        print('* Inputs: {}'.format(self.num_inputs))
        print('------')
        print('Hidden Layer')
        self.hidden_layer.inspect()
        print('------')
        print('* Output Layer')
        self.output_layer.inspect()
        print('------')

    def feed_forward(self, inputs):
        """Propagate `inputs` through both layers; return output values."""
        hidden_layer_outputs = self.hidden_layer.feed_forward(inputs)
        return self.output_layer.feed_forward(hidden_layer_outputs)

    # Uses online learning, ie updating the weights after each training case
    def train(self, training_inputs, training_outputs):
        """Run one forward pass, then one backprop weight update."""
        self.feed_forward(training_inputs)

        # 1. Output deltas: dE/d(net input) for every output neuron.
        pd_errors_wrt_output_neuron_total_net_input = [0] * len(
            self.output_layer.neurons)
        for o in range(len(self.output_layer.neurons)):
            pd_errors_wrt_output_neuron_total_net_input[
                o] = self.output_layer.neurons[
                    o].calculate_pd_error_wrt_total_net_input(
                        training_outputs[o])

        # 2. Hidden deltas: each output delta propagated back through the
        #    connecting weight, then through the activation derivative.
        pd_errors_wrt_hidden_neuron_total_net_input = [0] * len(
            self.hidden_layer.neurons)
        for h in range(len(self.hidden_layer.neurons)):
            # dE/dh_out = sum over outputs of (output delta * weight h->o).
            d_error_wrt_hidden_neuron_output = 0
            for o in range(len(self.output_layer.neurons)):
                d_error_wrt_hidden_neuron_output += pd_errors_wrt_output_neuron_total_net_input[
                    o] * self.output_layer.neurons[o].weights[h]
            pd_errors_wrt_hidden_neuron_total_net_input[
                h] = d_error_wrt_hidden_neuron_output * self.hidden_layer.neurons[
                    h].calculate_pd_total_net_input_wrt_input()

        # 3. Update hidden->output weights: w -= rate * delta * dnet/dw.
        for o in range(len(self.output_layer.neurons)):
            for w_ho in range(len(self.output_layer.neurons[o].weights)):
                pd_error_wrt_weight = pd_errors_wrt_output_neuron_total_net_input[
                    o] * self.output_layer.neurons[
                        o].calculate_pd_total_net_input_wrt_weight(w_ho)
                self.output_layer.neurons[o].weights[
                    w_ho] -= self.LEARNING_RATE * pd_error_wrt_weight

        # 4. Update input->hidden weights the same way.
        for h in range(len(self.hidden_layer.neurons)):
            for w_ih in range(len(self.hidden_layer.neurons[h].weights)):
                pd_error_wrt_weight = pd_errors_wrt_hidden_neuron_total_net_input[
                    h] * self.hidden_layer.neurons[
                        h].calculate_pd_total_net_input_wrt_weight(w_ih)
                self.hidden_layer.neurons[h].weights[
                    w_ih] -= self.LEARNING_RATE * pd_error_wrt_weight

    def calculate_total_error(self, training_sets):
        """Sum the per-output error over every (inputs, outputs) pair."""
        total_error = 0
        for t in range(len(training_sets)):
            training_inputs, training_outputs = training_sets[t]
            self.feed_forward(training_inputs)
            for o in range(len(training_outputs)):
                total_error += self.output_layer.neurons[o].calculate_error(
                    training_outputs[o])
        return total_error
예제 #13
0
 def test_set_prev_layer(self):
     """set_prev_layer should link layer2 back to layer1."""
     # BUGFIX: two local NeuronLayers were constructed here but never
     # used -- the test exercises the setUp fixtures; drop the dead code.
     self.assertIsNone(self.layer2.prev_layer)
     self.layer2.set_prev_layer(self.layer1)
     self.assertEqual(self.layer1, self.layer2.prev_layer)
예제 #14
0
파일: main.py 프로젝트: BGS/school
from NeuralNet import NeuralNetwork
from NeuronLayer import NeuronLayer
import cProfile
import os

if __name__ == "__main__":
    # Assemble a 28x28 -> 100 -> 10 network for the MNIST digit data.
    layers = [
        NeuronLayer((28, 28), True, False),
        NeuronLayer((100, )),
        NeuronLayer((10, ), False, True),
    ]
    network = NeuralNetwork()
    for layer in layers:
        network.add_layer(layer)
    network.connect_layers()

    # Profile data loading and training.
    pr = cProfile.Profile()
    pr.enable()
    network.load_data(os.path.abspath("data/train-images.idx3-ubyte"),
                      os.path.abspath("data/train-labels.idx1-ubyte"))
    network.load_test_data(os.path.abspath("data/t10k-images.idx3-ubyte"),
                           os.path.abspath("data/t10k-labels.idx1-ubyte"))
    network.SGD(0.1, 0.1, 30, 10)
    pr.disable()
    pr.print_stats(sort="cumtime")
from Neuron import Neuron
from NeuronLayer import NeuronLayer
from NeuronNetwork import NeuronNetwork
from Activation import Sigmoid

# Train a single sigmoid neuron to act as a 2-input AND gate.
n1 = Neuron([-0.5, 0.5], Sigmoid().activate, bias=1.5)

l1 = NeuronLayer([n1])

ntwrk = NeuronNetwork([l1], 1)

# Full AND truth table as training data.
x = [[0, 0], [0, 1], [1, 0], [1, 1]]
y = [[0], [0], [0], [1]]

ntwrk.train(x, y, 1000, 0.0000001)
# IDIOM: print(obj) already invokes __str__; no need to call it explicitly.
print(ntwrk)

print("MSE network:")
print(ntwrk.error(x, y))

print("Should be as close as possible to high (1)")
print("[1,1] gives:")
print(ntwrk.feed_forward([1, 1]))
print("Should be as close as possible to low (0)")
print("[0,1] gives:")
print(ntwrk.feed_forward([0, 1]))
print("[1,0] gives:")
print(ntwrk.feed_forward([1, 0]))
print("[0,0] gives:")
print(ntwrk.feed_forward([0, 0]))
from Neuron import Neuron
from NeuronLayer import NeuronLayer
from NeuronNetwork import NeuronNetwork
from Activation import Sigmoid

# Train a 3-2 two-layer network on a small two-output classification task.
n1 = Neuron([0.0, 0.1], Sigmoid().activate)
n2 = Neuron([0.2, 0.3], Sigmoid().activate)
n3 = Neuron([0.4, 0.5], Sigmoid().activate)

n4 = Neuron([0.6, 0.7, 0.8], Sigmoid().activate)
n5 = Neuron([0.9, 1.0, 1.1], Sigmoid().activate)

l1 = NeuronLayer([n1, n2, n3])
l2 = NeuronLayer([n4, n5])

ntwrk = NeuronNetwork([l1, l2], 0.5)

x = [[0, 0], [1, 0], [0, 1], [1, 1]]
y = [[0, 0], [1, 0], [1, 0], [0, 1]]

ntwrk.train(x, y, 80000, 0.001)
# IDIOM: print(obj) already invokes __str__; no need to call it explicitly.
print(ntwrk)

print("MSE network:")
print(ntwrk.error(x, y))

print("Output should be close to [0,0]")
print(ntwrk.feed_forward([0, 0]))
print("Output should be close to [1,0]")
print(ntwrk.feed_forward([1, 0]))
print(ntwrk.feed_forward([0, 1]))
from Neuron import Neuron
from NeuronLayer import NeuronLayer
from NeuronNetwork import NeuronNetwork
from Activation import Sigmoid

# Train a 2-1 two-layer network on the XOR truth table.
# Layer 1
n1 = Neuron([0.2, -0.4], Sigmoid().activate)
n2 = Neuron([0.7, 0.1], Sigmoid().activate)
# Layer 2
n3 = Neuron([0.6, 0.9], Sigmoid().activate)

l1 = NeuronLayer([n1, n2])
l2 = NeuronLayer([n3])

ntwrk = NeuronNetwork([l1, l2], 1)

x = [[0, 0], [0, 1], [1, 0], [1, 1]]
y = [[0], [1], [1], [0]]

ntwrk.train(x, y, 40000, 0.001)
# IDIOM: print(obj) already invokes __str__; no need to call it explicitly.
print(ntwrk)

print("MSE network:")
print(ntwrk.error(x, y))

print("Should be as close as possible to high (1)")
print("[0,1] gives:")
print(ntwrk.feed_forward([0, 1]))
print("[1,0] gives:")
print(ntwrk.feed_forward([1, 0]))
print("Should be as close as possible to low (0)")
예제 #18
0
class TestNeuronLayer(TestCase):
    """Unit tests for NeuronLayer wiring, weights, and configuration."""

    def setUp(self):
        # layer1 (4 neurons, 5 inputs) sized to feed layer2 (6 neurons,
        # 4 inputs).
        self.layer1 = NeuronLayer(n=4, ni=5)
        self.layer2 = NeuronLayer(n=6, ni=4)

    def test_set_next_layer(self):
        """set_next_layer should link layer1 forward to layer2."""
        self.assertIsNone(self.layer1.next_layer)
        self.layer1.set_next_layer(self.layer2)
        self.assertEqual(self.layer2, self.layer1.next_layer)

    def test_set_prev_layer(self):
        """set_prev_layer should link layer2 back to layer1."""
        # BUGFIX: two local NeuronLayers were constructed here but never
        # used -- the test exercises the setUp fixtures; drop the dead code.
        self.assertIsNone(self.layer2.prev_layer)
        self.layer2.set_prev_layer(self.layer1)
        self.assertEqual(self.layer1, self.layer2.prev_layer)

    def test_get_outputs(self):
        """get_outputs returns one numeric value per neuron."""
        o = self.layer1.get_outputs()
        self.assertEqual(4, len(o))
        self.assertTrue(np.issubdtype(o.dtype, np.number))

    def test_get_deltas(self):
        """get_deltas returns one numeric delta per neuron."""
        d = self.layer1.get_deltas()
        self.assertEqual(4, len(d))
        self.assertTrue(np.issubdtype(d.dtype, np.number))

    def test_get_weights(self):
        """get_weights returns a numeric 4x5 matrix (neurons x inputs)."""
        w = self.layer1.get_weights()
        self.assertTrue(np.issubdtype(w.dtype, np.number))
        self.assertEqual(4, len(w))
        for row in w:
            self.assertEqual(5, len(row))

    def test_set_learning_rate(self):
        """set_learning_rate propagates the new rate to every neuron."""
        for neuron in self.layer1.neurons:
            self.assertEqual(0.5, neuron.lr)
        self.layer1.set_learning_rate(0.2)
        for neuron in self.layer1.neurons:
            self.assertEqual(0.2, neuron.lr)

    def test_set_activation(self):
        """set_activation swaps the layer's activation object."""
        self.assertEqual(Sigmoid().__class__,
                         self.layer1.get_activation().__class__)
        self.layer1.set_activation(Tanh())
        self.assertEqual(Tanh().__class__,
                         self.layer1.get_activation().__class__)

    def test_load_weights(self):
        """load_weights installs the exact matrix handed to it."""
        wi = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [10, 20, 30, 40, 50],
              [50, 60, 70, 80, 90]]
        self.layer1.load_weights(wi)
        w = self.layer1.get_weights()
        self.assertTrue(np.issubdtype(w.dtype, np.number))
        self.assertEqual(4, len(w))
        for expected, row in zip(wi, w):
            self.assertTrue(np.allclose(expected, row))

    def test_get_activation(self):
        """A freshly built layer should default to a Sigmoid activation."""
        layer = NeuronLayer(n=4, ni=5)
        # BUGFIX: the assertion previously checked self.layer1 instead of
        # the layer constructed here, leaving `layer` unused.
        self.assertEqual(Sigmoid().__class__,
                         layer.get_activation().__class__)
예제 #19
0
 def setUp(self):
     """Create the shared fixtures: layer1 (4 neurons, 5 inputs) and
     layer2 (6 neurons, 4 inputs), sized so layer1's output count
     matches layer2's input count."""
     self.layer1 = NeuronLayer(n=4, ni=5)
     self.layer2 = NeuronLayer(n=6, ni=4)
예제 #20
0
from numpy import array, random
import sys
from NeuralNetwork import NeuralNetwork
from NeuronLayer import NeuronLayer

if __name__ == "__main__":
    # NOTE(review): N, I, and n are undefined placeholders -- this script
    # raises NameError as written; fill them in before running.
    FirstLayer = NeuronLayer(N, I)  # first layer (N neurons, I inputs)
    SecondLayer = NeuronLayer(N, I)  # second layer (N neurons, I inputs)
    neural_network = NeuralNetwork(FirstLayer, SecondLayer)
    # Training-data placeholders (currently empty).
    ArrIn, ArrOut = [[]], [[]]
    for i in range(len(ArrIn)):
        # Dump the weights before and after training.
        sys.stdout.write(str(neural_network.weights()) + '\n')
        tinputs = array(ArrIn)  # input array
        toutputs = array(ArrOut).T  # output array
        neural_network.train(
            tinputs, toutputs, n
        )  # replace n to train the network on the training set n times
        sys.stdout.write(str(neural_network.weights()))
        # NOTE(review): array() with no arguments is invalid -- the test
        # inputs for the network belong here.
        hidden_state, output = neural_network.Fit(
            array())  # network test array
        sys.stdout.write('\n' + str(output))