Example #1
0
    def initializeNN(self):
        """Build the input, hidden, and output neuron layers in place.

        If stored weights/bias-weights are present on the instance they are
        used to construct each neuron; otherwise empty lists are passed and
        the Neuron is expected to initialise itself randomly.

        Populates ``self.input``, ``self.hidden`` (list of layers) and
        ``self.output``. Returns None.
        """
        # Input layer: one pass-through (no activation) neuron per input,
        # each with a single weight.
        for i in range(self.noInput):
            w = []
            wb = []
            if self.inputWeights and self.inputBiasWeights:
                w = [self.inputWeights[i]]
                wb = self.inputBiasWeights[i]
            self.input[i] = Neuron.Neuron(af.NoActivationFunction(), 1, w, wb)

        # Hidden layers of logistic neurons.
        for i in range(self.noHiddenLayers):
            layer = [None] * self.noHidden

            for j in range(self.noHidden):
                # Find weights for the current neuron.
                w = []
                wb = []
                if self.hiddenWeights and self.hiddenBiasWeights:
                    # NOTE(review): these weight slices do not depend on j,
                    # so every neuron in a layer receives the same weights
                    # while the bias IS per-neuron (i * noHidden + j).
                    # Confirm the stored hiddenWeights layout — this looks
                    # like it may be a bug.
                    if i == 0:
                        w = self.hiddenWeights[:self.noInput]
                    else:
                        start = self.noInput + (i - 1) * self.noHidden
                        end = self.noInput + i * self.noHidden
                        w = self.hiddenWeights[start:end]
                    wb = self.hiddenBiasWeights[i * self.noHidden + j]
                # No stored weights => empty lists; Neuron initialises
                # randomly.

                # Fan-in is noInput for the first hidden layer, noHidden
                # for every subsequent one.
                fan_in = self.noInput if i == 0 else self.noHidden
                layer[j] = Neuron.Neuron(af.Logistic(), fan_in, w, wb)

            self.hidden[i] = layer

        # Output layer: one logistic neuron per output, fed by the last
        # hidden layer (fan-in = noHidden). Each neuron gets its own slice
        # of the stored output weights.
        for i in range(self.noOutput):
            w = []
            wb = []
            if self.outputWeights and self.outputBiasWeights:
                w = self.outputWeights[i * self.noHidden:(i + 1) * self.noHidden]
                wb = self.outputBiasWeights[i]
            self.output[i] = Neuron.Neuron(af.Logistic(), self.noHidden, w, wb)
Example #2
0
    def initializeNN(self):
        """Build the input, hidden, and output neuron layers in place.

        If stored weights/bias-weights are present on the instance they are
        used to construct each neuron; otherwise ``None`` is passed and the
        Neuron is expected to initialise itself randomly.

        Appends to ``self.input``, ``self.hidden`` (list of layers) and
        ``self.output``. Returns None.
        """
        # Input layer: one pass-through (no activation) neuron per input,
        # each with a single weight.
        for i in range(self.noInput):
            w = None
            wb = None
            # PEP 8: compare against None with identity, never `!= None`.
            if self.inputWeights is not None and self.inputBiasWeights is not None:
                w = [self.inputWeights[i]]
                wb = self.inputBiasWeights[i]
            self.input.append(Neuron(af.NoActivationFunction(), 1, w, wb))

        # Hidden layers of logistic neurons.
        for i in range(self.noHiddenLayers):
            layer = []

            for j in range(self.noHidden):
                # Find weights for the current neuron.
                w = None
                wb = None
                if self.hiddenWeights is not None and self.hiddenBiasWeights is not None:
                    # NOTE(review): these weight slices do not depend on j,
                    # so every neuron in a layer receives the same weights
                    # while the bias IS per-neuron. Confirm the stored
                    # hiddenWeights layout — this looks like it may be a bug.
                    if i == 0:
                        w = self.hiddenWeights[:self.noInput]
                        wb = self.hiddenBiasWeights[j]
                    else:
                        start = self.noInput + (i - 1) * self.noHidden
                        end = self.noInput + i * self.noHidden
                        w = self.hiddenWeights[start:end]
                        wb = self.hiddenBiasWeights[i * self.noHidden + j]
                # No stored weights => None; Neuron initialises randomly.

                # Fan-in is noInput for the first hidden layer, noHidden
                # for every subsequent one.
                fan_in = self.noInput if i == 0 else self.noHidden
                layer.append(Neuron(af.Logistic(), fan_in, w, wb))

            self.hidden.append(layer)

        # Output layer: one logistic neuron per output, fed by the last
        # hidden layer (fan-in = noHidden).
        for i in range(self.noOutput):
            w = None
            wb = None
            if self.outputWeights is not None and self.outputBiasWeights is not None:
                # Bug fix: slice out this neuron's own noHidden weights.
                # Previously every output neuron received the ENTIRE
                # outputWeights list while its bias was per-neuron, which is
                # wrong whenever noOutput > 1.
                w = self.outputWeights[i * self.noHidden:(i + 1) * self.noHidden]
                wb = self.outputBiasWeights[i]
            self.output.append(Neuron(af.Logistic(), self.noHidden, w, wb))