Example #1
0
    def initializeNN(self):
        """Build the input, hidden and output neuron layers.

        Weights/biases are taken from self.*Weights / self.*BiasWeights when
        both lists are non-empty; otherwise empty lists are passed and each
        Neuron initialises itself (presumably randomly -- see Neuron).
        """
        # Input layer: one pass-through (no activation) neuron per input.
        for i in range(self.noInput):
            w = []
            wb = []
            # Truthiness replaces the original `len(x) != 0` checks.
            if self.inputWeights and self.inputBiasWeights:
                w = [self.inputWeights[i]]
                wb = self.inputBiasWeights[i]
            self.input[i] = Neuron.Neuron(af.NoActivationFunction(), 1, w, wb)

        # Hidden layers.
        for i in range(self.noHiddenLayers):
            layer = [None] * self.noHidden

            for j in range(self.noHidden):
                # Find weights for the current neuron.
                w = []
                wb = []
                if self.hiddenWeights and self.hiddenBiasWeights:
                    if i == 0:
                        # First hidden layer is fed by the input layer.
                        # NOTE(review): this slice does not depend on j, so
                        # every neuron in the layer receives the same weights
                        # -- confirm this matches the intended weight layout.
                        w = self.hiddenWeights[:self.noInput]
                    else:
                        start = self.noInput + (i - 1) * self.noHidden
                        end = self.noInput + i * self.noHidden
                        w = self.hiddenWeights[start:end]
                    wb = self.hiddenBiasWeights[i * self.noHidden + j]
                # Empty w/wb => Neuron initialises its own weights.
                # Fan-in is the input width for the first hidden layer,
                # otherwise the previous hidden layer's width.
                fan_in = self.noInput if i == 0 else self.noHidden
                layer[j] = Neuron.Neuron(af.Logistic(), fan_in, w, wb)

            self.hidden[i] = layer

        # Output layer: each neuron consumes the last hidden layer.
        for i in range(self.noOutput):
            w = []
            wb = []
            if self.outputWeights and self.outputBiasWeights:
                w = self.outputWeights[i * self.noHidden:(i + 1) * self.noHidden]
                wb = self.outputBiasWeights[i]
            self.output[i] = Neuron.Neuron(af.Logistic(), self.noHidden, w, wb)
Example #2
0
    def initializeNN(self):
        """Build the input, hidden and output neuron layers.

        Weights/biases are taken from self.*Weights / self.*BiasWeights when
        both are present (not None); otherwise None is passed and each Neuron
        initialises itself (presumably randomly -- see Neuron).
        """
        # Input layer: one pass-through (no activation) neuron per input.
        for i in range(self.noInput):
            w = None
            wb = None
            # `is not None` replaces the original `!= None` comparisons (PEP 8).
            if self.inputWeights is not None and self.inputBiasWeights is not None:
                w = [self.inputWeights[i]]
                wb = self.inputBiasWeights[i]
            self.input.append(Neuron(af.NoActivationFunction(), 1, w, wb))

        # Hidden layers.
        for i in range(self.noHiddenLayers):
            layer = []

            for j in range(self.noHidden):
                # Find weights for the current neuron.
                w = None
                wb = None
                if self.hiddenWeights is not None and self.hiddenBiasWeights is not None:
                    if i == 0:
                        # First hidden layer is fed by the input layer.
                        # NOTE(review): this slice does not depend on j, so
                        # every neuron in the layer receives the same weights
                        # -- confirm this matches the intended weight layout.
                        w = self.hiddenWeights[:self.noInput]
                        wb = self.hiddenBiasWeights[j]
                    else:
                        start = self.noInput + (i - 1) * self.noHidden
                        end = self.noInput + i * self.noHidden
                        w = self.hiddenWeights[start:end]
                        wb = self.hiddenBiasWeights[i * self.noHidden + j]
                # w/wb left as None => Neuron initialises its own weights.
                # Fan-in is the input width for the first hidden layer,
                # otherwise the previous hidden layer's width.
                fan_in = self.noInput if i == 0 else self.noHidden
                layer.append(Neuron(af.Logistic(), fan_in, w, wb))

            self.hidden.append(layer)

        # Output layer: each neuron consumes the last hidden layer.
        for i in range(self.noOutput):
            w = None
            wb = None
            if self.outputWeights is not None and self.outputBiasWeights is not None:
                # NOTE(review): the full outputWeights list is passed to every
                # output neuron (no per-neuron slice) -- verify this is the
                # intended layout; the other variant of this method slices.
                w = self.outputWeights
                wb = self.outputBiasWeights[i]
            self.output.append(Neuron(af.Logistic(), self.noHidden, w, wb))
Example #3
0
 def testScaledExponentialLinear(self, x, y):
     """Check scaledExponentialLinear(x) matches y to two decimal places."""
     actual = ActivationFunctions.scaledExponentialLinear(x)
     self.assertAlmostEqual(actual, y, 2)
Example #4
0
 def testLeakyRectifiedLinear(self, x, y):
     """Check leakyRectifiedLinear(x) equals y exactly."""
     actual = ActivationFunctions.leakyRectifiedLinear(x)
     self.assertEqual(actual, y)
Example #5
0
 def testParametricRectifiedLinear(self, x, y):
     """Check parametricRectifiedLinear(x) equals y exactly."""
     actual = ActivationFunctions.parametricRectifiedLinear(x)
     self.assertEqual(actual, y)
Example #6
0
 def testInverseSquareRoot(self, x, y):
     """Check inverseSquareRoot(x) matches y to two decimal places."""
     actual = ActivationFunctions.inverseSquareRoot(x)
     self.assertAlmostEqual(actual, y, 2)
Example #7
0
 def testBipolarRectifiedLinear(self, x, y):
     """Check bipolarRectifiedLinear(x) equals y exactly."""
     actual = ActivationFunctions.bipolarRectifiedLinear(x)
     self.assertEqual(actual, y)
Example #8
0
 def testArcSinH(self, x, y):
     """Check arcSinH(x) matches y to two decimal places."""
     actual = ActivationFunctions.arcSinH(x)
     self.assertAlmostEqual(actual, y, 2)
Example #9
0
 def testElliotSig(self, x, y):
     """Check elliotSig(x) matches y to two decimal places."""
     actual = ActivationFunctions.elliotSig(x)
     self.assertAlmostEqual(actual, y, 2)
Example #10
0
 def testSQNL(self, x, y):
     """Check sqnl(x) equals y exactly."""
     actual = ActivationFunctions.sqnl(x)
     self.assertEqual(actual, y)
Example #11
0
 def testTanh(self, x, y):
     """Check tanH(x) matches y to two decimal places."""
     actual = ActivationFunctions.tanH(x)
     self.assertAlmostEqual(actual, y, 2)
Example #12
0
 def testSigmoid(self, x, y):
     """Check sigmoid(x) matches y to two decimal places."""
     actual = ActivationFunctions.sigmoid(x)
     self.assertAlmostEqual(actual, y, 2)
Example #13
0
 def testBinaryStep(self, x, y):
     """Check binaryStep(x) equals y exactly."""
     actual = ActivationFunctions.binaryStep(x)
     self.assertEqual(actual, y)
Example #14
0
 def testIdentity(self, x, y):
     """Check identity(x) equals y exactly."""
     actual = ActivationFunctions.identity(x)
     self.assertEqual(actual, y)