Example #1
 def test_compute_delta(self):
     # compute_delta should scale the error by the derivative of the sigmoid,
     # with that derivative evaluated from the activation (sigmoid output) itself.
     dblActivation = random.random()
     dblError = 2.0 * random.random() - 1.0
     dblDelta = nn.compute_delta(dblActivation, dblError)
     dblActivationPrime = sigmoid_prime_from_sigmoid(dblActivation)
     # Both assertions check the same identity from different directions:
     # dblDelta == dblError * dblActivationPrime.
     self.assertAlmostEqual(dblError, dblDelta / dblActivationPrime)
     self.assertAlmostEqual(dblActivationPrime, dblDelta / dblError)
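For reference, a minimal sketch of what this test assumes. The actual nn.compute_delta and sigmoid_prime_from_sigmoid are not shown on this page; the versions below are only inferred from the assertions, assuming the usual identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).

 def sigmoid_prime_from_sigmoid(dblSigmoid):
     # Assumed helper: derivative of the logistic sigmoid, written in terms of its output.
     return dblSigmoid * (1.0 - dblSigmoid)

 def compute_delta(dblActivation, dblError):
     # Assumed behavior: back-propagated delta is the error scaled by sigmoid'(activation).
     return dblError * sigmoid_prime_from_sigmoid(dblActivation)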
Example #2
 def test_update_pcpt(self):
     # update_pcpt is expected to add dblLearningRate * dblDelta * input to each
     # weight and dblLearningRate * dblDelta to the bias weight dblW0.
     dblLearningRate = random.random() + 0.5
     cWeight = random.randint(10, 20)
     listDblInput = randlist(-0.5, 0.5, cWeight)
     listDblW = randlist(-0.5, 0.5, cWeight)
     dblW0 = random.random() - 0.5
     dblActivation = random.random() * 0.5 + 0.25
     dblError = random.random() - 0.5
     dblDelta = nn.compute_delta(dblActivation, dblError)
     ixPcpt = random.randint(0, 100)
     pcpt = nn.Perceptron(list(listDblW), dblW0, ixPcpt)
     nn.update_pcpt(pcpt, listDblInput, dblDelta, dblLearningRate)
     self.assertEqual(len(listDblW), len(pcpt.listDblW))
     # Each assertion below is algebraically equivalent to checking that
     # dblW - dblWOrig == dblLearningRate * dblActivation * (1 - dblActivation) * dblError * dblIn,
     # i.e. the delta-rule update when sigmoid_prime_from_sigmoid(a) is a * (1 - a).
     dblProductBase = dblLearningRate * dblActivation * dblError
     for dblW, dblIn, dblWOrig in zip(pcpt.listDblW, listDblInput, listDblW):
         dblProduct = dblProductBase * dblIn
         dblExpected = dblProduct - (dblW - dblWOrig)
         self.assertAlmostEqual(dblProduct * dblActivation, dblExpected)
     # Same check for the bias weight, without the input factor.
     self.assertAlmostEqual(dblProductBase * dblActivation,
                            dblProductBase - (pcpt.dblW0 - dblW0))
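For reference only, a minimal sketch (an assumption, not the actual nn module) of Perceptron, update_pcpt, and the randlist test helper that would satisfy the test above, using the standard delta-rule update:

 import random

 class Perceptron(object):
     def __init__(self, listDblW, dblW0, ix):
         self.listDblW = listDblW   # input weights
         self.dblW0 = dblW0         # bias weight
         self.ix = ix               # index of this perceptron within its layer

 def update_pcpt(pcpt, listDblInput, dblDelta, dblLearningRate):
     # Assumed delta-rule update: w_i += lr * delta * input_i, then w0 += lr * delta.
     for ix, dblIn in enumerate(listDblInput):
         pcpt.listDblW[ix] += dblLearningRate * dblDelta * dblIn
     pcpt.dblW0 += dblLearningRate * dblDelta

 def randlist(dblLo, dblHi, c):
     # Assumed test helper: c uniform random floats in [dblLo, dblHi].
     return [random.uniform(dblLo, dblHi) for _ in range(c)]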