def testCorrectBackwardPassNonVec(self):
    """Non-vectorised backward pass fills networkDeltas from the mocked
    derivative helpers.

    With the output-layer derivative mocked to 1 and the hidden-layer
    derivative mocked to 2 (and all layer outputs equal to 2), the last
    delta layer must be -2 and every earlier one -4.

    Fix: ``np.alltrue`` was deprecated in NumPy 1.25 and removed in
    NumPy 2.0 — replaced with ``np.all``.
    """
    testRandom = TestConstRandom(1)
    testMLP = MultiLayerPerceptronNetwork(testRandom, [3, 3, 3],
                                          learningRate=0.5, momentum=0)
    expOutVector = MagicMock(spec=np.ndarray)
    outputVectors = [
        np.array([2, 2, 2, 2]),
        np.array([2, 2, 2, 2]),
        np.array([2, 2, 2])
    ]
    # Stub out the derivative helpers and the weight update so only the
    # delta computation itself is exercised.
    with patch(
            'mlpBp.mlp.MultiLayerPerceptronNetwork._calcDerivErrorSigmoidOutput',
            lambda _, __: 1):
        with patch(
                'mlpBp.mlp.MultiLayerPerceptronNetwork._calcDerivErrorSigmoidHiddenNonVec',
                lambda _, __, ___: 2):
            with patch(
                    'mlpBp.mlp.MultiLayerPerceptronNetwork._forwardWeightsUpdateNonVec',
                    lambda _, __: _):
                testMLP._backwardPassNonVec(outputVectors, expOutVector)
    for layerId in range(len(testMLP.networkDeltas)):
        if layerId == len(testMLP.networkDeltas) - 1:
            self.assertTrue(
                np.all(testMLP.networkDeltas[layerId] == -2))
        else:
            self.assertTrue(
                np.all(testMLP.networkDeltas[layerId] == -4))
def testAcceptsCorrectOutputVector(self):
    """A correctly sized float64 output vector passes format validation."""
    mlp = MultiLayerPerceptronNetwork(TestConstRandom(1),
                                      TEST_NETWORK_TOPO, 0, 0)
    validVector = np.array([1] * TEST_NETWORK_TOPO[-1], dtype='float64')
    try:
        mlp._testOutputVecFormat(validVector)
    except Exception:
        self.fail("Raised exception although output is valid")
def testLearningRateDecay(self):
    """When decay is enabled, learningRate follows gaussianDecay(base, epoch)."""
    mlp = MultiLayerPerceptronNetwork(TestSeqRandom(),
                                      neuronsPerLayer=[3, 4, 3],
                                      learningRate=0.5,
                                      momentum=0.6,
                                      decayLearningRateFlag=True)
    mlp.epoch = 90
    self.assertEqual(mlp.learningRate, gaussianDecay(0.5, 90))
def testCorrectForwardPass(self):
    """Forward pass with sigmoid replaced by identity yields the expected
    final-layer outputs for all-ones inputs and constant weights."""
    cases = [([3, 3, 3], [13, 13, 13]),
             ([4, 1], [5])]
    for layout, expOut in cases:
        with self.subTest(test=(layout, expOut)):
            mlp = MultiLayerPerceptronNetwork(TestConstRandom(1),
                                              layout, 0, 0)
            inputVec = np.ones(layout[0], dtype='float64')
            # Identity activation makes the expected sums easy to state.
            with patch('mlpBp.mlp.sigmoid', lambda x: x):
                outputs = mlp._forwardPass(inputVec)
            expected = np.array(expOut, dtype='float64')
            self.assertTrue(np.array_equal(expected, outputs[-1]))
def testCorrectNormalisation(self):
    """_normaliseVector scales each component into the given (min, max) range."""
    rawVector = np.array([150, 150, 150], dtype='float64')
    minima = np.array([50, 0, 50], dtype='float64')
    maxima = np.array([450, 400, 450], dtype='float64')
    normalised = MultiLayerPerceptronNetwork._normaliseVector(
        rawVector, (minima, maxima))
    expected = np.array([0.25, 0.375, 0.25], dtype='float64')
    self.assertTrue(np.array_equal(normalised, expected))
def testCorrectForwardWeightsUpdateNonVec(self):
    """With the weight-derivative mocked to 1, the non-vectorised update
    moves every weight to 0.5 and records the previous weights (all 1)
    in tMinus1NetworkWeights.

    Fix: ``np.alltrue`` was deprecated in NumPy 1.25 and removed in
    NumPy 2.0 — replaced with ``np.all``.
    """
    testRandom = TestConstRandom(1)
    testMLP = MultiLayerPerceptronNetwork(testRandom, [3, 3, 3],
                                          learningRate=0.5, momentum=0)
    outputVectors = MagicMock(spec=list)
    with patch(
            'mlpBp.mlp.MultiLayerPerceptronNetwork._calcDerivErrorWeightNonVec',
            lambda _, __: 1):
        testMLP._forwardWeightsUpdateNonVec(outputVectors)
    for layerId in range(len(testMLP.networkWeights)):
        self.assertTrue(
            np.all(testMLP.networkWeights[layerId] == 0.5))
        self.assertTrue(
            np.all(testMLP.tMinus1NetworkWeights[layerId] == 1))
def testCorrectInitializedWeights(self):
    """TestSeqRandom yields an increasing sequence, so within each layer
    the weights must read 1, 2, 3, ... in row-major order."""
    mlp = MultiLayerPerceptronNetwork(TestSeqRandom(),
                                      TEST_NETWORK_TOPO, 0, 0)
    for layer in mlp.networkWeights:
        # layer.flat walks the 2-D weight matrix in the same row-major
        # order as nested loops over its rows and columns.
        for expect, weight in enumerate(layer.flat, start=1):
            self.assertEqual(expect, weight)
def testCorrectErrorRate(self):
    """_calcErrorRate returns the expected value for representative
    actual/target vector pairs."""
    cases = [([1, 0, 2], [1, 0, 0], 2),
             ([1, 0, 1], [1, 0, 1], 0),
             ([1, 1], [0, 0], 1)]
    for actual, target, expErr in cases:
        with self.subTest(test=(actual, target, expErr)):
            errRate = MultiLayerPerceptronNetwork._calcErrorRate(
                np.array(actual, dtype='float64'),
                np.array(target, dtype='float64'))
            self.assertEqual(expErr, errRate)
def testCorrectDeltaDimentions(self):
    """One 1-D delta vector per non-input layer, sized to that layer."""
    mlp = MultiLayerPerceptronNetwork(TestConstRandom(1),
                                      TEST_NETWORK_TOPO, 0, 0)
    self.assertEqual(len(TEST_NETWORK_TOPO) - 1, len(mlp.networkDeltas))
    for layerId, deltas in enumerate(mlp.networkDeltas):
        self.assertEqual(1, deltas.ndim)
        self.assertEqual(TEST_NETWORK_TOPO[layerId + 1], deltas.size)
def testCorrectLayerDimentions(self):
    """Each weight matrix is (inputs + 1) x neurons for its layer pair.

    NOTE(review): the +1 row presumably holds the bias weight — consistent
    with _calcLayerOutput appending a constant input element.
    """
    mlp = MultiLayerPerceptronNetwork(TestConstRandom(1),
                                      TEST_NETWORK_TOPO, 0, 0)
    self.assertEqual(len(TEST_NETWORK_TOPO) - 1, len(mlp.networkWeights))
    for layerId, layer in enumerate(mlp.networkWeights):
        noWeights, noNeurons = layer.shape
        self.assertEqual(TEST_NETWORK_TOPO[layerId] + 1, noWeights)
        self.assertEqual(TEST_NETWORK_TOPO[layerId + 1], noNeurons)
def testCorrectCalcOutcomeWinnerTakeAll(self):
    """_calcOutcomesWinnerTakeAll yields the expected outcome count for
    representative actual/target pairs."""
    cases = [([1, 0, 2], [1, 0, 0], 0),
             ([0.98, 0, 0.95], [1, 0, 0], 1),
             ([0, 0], [0, 0], 1)]
    for actual, target, expOutcome in cases:
        with self.subTest(test=(actual, target, expOutcome)):
            outcome = MultiLayerPerceptronNetwork._calcOutcomesWinnerTakeAll(
                np.array(actual, dtype='float64'),
                np.array(target, dtype='float64'))
            self.assertEqual(expOutcome, outcome)
def testForwardWeightUpdateNonVecEqualsVecVersion(self, out1, out2, out3,
                                                  delta1, delta2):
    """The vectorised and non-vectorised weight updates must produce
    identical weights — and both must actually change them."""
    rnd = TestSeqRandom()
    vecMLP = MultiLayerPerceptronNetwork(rnd, [3, 4, 3], 0.5, 0.6)
    nonVecMLP = MultiLayerPerceptronNetwork(rnd, [3, 4, 3], 0.5, 0.6)
    weightsBefore = copy.deepcopy(vecMLP.networkWeights)
    vecMLP.networkDeltas = [delta1, delta2]
    nonVecMLP.networkDeltas = [delta1, delta2]
    outputs = [out1, out2, out3]
    nonVecMLP._forwardWeightsUpdateNonVec(outputs)
    vecMLP._forwardWeightsUpdateVec(outputs)
    for vecW, nonVecW, oldW in zip(vecMLP.networkWeights,
                                   nonVecMLP.networkWeights,
                                   weightsBefore):
        assert_array_equal(vecW, nonVecW)
        # The update must have moved the weights off their initial values.
        assert_raises(AssertionError, assert_array_equal, vecW, oldW)
def testCorrectCalcOutcomeRoundEach(self):
    """_calcOutcomesRoundEach yields the expected outcome count for
    representative actual/target pairs."""
    cases = [([1, 0, 2], [1, 0, 0], 2),
             ([1, 0, 1], [1, 0, 1], 3),
             ([1, 1], [0, 0], 0)]
    for actual, target, expOutcome in cases:
        with self.subTest(test=(actual, target, expOutcome)):
            outcome = MultiLayerPerceptronNetwork._calcOutcomesRoundEach(
                np.array(actual, dtype='float64'),
                np.array(target, dtype='float64'))
            self.assertEqual(expOutcome, outcome)
def testRaisesInvalidOutputVector(self):
    """Both a wrong-length array and a non-array value must be rejected
    with the 'Invalid Output vector format' error."""
    mlp = MultiLayerPerceptronNetwork(TestConstRandom(1),
                                      TEST_NETWORK_TOPO, 0, 0)
    badInputs = [
        # one element too many for the output layer
        np.array([1] * (TEST_NETWORK_TOPO[-1] + 1), dtype='float64'),
        # not an ndarray at all
        5,
    ]
    for badVector in badInputs:
        self.assertRaisesRegex(Exception, "Invalid Output vector format",
                               mlp._testOutputVecFormat, badVector)
def testCorrectDerivErrorSigmoidHiddenNonVec(self):
    """Hidden-layer error derivative matches the expected scalar for
    given next-layer deltas, weights and node index."""
    cases = [([2, 2, 2], [[1, 1, 1]], 0, 6),
             ([3, 3, 3], [[0, 0, 0], [2, 2, 2]], 1, 18)]
    for deltas, weights, curNode, expOut in cases:
        with self.subTest(test=(deltas, weights, expOut)):
            result = MultiLayerPerceptronNetwork._calcDerivErrorSigmoidHiddenNonVec(
                curNode,
                np.array(weights, dtype='float64'),
                np.array(deltas, dtype='float64'))
            self.assertEqual(expOut, result)
def testBackwardPassNonVecEqualsVecVersion(self, out1, out2, out3, expOut):
    """The vectorised and non-vectorised backward passes must leave
    identical network weights."""
    rnd = TestSeqRandom()
    vecMLP = MultiLayerPerceptronNetwork(rnd, [3, 4, 3], 0.5, 0.6)
    nonVecMLP = MultiLayerPerceptronNetwork(rnd, [3, 4, 3], 0.5, 0.6)
    outputs = [out1, out2, out3]
    nonVecMLP._backwardPassNonVec(outputs, expOut)
    vecMLP._backwardPassVec(outputs, expOut)
    for vecW, nonVecW in zip(vecMLP.networkWeights,
                             nonVecMLP.networkWeights):
        assert_array_equal(vecW, nonVecW)
def testCorrectLayerOutputCalculation(self):
    """_calcLayerOutput returns sigmoid of the weighted sums plus the
    next layer's input vector (all ones here)."""
    inVec = np.array([1, 1], dtype='float64')
    layerWeights = np.ones((3, 2), dtype='float64')
    newInput, output = MultiLayerPerceptronNetwork._calcLayerOutput(
        inVec, layerWeights)
    expectedOutput = sigmoid(np.array([3, 3], dtype='float64'))
    self.assertTrue(np.array_equal(expectedOutput, output))
    self.assertTrue(
        np.array_equal(np.array([1] * 3, dtype='float64'), newInput))
# Entry point: build an MLP from the parameter file named on the command
# line and train it on the configured data sets.
from sys import argv

from mlpBp.mlp import MultiLayerPerceptronNetwork, NumpyRandom
from mlpBp.parser import parseParams, readParams

params = parseParams(readParams(argv[1]))
rndGen = NumpyRandom()
mlp = MultiLayerPerceptronNetwork(rndGen,
                                  params["layout"],
                                  params["learningRate"],
                                  params["momentum"],
                                  params["verbose"],
                                  params["outcomeCalcMethod"],
                                  params["decayLearningRate"])
mlp.trainOnDataSet(params["trainSet"],
                   params["testSet"],
                   params["maxIterations"],
                   params["minErrorRate"])