def test_teach_acceptable_error(self):
    """teach() must stop once total error falls to the acceptable threshold,
    and the reported error must match the network's actual performance."""
    trainer = Backpropagator()
    scaler = Normalizer(
        in_min=-15, in_max=15,
        out_min=-30, out_max=30,
        norm_min=-2, norm_max=2,
    )
    net = FeedForwardNN(scaler, [1, 3, 1])
    net.randomize_connection_weights(seed=74)  # fixed seed for reproducibility
    samples = [Expectation([x], [2 * x]) for x in range(-5, 5)]
    outcome = trainer.teach(
        net, samples,
        learning_rate=1.5, max_iterations=2000, acceptable_error=.5,
    )
    self.assertLessEqual(outcome.error, .5)
    # Re-evaluate the trained network on every sample to confirm the
    # reported error reflects real behavior, not just internal bookkeeping.
    total_error = sum(
        trainer.calculate_error(net.receive_inputs(s.inputs), s.outputs)
        for s in samples
    )
    self.assertLessEqual(total_error, .5)
def test_propagate_errors(self):
    """Smoke test: a single learn() pass on a fresh network must not raise.

    NOTE(review): the original bound receive_inputs()' return value to an
    unused local and asserted nothing; the dead binding is removed here.
    TODO: add assertions that learn() actually adjusts connection weights
    (or reduces error) so this test can fail meaningfully.
    """
    backpropagator = Backpropagator()
    normalizer = Normalizer(in_max=100, out_max=200)
    network = FeedForwardNN(normalizer, [1, 2, 1])
    expectation = Expectation([50], [148])
    # Forward pass first: backpropagation reads the node outputs it produces.
    network.receive_inputs(expectation.inputs)
    backpropagator.learn(network, expectation, 1)
def test_input_passes_through_network(self):
    """After a forward pass, every output-layer node and every connection's
    sent signal must be nonzero — i.e. the input reached the whole network."""
    normalizer = Normalizer(in_max=10)
    network = FeedForwardNN(normalizer, [2, 4, 5, 3])
    network.receive_inputs([5.0, 7.1])
    for output in network.output_layer:
        self.assertNotAlmostEqual(output.output, 0.0)
    sent_signals = [
        connection.signal_sent
        for layer in network.layers
        for node in layer
        for connection in node.out_connections
    ]
    # BUG FIX: the original did assertTrue(s != 0.0 for s in ...), which
    # passes a generator object — always truthy, so the check could never
    # fail. all() actually evaluates every signal.
    self.assertTrue(all(s != 0.0 for s in sent_signals))
def test_calculation(self):
    """Pin the exact activation of every neuron for input [1] on a fixed
    [1, 2, 2, 1] topology. Nodes 1, 4 and 7 are bias nodes (output 1)."""
    normalizer = Normalizer(
        in_min=1, in_max=15,
        out_min=-1, out_max=1,
        norm_min=-3, norm_max=3,
    )
    network = FeedForwardNN(normalizer, [1, 2, 2, 1])
    neurons = network.neurons
    outputs = network.receive_inputs([1])
    self.assertEqual(len(outputs), 1)
    expected_activations = {
        0: 0.04742587318,
        1: 1,             # bias node
        2: 0.7402802899,
        3: 0.7402802899,
        4: 1,             # bias node
        5: 0.9227677584,
        6: 0.9227677584,
        7: 1,             # bias node
        8: 0.9450874486,
    }
    for index, value in expected_activations.items():
        self.assertAlmostEqual(neurons[index].output, value)
    self.assertAlmostEqual(outputs[0], 0.8901748973)
def test_teach_max_iterations(self):
    """With an unreachable error target, teach() must run exactly
    max_iterations epochs before giving up."""
    trainer = Backpropagator()
    scaler = Normalizer(
        in_min=-15, in_max=15,
        out_min=-30, out_max=30,
        norm_min=-2, norm_max=2,
    )
    net = FeedForwardNN(scaler, [1, 2, 2, 1])
    net.randomize_connection_weights(seed=74)  # deterministic start
    samples = [Expectation([x], [2 * x]) for x in range(-5, 5)]
    outcome = trainer.teach(
        net, samples,
        learning_rate=1.5,
        max_iterations=123,
        acceptable_error=0,  # impossible target forces the iteration cap
    )
    self.assertEqual(outcome.epochs, 123)
def learn_function(fn, in_min, in_max):
    """Train a small regression network to approximate *fn* over the input range.

    Args:
        fn: callable mapping a float input to the expected outputs sequence
            (consumed as Expectation outputs; outputs[0] drives output ranging).
        in_min, in_max: bounds of the input domain to sample.

    Returns:
        The trained FeedForwardNN.
    """
    # NOTE(review): linspace runs to in_max + 1, so samples extend past the
    # declared domain — looks like a leftover from an integer range() idiom.
    # Kept for behavior compatibility; confirm before changing.
    expectations = [Expectation([i], fn(i))
                    for i in np.linspace(in_min, in_max + 1, num=100)]
    # Pad the normalizer's input range 10% beyond the sampled domain.
    in_range = in_max - in_min
    in_min -= 0.1 * in_range
    in_max += 0.1 * in_range
    # Derive the output range from the observed targets, then pad 10%.
    # (Replaces a manual scan that used `== None` comparisons; min()/max()
    # are equivalent here since expectations is never empty.)
    out_min = min(e.outputs[0] for e in expectations)
    out_max = max(e.outputs[0] for e in expectations)
    out_range = out_max - out_min
    out_min -= 0.1 * out_range
    out_max += 0.1 * out_range
    backpropagator = Backpropagator()
    normalizer = Normalizer(in_min=in_min, in_max=in_max,
                            out_min=out_min, out_max=out_max,
                            norm_min=-10, norm_max=10)
    network = FeedForwardNN(normalizer, [1, 3, 3, 1], is_regression=True)
    network.randomize_connection_weights(min=-1, max=1)
    # Live visualization of fit quality during training.
    show_fit_tracker(network, expectations)
    backpropagator.teach(
        network,
        expectations,
        learning_rate=0.008,
        max_iterations=30000,
        acceptable_error=1,
        callback_func=update_fit_plot,
    )
    return network
def test_learn(self):
    """Each learn() step on a fixed expectation must strictly reduce error.

    Fixes two hygiene issues from the original: a leftover debug
    print(actual) inside the loop, and an unused `neurons` local.
    """
    backpropagator = Backpropagator()
    normalizer = Normalizer(in_min=1, in_max=15,
                            out_min=-1, out_max=1,
                            norm_min=-3, norm_max=3)
    network = FeedForwardNN(normalizer, [1, 2, 2, 1])
    network.randomize_connection_weights(seed=74)  # deterministic start
    expectation = Expectation([1], [0.8415])
    error = backpropagator.calculate_error(
        network.receive_inputs(expectation.inputs), expectation.outputs)
    for _ in range(20):
        last_error = error
        backpropagator.learn(network, expectation, 1.5)
        actual = network.receive_inputs(expectation.inputs)
        error = backpropagator.calculate_error(actual, expectation.outputs)
        # Error must decrease monotonically on every single step.
        self.assertLess(error, last_error)