    def test_input_passes_through_network(self):
        normalizer = Normalizer(in_max=10)
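        # Layer sizes: 2 input neurons, hidden layers of 4 and 5, and 3 output neurons.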
        network = FeedForwardNN(normalizer, [2, 4, 5, 3])

        network.receive_inputs([5.0, 7.1])

        for output in network.output_layer:
            self.assertNotAlmostEqual(output.output, 0.0)

        sent_signals = [
            connection.signal_sent
            for layer in network.layers
            for node in layer
            for connection in node.out_connections
        ]

        # Every connection should have forwarded a nonzero signal.
        self.assertTrue(all(s != 0.0 for s in sent_signals))

    def test_teach_acceptable_error(self):
        backpropagator = Backpropagator()
        normalizer = Normalizer(in_min=-15, in_max=15,
                                out_min=-30, out_max=30,
                                norm_min=-2, norm_max=2)
        
        network = FeedForwardNN(normalizer, [1, 3, 1])
        
        network.randomize_connection_weights(seed=74)

        expectations = [Expectation([i], [2*i])
                        for i in range(-5, 5)]
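        # The expectations sample f(x) = 2*x for x in [-5, 4], so inputs stay
        # inside the Normalizer's [-15, 15] input range and targets inside its
        # [-30, 30] output range.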

        result = backpropagator.teach(network,
                                      expectations,
                                      learning_rate=1.5,
                                      max_iterations=2000,
                                      acceptable_error=.5)
        
        self.assertLessEqual(result.error, .5)

        # The error recomputed over the whole training set should also fall
        # within the acceptable tolerance.
        total_error = 0
        for exp in expectations:
            total_error += backpropagator.calculate_error(
                network.receive_inputs(exp.inputs), exp.outputs)

        self.assertLessEqual(total_error, .5)

    def test_propagate_errors(self):
        backpropagator = Backpropagator()
        normalizer = Normalizer(in_max=100, out_max=200)
        network = FeedForwardNN(normalizer, [1, 2, 1])
        expectation = Expectation([50], [148])
        result = network.receive_inputs(expectation.inputs)

        # Smoke test: one backpropagation step after a forward pass should
        # complete without raising.
        backpropagator.learn(network, expectation, 1)
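        # A stronger variant could capture the error before and after the
        # learn() step and assert that it does not grow. Sketch only (it reuses
        # the calculate_error/receive_inputs calls from test_learn below, and a
        # single step is not guaranteed to shrink the error):
        #   before = backpropagator.calculate_error(result, expectation.outputs)
        #   backpropagator.learn(network, expectation, 1)
        #   after = backpropagator.calculate_error(
        #       network.receive_inputs(expectation.inputs), expectation.outputs)
        #   self.assertLessEqual(after, before)
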
    def test_learn(self):
        backpropagator = Backpropagator()
        normalizer = Normalizer(in_min=1, in_max=15,
                                out_min=-1, out_max=1,
                                norm_min=-3, norm_max=3)
        
        network = FeedForwardNN(normalizer, [1, 2, 2, 1])
        network.randomize_connection_weights(seed=74)
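        # The target 0.8415 is sin(1) to four decimal places, and the
        # Normalizer's output range [-1, 1] matches the range of sine.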
        expectation = Expectation([1], [0.8415])

        error = backpropagator.calculate_error(
            network.receive_inputs(expectation.inputs),
            expectation.outputs)

        # Each successive learning step on the same expectation should reduce
        # the error.
        for _ in range(20):
            last_error = error
            backpropagator.learn(network, expectation, 1.5)
            actual = network.receive_inputs(expectation.inputs)
            error = backpropagator.calculate_error(actual, expectation.outputs)
            self.assertLess(error, last_error)

    def test_calculation(self):
        normalizer = Normalizer(in_min=1, in_max=15,
                                out_min=-1, out_max=1,
                                norm_min=-3, norm_max=3)

        network = FeedForwardNN(normalizer, [1, 2, 2, 1])
        neurons = network.neurons

        outputs = network.receive_inputs([1])

        self.assertEqual(len(outputs), 1)
        # Nodes 1, 4, and 7 are bias nodes
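        # The expected values below can be reproduced by hand, assuming (as the
        # numbers themselves suggest) unrandomized connection weights of 1.0, a
        # logistic activation on every non-bias neuron, linear input
        # normalization from [in_min, in_max] onto [norm_min, norm_max], and
        # output denormalization from [0, 1] onto [out_min, out_max]:
        #   input 1 normalizes to -3:               sigmoid(-3)        = 0.0474259...
        #   first hidden layer:  0.0474259 + 1:     sigmoid(1.0474259) = 0.7402803...
        #   second hidden layer: 2*0.7402803 + 1:   sigmoid(2.4805606) = 0.9227678...
        #   output neuron:       2*0.9227678 + 1:   sigmoid(2.8455355) = 0.9450874...
        #   denormalized output: -1 + 2*0.9450874... = 0.8901749...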
        self.assertAlmostEqual(neurons[0].output, 0.04742587318)
        self.assertAlmostEqual(neurons[1].output, 1)
        self.assertAlmostEqual(neurons[2].output, 0.7402802899)
        self.assertAlmostEqual(neurons[3].output, 0.7402802899)
        self.assertAlmostEqual(neurons[4].output, 1)
        self.assertAlmostEqual(neurons[5].output, 0.9227677584)
        self.assertAlmostEqual(neurons[6].output, 0.9227677584)
        self.assertAlmostEqual(neurons[7].output, 1)
        self.assertAlmostEqual(neurons[8].output, 0.9450874486)
        self.assertAlmostEqual(outputs[0], 0.8901748973)