def test_for_non_positive_values(self):
        """ReLU gradient must be zero for every input <= 0."""
        for value in (-0.5, 0, -10.5):
            grad = Rectifier.gradient(np.array([value]))
            self.assertEqual(grad.tolist(), [0])
    def test_for_positive_input_values(self):
        """ReLU gradient must be one for every strictly positive input."""
        # NOTE(review): 10 * 3 == 30 repeats the previous case — possibly
        # 10 ** 3 was intended; confirm against the original test suite.
        for value in (0.001, 30, 10 * 3):
            grad = Rectifier.gradient(np.array([value]))
            self.assertEqual(grad.tolist(), [1])
    def test_for_positive_input_values(self):
        """ReLU activation is the identity on positive inputs."""
        # NOTE(review): 10 * 3 == 30 — possibly 10 ** 3 was intended; verify.
        for value in (0.001, 2, 10 * 3):
            out = Rectifier.activation(np.array([value]))
            self.assertEqual(out.tolist(), [value])
    def test_get_final_layer_error_for_1_element_vectors(self):
        """Quadratic cost: final-layer error equals (a - y) * g'(z) for one unit."""
        quadratic = cost_functions.QuadraticCost(neural_net=self.net)

        z_last = np.array([-1], float)
        z_last_prime = Rectifier.gradient(z_last)
        y = np.array([0.5], float)
        a_last = Rectifier.activation(z_last)

        nabla = quadratic.get_final_layer_error(a_last, y, z_last_prime)
        expected = (a_last - y) * z_last_prime
        self.assertAlmostEqual(nabla[0], expected, places=2)
# Ejemplo n.º 5 (scraping artifact: marker separating pasted code snippets)
    def test_get_final_layer_error_for_1_element_vectors(self):
        """Cross-entropy cost: final-layer error is (a - y), regardless of z'."""
        cross_entropy = cost_functions.CrossEntropyCost(self.net)

        # (z, gradient used for z', target y) — the second case deliberately
        # feeds a ReLU gradient to show z' does not affect the result.
        cases = [
            (np.array([3], float), Sigmoid.gradient, np.array([0], float)),
            (np.array([-1], float), Rectifier.gradient, np.array([0.5], float)),
        ]
        for z_last, gradient_fn, y in cases:
            z_last_prime = gradient_fn(z_last)
            a_last = Sigmoid.activation(z_last)
            nabla = cross_entropy.get_final_layer_error(a_last, y, z_last_prime)
            self.assertAlmostEqual(nabla[0], (a_last - y), places=2)
 def test_for_vectors(self):
     """ReLU gradient is applied element-wise across a vector."""
     inputs = np.array([-10, 0, 2, 10**20], float)
     self.assertEqual(Rectifier.gradient(inputs).tolist(), [0, 0, 1, 1])
 def test_for_vectors(self):
     """ReLU activation is applied element-wise: negatives clamp to zero."""
     inputs = np.array([-10, 0, 2, 10**20], float)
     self.assertEqual(Rectifier.activation(inputs).tolist(), [0, 0, 2, 10**20])
 def test_for_zero_input_values(self):
     """ReLU activation at exactly zero yields zero."""
     result = Rectifier.activation(np.array([0]))
     self.assertEqual(result.tolist(), [0])
    def test_for_negative_input_values(self):
        """ReLU activation clamps every negative input to zero."""
        for value in (-0.5, -10.5):
            out = Rectifier.activation(np.array([value]))
            self.assertEqual(out.tolist(), [0])