    def test_derivatives_with_different_indices_in_jacobian_matrix(self):
        z = np.array([1, -1.5], float)
        j = Softmax.gradient(z)
        s = Softmax.activation(z)

        # off-diagonal entries of the softmax Jacobian: ds_i/dz_j = -s_i * s_j for i != j
        self.assertEqual(j[0, 1], -s[0] * s[1])
        self.assertEqual(j[1, 0], -s[1] * s[0])
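# A minimal sketch of the Jacobian exercised by the test above, assuming Softmax.gradient(z)
# returns the full matrix J with J[i, j] = s_i * (delta_ij - s_j); `softmax_jacobian` is a
# hypothetical standalone helper, not part of the tested library.
import numpy as np

def softmax_jacobian(z):
    e = np.exp(z)
    s = e / e.sum()                      # softmax probabilities
    return np.diag(s) - np.outer(s, s)   # off-diagonal entries are -s_i * s_j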
    def test_for_2_element_vectors(self):
        z = np.array([1, 2], float)
        a = Softmax.activation(z)
        self.assertTrue(np.allclose(
            a,
            np.array([0.268941, 0.731058], float),
        ))

        z = np.array([0, 2], float)
        a = Softmax.activation(z)
        self.assertTrue(np.allclose(
            a,
            np.array([0.1192029, 0.880797], float),
        ))
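# The expected vectors in the test above come straight from the definition
# softmax(z)_i = exp(z_i) / sum_j exp(z_j); a quick standalone check with plain numpy
# (nothing here depends on the Softmax class):
z = np.array([1, 2], float)
e = np.exp(z)                 # [e^1, e^2] ~ [2.718282, 7.389056]
print(e / e.sum())            # ~[0.268941, 0.731059]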
    def _forward_pass(self, x, third_layer_activation):
        '''
        Performs the forward pass of the network.

        a_i - result of applying the weights to the data from the previous layer
        z_i - result of the activation

        The intermediate results are saved for the subsequent backward pass.
        :param x: input data
        :param third_layer_activation: which activation to apply to the third layer
        :return: multi-class prediction (n_class, 1)
        '''
        self.x = x
        self.a_1 = self.w1.dot(x) + self.b1
        self.z_1 = Tanh.activation(self.a_1)

        self.a_2 = self.w2.dot(self.z_1) + self.b2
        self.z_2 = Relu.activation(self.a_2)
        self.z_2_with_skip_connection = self.z_2 + self.w_s.dot(x)

        self.a_3 = self.w_out.dot(self.z_2_with_skip_connection) + self.b_out
        if third_layer_activation == 'Softmax':
            self.y_pred = Softmax.activation(self.a_3)
        elif third_layer_activation == 'Tanh':
            self.y_pred = Tanh.activation(self.a_3)
        else:
            raise ValueError("Unknown activation type for 3rd layer")
        return self.y_pred
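# A self-contained sketch of the same three-layer forward pass with a skip connection,
# written with plain numpy; the function name, the column-vector inputs, and the
# pre-built weight matrices are illustrative assumptions, not part of the original class.
def forward_pass_sketch(x, w1, b1, w2, b2, w_s, w_out, b_out):
    a_1 = w1.dot(x) + b1
    z_1 = np.tanh(a_1)                     # first hidden layer: tanh

    a_2 = w2.dot(z_1) + b2
    z_2 = np.maximum(a_2, 0)               # second hidden layer: ReLU
    z_2 = z_2 + w_s.dot(x)                 # skip connection from the input

    a_3 = w_out.dot(z_2) + b_out
    e = np.exp(a_3 - a_3.max())            # softmax output head (shifted for stability)
    return e / e.sum()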
    def test_get_final_layer_error_for_arrays(self):
        cross_entropy = cost_functions.CrossEntropyCost(self.net)

        z_last = np.array([3, -1], float)
        z_last_prime = Softmax.gradient(z_last)

        y = np.array([0, 0.5], float)
        a_last = Softmax.activation(z_last)
        nabla = cross_entropy.get_final_layer_error(a_last, y, z_last_prime)

        self.assertAlmostEqual(nabla[0], a_last[0] - y[0], places=2)
        self.assertAlmostEqual(nabla[1], a_last[1] - y[1], places=2)
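# Why the assertions above expect nabla ~= a_last - y: with a softmax output layer and
# cross-entropy cost, the chain rule delta = J^T * dC/da collapses to a - y (exactly so
# when the targets sum to 1). A hedged numerical check with plain numpy, using an
# illustrative one-hot target rather than the [0, 0.5] vector from the test:
z = np.array([3, -1], float)
a = np.exp(z) / np.exp(z).sum()      # softmax activations
y = np.array([0.0, 1.0])             # one-hot target (assumption for this check)
J = np.diag(a) - np.outer(a, a)      # softmax Jacobian
delta = J.T.dot(-y / a)              # dC/da for cross-entropy is -y / a
assert np.allclose(delta, a - y)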
    def test_results_add_to_1(self):
        z = np.array([-3, 0.1, 1, 20], float)
        a = Softmax.activation(z)
        self.assertAlmostEqual(a.sum(), 1)
    def test_activations_in_correct_range(self):
        z = np.array([-1000, 0.1, 2, 200], float)
        a = Softmax.activation(z)

        self.assertTrue(np.all(0 <= a) and np.all(a <= 1))
    def test_returns_array_of_valid_shape(self):
        z = np.array([1, 2], float)
        a = Softmax.activation(z)
        self.assertTupleEqual(a.shape, z.shape)
    def test_on_vectors_with_huge_components(self):
        z = np.array([np.finfo(float).max, 2, np.finfo(float).max / 2], float)
        # won't raise OverflowError
        a = Softmax.activation(z)
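# The test above relies on Softmax.activation being numerically stable. A common way to
# get that behaviour (an assumption about the implementation, shown only as a sketch) is
# to subtract the largest component before exponentiating, which leaves the result
# unchanged but keeps np.exp() within range:
def stable_softmax(z):
    shifted = z - np.max(z)    # largest exponent becomes 0, so exp() cannot overflow
    e = np.exp(shifted)
    return e / e.sum()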