def test_quadratic_cost(self):
        inputs = [np.array([0.7, 0.6, 0.1], float), np.array([1, 0, 0], float)]
        outputs = [np.array([0, 0.5], float), np.array([0, 0], float)]
        quadracost = cost_functions.QuadraticCost(neural_net=self.net)

        src = PreloadSource((inputs, outputs))
        c = quadracost.get_cost(data_src=src)

        self.assertAlmostEqual(c, 0.75 / 4.0, places=3)
    def test_get_cost_initial(self):
        """Cost of a fresh 1-1-1 network on two examples equals 1/64."""
        net = NetFactory.create_neural_net(sizes=[1, 1, 1])

        inputs = [np.array([-10], float), np.array([100], float)]
        targets = [np.array([0.5], float), np.array([0.75], float)]

        quad = cost_functions.QuadraticCost(net)
        cost = quad.get_cost(PreloadSource((inputs, targets)))
        self.assertAlmostEqual(cost, 1.0 / 64, places=4)
    def test_get_final_layer_error_for_1_element_vectors(self):
        """delta = (a - y) * f'(z) for a single-element output layer."""
        quadratic = cost_functions.QuadraticCost(neural_net=self.net)

        z_last = np.array([-1], float)
        z_last_prime = Rectifier.gradient(z_last)

        y = np.array([0.5], float)
        a_last = Rectifier.activation(z_last)
        nabla = quadratic.get_final_layer_error(a_last, y, z_last_prime)
        # Compare scalars, not shape-(1,) arrays: assertAlmostEqual rounds its
        # operands, and round() on a numpy ndarray raises TypeError. Indexing
        # element 0 mirrors the sibling array test below.
        self.assertAlmostEqual(nabla[0], (a_last[0] - y[0]) * z_last_prime[0],
                               places=2)
    def test_get_final_layer_error_for_arrays(self):
        """delta_i = (a_i - y_i) * sigmoid'(z_i) holds component-wise."""
        quadratic = cost_functions.QuadraticCost(neural_net=self.net)

        z = np.array([3, -1], float)
        targets = np.array([0, 0.5], float)
        activations = Sigmoid.activation(z)
        gradients = Sigmoid.gradient(z)
        nabla = quadratic.get_final_layer_error(activations, targets, gradients)

        # Check each output component against the analytic formula.
        for i in (0, 1):
            expected = (activations[i] - targets[i]) * gradients[i]
            self.assertAlmostEqual(nabla[i], expected, places=2)
# Beispiel #5
# 0
    def test_compute_gradients_with_quadratic_cost(self):
        """Backprop gradients agree with numerically differentiated ones."""
        net = NetFactory.create_neural_net(sizes=[4, 2, 10])
        net.randomize_parameters()
        cost = cost_functions.QuadraticCost(neural_net=net)
        examples = helpers.generate_random_examples(10, 4, 10)

        # Same data, same net, same cost — two independent gradient calculators.
        analytic = BackPropagationBasedCalculator(
            data_src=PreloadSource(examples),
            neural_net=net,
            cost_function=cost)
        numerical = NumericalCalculator(
            data_src=PreloadSource(examples),
            neural_net=net,
            cost_function=cost)

        w_analytic, b_analytic = analytic.compute_gradients()
        w_numeric, b_numeric = numerical.compute_gradients()

        self.compare_grads(grad1=w_analytic, grad2=w_numeric)
        self.compare_grads(grad1=b_analytic, grad2=b_numeric)
# Beispiel #6
# 0
    def test_returns_correct_gradient_shape(self):
        """Gradients carry one entry per weight layer with matching shapes."""
        net = NetFactory.create_neural_net(sizes=[3, 2, 2, 5])
        cost = cost_functions.QuadraticCost(neural_net=net)

        example = ([np.array([5, 2, -0.5], float)],
                   [np.array([0.25, 0, 0, 0.7, 0.2], float)])
        calc = NumericalCalculator(data_src=PreloadSource(example),
                                   neural_net=net,
                                   cost_function=cost)
        w_grad, b_grad = calc.compute_gradients()

        self.assertEqual(len(w_grad), 3)
        self.assertEqual(len(b_grad), 3)

        # Weight shapes are (fan_out, fan_in) per consecutive layer pair;
        # bias shapes are (fan_out,).
        for grad, shape in zip(w_grad, [(2, 3), (2, 2), (5, 2)]):
            self.assertTupleEqual(grad.shape, shape)
        for grad, rows in zip(b_grad, (2, 2, 5)):
            self.assertTupleEqual(grad.shape, (rows,))
# Beispiel #7
# 0
    def test_that_returned_type_is_array(self):
        """compute_gradients returns lists whose entries are numpy arrays."""
        net = NetFactory.create_neural_net(sizes=[2, 1, 2])
        cost = cost_functions.QuadraticCost(neural_net=net)

        examples = ([np.array([5, 2], float)],
                    [np.array([0.25, 0], float)])
        calc = BackPropagationBasedCalculator(
            data_src=PreloadSource(examples),
            neural_net=net,
            cost_function=cost)
        w_grad, b_grad = calc.compute_gradients()

        # Same structural checks for both weight and bias gradients.
        for grads in (w_grad, b_grad):
            self.assertIsInstance(grads, list)
            self.assertIsInstance(grads[0], np.ndarray)
            self.assertIsInstance(grads[1], np.ndarray)
# Beispiel #8
# 0
def step(context):
    # Attach a quadratic cost function bound to the scenario's network so
    # later steps can read it from the shared behave context.
    context.cost_function = cost_functions.QuadraticCost(neural_net=context.nnet)
 def test_get_lambda(self):
     quadracost = cost_functions.QuadraticCost(neural_net=self.net)
     self.assertEqual(quadracost.get_lambda(), 0)