Example #1
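A pytest-style contract test: the optimizer supplied by the optimizer fixture minimizes the sum-of-squares function (wrapped with a finite-differences gradient) from the starting point [1, -1], and the result is checked both for convergence to zero and for the presence of the standard result fields. The snippets in this section do not show their imports; a minimal sketch of what they appear to assume is given below, with module paths deliberately omitted because they vary between versions:

    import numpy as np
    import pytest

    # Additionally assumed to be importable from the optimizer framework under
    # test (exact module paths are not shown in the original snippets):
    #   FunctionWithGradient, finite_differences_gradient, recorder,
    #   construct_history_info, MANDATORY_OPTIMIZATION_RESULT_FIELDS,
    #   ScipyOptimizer, sum_x_squared, rosenbrock_function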
    def test_optimizer_succeeds_with_optimizing_sum_of_squares_function(
            self, optimizer):
        cost_function = FunctionWithGradient(
            sum_x_squared, finite_differences_gradient(sum_x_squared))

        results = optimizer.minimize(cost_function,
                                     initial_params=np.array([1, -1]))

        assert results.opt_value == pytest.approx(0, abs=1e-5)
        assert results.opt_params == pytest.approx(np.zeros(2), abs=1e-4)

        assert "nfev" in results
        assert "nit" in results
        assert "opt_value" in results
        assert "opt_params" in results
        assert "history" in results
Example #2
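The same sum-of-squares check written in unittest style: instead of receiving the optimizer from a fixture, the test loops over self.optimizers and uses assertAlmostEqual / assertIn to verify the optimum and the result fields.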
    def test_optimizer_succeeds_with_optimizing_sum_of_squares_function(self):
        for optimizer in self.optimizers:
            cost_function = FunctionWithGradient(
                sum_x_squared, finite_differences_gradient(sum_x_squared))

            results = optimizer.minimize(cost_function,
                                         initial_params=np.array([1, -1]))
            self.assertAlmostEqual(results.opt_value, 0, places=5)
            self.assertAlmostEqual(results.opt_params[0], 0, places=4)
            self.assertAlmostEqual(results.opt_params[1], 0, places=4)

            self.assertIn("nfev", results.keys())
            self.assertIn("nit", results.keys())
            self.assertIn("opt_value", results.keys())
            self.assertIn("opt_params", results.keys())
            self.assertIn("history", results.keys())
Example #3
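The pytest fixture variant applied to the Rosenbrock function: starting from [0, 0], the optimizer is expected to reach the minimum at [1, 1] with value 0, and the same five result fields are required.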
    def test_optimizer_succeeds_with_optimizing_rosenbrock_function(
            self, optimizer):
        cost_function = FunctionWithGradient(
            rosenbrock_function,
            finite_differences_gradient(rosenbrock_function))

        results = optimizer.minimize(cost_function,
                                     initial_params=np.array([0, 0]))
        assert results.opt_value == pytest.approx(0, abs=1e-4)
        assert results.opt_params == pytest.approx(np.ones(2), abs=1e-3)

        assert "nfev" in results
        assert "nit" in results
        assert "opt_value" in results
        assert "opt_params" in results
        assert "history" in results
    def test_optimizer_succeeds_with_optimizing_rosenbrock_function(
            self, optimizer, rosenbrock_function, keep_history):
        cost_function = FunctionWithGradient(
            rosenbrock_function,
            finite_differences_gradient(rosenbrock_function))

        results = optimizer.minimize(cost_function,
                                     initial_params=np.array([0, 0]),
                                     keep_history=keep_history)
        assert results.opt_value == pytest.approx(0, abs=1e-4)
        assert results.opt_params == pytest.approx(np.ones(2), abs=1e-3)

        assert all(field in results
                   for field in MANDATORY_OPTIMIZATION_RESULT_FIELDS)

        assert "history" in results or not keep_history
        assert "gradient_history" in results or not keep_history
Example #5
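The unittest-style counterpart of the Rosenbrock test, again looping over self.optimizers and asserting the optimum and result fields with assertAlmostEqual / assertIn.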
    def test_optimizer_succeeds_with_optimizing_rosenbrock_function(self):
        for optimizer in self.optimizers:
            cost_function = FunctionWithGradient(
                rosenbrock_function,
                finite_differences_gradient(rosenbrock_function))

            results = optimizer.minimize(cost_function,
                                         initial_params=np.array([0, 0]))
            self.assertAlmostEqual(results.opt_value, 0, places=4)
            self.assertAlmostEqual(results.opt_params[0], 1, places=3)
            self.assertAlmostEqual(results.opt_params[1], 1, places=3)

            self.assertIn("nfev", results.keys())
            self.assertIn("nit", results.keys())
            self.assertIn("opt_value", results.keys())
            self.assertIn("opt_params", results.keys())
            self.assertIn("history", results.keys())
    def test_optimizer_succeeds_with_optimizing_sum_of_squares_function(
            self, optimizer, sum_x_squared, keep_history):

        cost_function = FunctionWithGradient(
            sum_x_squared, finite_differences_gradient(sum_x_squared))

        results = optimizer.minimize(cost_function,
                                     initial_params=np.array([1, -1]),
                                     keep_history=keep_history)

        assert results.opt_value == pytest.approx(0, abs=1e-5)
        assert results.opt_params == pytest.approx(np.zeros(2), abs=1e-4)

        assert all(field in results
                   for field in MANDATORY_OPTIMIZATION_RESULT_FIELDS)

        assert "history" in results or not keep_history
        assert "gradient_history" in results or not keep_history
    def test_SLSQP_with_inequality_constraints(self):
        # Given
        cost_function = FunctionWithGradient(
            rosenbrock_function,
            finite_differences_gradient(rosenbrock_function))
        constraints = {"type": "ineq", "fun": lambda x: x[0] + x[1] - 3}
        optimizer = ScipyOptimizer(method="SLSQP")
        initial_params = np.array([0, 0])

        # When
        results_without_constraints = optimizer.minimize(
            cost_function, initial_params=initial_params)
        optimizer.constraints = constraints
        results_with_constraints = optimizer.minimize(
            cost_function, initial_params=initial_params)

        # Then
        assert results_without_constraints.opt_value == pytest.approx(
            results_with_constraints.opt_value, abs=1e-1)
        assert results_with_constraints.opt_params.sum() >= 3
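Example #8
SLSQP with an equality constraint that forces sum_x_squared(x) to zero, i.e. pins the parameters to the origin; the Rosenbrock value there is 1, which is the expected optimum.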
    def test_SLSQP_with_equality_constraints(self):
        # Given
        cost_function = FunctionWithGradient(
            rosenbrock_function,
            finite_differences_gradient(rosenbrock_function))
        constraint_cost_function = sum_x_squared

        constraints = ({"type": "eq", "fun": constraint_cost_function}, )
        optimizer = ScipyOptimizer(method="SLSQP", constraints=constraints)
        initial_params = np.array([1, 1])
        target_params = np.array([0, 0])
        target_value = 1

        # When
        results = optimizer.minimize(cost_function,
                                     initial_params=initial_params)

        # Then
        assert results.opt_value == pytest.approx(target_value, abs=1e-3)
        assert results.opt_params == pytest.approx(target_params, abs=1e-3)
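Example #9
Verifies that gradient evaluations are recorded when keep_history=True: the cost function is wrapped with a recorder before being passed to the optimizer, so the optimizer's reported gradient_history can be compared entry by entry against the recorder's own gradient history.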
    def test_gradients_history_is_recorded_if_keep_history_is_true(
            self, optimizer, sum_x_squared):
        # To check that history is recorded correctly, we wrap cost_function
        # with a recorder. Optimizer should wrap it a second time and
        # therefore we can compare two histories to see if they agree.
        cost_function = recorder(
            FunctionWithGradient(sum_x_squared,
                                 finite_differences_gradient(sum_x_squared)))

        result = optimizer.minimize(cost_function,
                                    np.array([-1, 1]),
                                    keep_history=True)
        assert len(result.gradient_history) == len(
            cost_function.gradient.history)

        for result_history_entry, cost_function_history_entry in zip(
                result.gradient_history, cost_function.gradient.history):
            assert (result_history_entry.call_number ==
                    cost_function_history_entry.call_number)
            assert np.allclose(result_history_entry.params,
                               cost_function_history_entry.params)
            assert np.allclose(result_history_entry.value,
                               cost_function_history_entry.value)
Example #10
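A unit test for construct_history_info: after one call to a recorded FunctionWithGradient and one call to its gradient, the returned history and gradient_history each contain exactly one entry with the expected call number, parameters, and value.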
    def test_history_info_contains_gradient_history_for_function_with_gradient(
            self):
        cost_function = recorder(
            FunctionWithGradient(sum_x_squared,
                                 finite_differences_gradient(sum_x_squared)))

        cost_function(np.array([1, 2, 3]))
        cost_function.gradient(np.array([0, -1, 1]))

        history_info = construct_history_info(cost_function, True)

        assert len(history_info["history"]) == 1
        assert len(history_info["gradient_history"]) == 1

        history_entry = history_info["history"][0]
        assert history_entry.call_number == 0
        np.testing.assert_array_equal(history_entry.params, [1, 2, 3])
        assert history_entry.value == cost_function(np.array([1, 2, 3]))

        history_entry = history_info["gradient_history"][0]
        assert history_entry.call_number == 0
        np.testing.assert_array_equal(history_entry.params, [0, -1, 1])
        np.testing.assert_array_equal(
            history_entry.value, cost_function.gradient(np.array([0, -1, 1])))
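Example #11
A helper factory that wraps a plain cost function produced by cost_function_factory so that it also carries a finite-differences gradient. cost_function_factory is not defined in the snippet, so this is presumably a nested helper or closure excerpted from a larger fixture.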
    def cost_function_with_gradients_factory(*args, **kwargs):
        cost_function = cost_function_factory(*args, **kwargs)
        return FunctionWithGradient(cost_function,
                                    finite_differences_gradient(cost_function))