Example #1
def test_bug_11886():
    def opt(x):
        return x[0]**2 + x[1]**2

    with np.testing.suppress_warnings() as sup:
        sup.filter(PendingDeprecationWarning)
        A = np.matrix(np.diag([1, 1]))
    lin_cons = LinearConstraint(A, -1, np.inf)
    minimize(opt, 2 * [1],
             constraints=lin_cons)  # just checking that there are no errors
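Note: the snippet above relies on the test module's imports. A self-contained sketch of the same check, substituting a plain ndarray for the deprecated np.matrix (a substitution for illustration, not the original test's intent):

import numpy as np
from scipy.optimize import LinearConstraint, minimize

def opt(x):
    return x[0]**2 + x[1]**2

# identity matrix as a plain ndarray; scalar bounds broadcast to each row
lin_cons = LinearConstraint(np.eye(2), -1, np.inf)
res = minimize(opt, 2 * [1], constraints=lin_cons)  # should run without errors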
Example #2
    def check_limits(self, method, default_iters):
        for start_v in [[0.1, 0.1], [1, 1], [2, 2]]:
            for mfev in [50, 500, 5000]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func,
                                        start_v,
                                        method=method,
                                        options={"maxfev": mfev})
                assert_(self.funcalls == res["nfev"])
                if res["success"]:
                    assert_(res["nfev"] < mfev)
                else:
                    assert_(res["nfev"] >= mfev)
            for mit in [50, 500, 5000]:
                res = optimize.minimize(self.slow_func,
                                        start_v,
                                        method=method,
                                        options={"maxiter": mit})
                if res["success"]:
                    assert_(res["nit"] <= mit)
                else:
                    assert_(res["nit"] >= mit)
            for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func,
                                        start_v,
                                        method=method,
                                        options={
                                            "maxiter": mit,
                                            "maxfev": mfev
                                        })
                assert_(self.funcalls == res["nfev"])
                if res["success"]:
                    assert_(res["nfev"] < mfev and res["nit"] <= mit)
                else:
                    assert_(res["nfev"] >= mfev or res["nit"] >= mit)
            for mfev, mit in [[np.inf, None], [None, np.inf]]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func,
                                        start_v,
                                        method=method,
                                        options={
                                            "maxiter": mit,
                                            "maxfev": mfev
                                        })
                assert_(self.funcalls == res["nfev"])
                if res["success"]:
                    if mfev is None:
                        assert_(res["nfev"] < default_iters * 2)
                    else:
                        assert_(res["nit"] <= default_iters * 2)
                else:
                    assert_(res["nfev"] >= default_iters * 2
                            or res["nit"] >= default_iters * 2)
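For orientation, a minimal standalone sketch of the maxiter/maxfev options exercised above (Nelder-Mead is an arbitrary choice here; both options are documented for it):

import numpy as np
from scipy import optimize

res = optimize.minimize(optimize.rosen, np.zeros(5),
                        method='Nelder-Mead',
                        options={'maxiter': 50, 'maxfev': 500})
# nit is capped by maxiter; success is False when the cap cuts off convergence
print(res.nit, res.nfev, res.success)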
Example #3
    def test_attributes_present(self):
        attributes = [
            'nit', 'nfev', 'x', 'success', 'status', 'fun', 'message'
        ]
        skip = {'cobyla': ['nit']}
        for method in MINIMIZE_METHODS:
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning,
                           ("Method .+ does not use (gradient|Hessian.*)"
                            " information"))
                res = optimize.minimize(self.func,
                                        self.x0,
                                        method=method,
                                        jac=self.jac,
                                        hess=self.hess,
                                        hessp=self.hessp)
            for attribute in attributes:
                if method in skip and attribute in skip[method]:
                    continue

                assert hasattr(res, attribute)
                assert_(attribute in dir(res))

            # gh13001, OptimizeResult.message should be a str
            assert isinstance(res.message, str)
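Since OptimizeResult is a dict subclass, the same attributes can also be inspected directly; a quick sketch using the default method:

from scipy import optimize

res = optimize.minimize(optimize.rosen, [1.3, 0.7])  # defaults to BFGS
print(sorted(res.keys()))
# includes 'fun', 'message', 'nfev', 'nit', 'status', 'success', 'x', ...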
Example #4
def test_gh10880():
    # checks that verbose reporting works with trust-constr
    bnds = Bounds(1, 2)
    opts = {'maxiter': 1000, 'verbose': 2}
    minimize(lambda x: x**2,
             x0=2.,
             method='trust-constr',
             bounds=bnds,
             options=opts)

    opts = {'maxiter': 1000, 'verbose': 3}
    minimize(lambda x: x**2,
             x0=2.,
             method='trust-constr',
             bounds=bnds,
             options=opts)
Example #5
    def test_default_jac_and_hess(self):
        def fun(x):
            return (x - 1)**2

        bounds = [(-2, 2)]
        res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr')
        assert_array_almost_equal(res.x, 1, decimal=5)
    def test_multiple_constraint_objects(self):
        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
        x0 = [2, 0, 1]
        coni = []  # only inequality constraints (can use cobyla)
        methods = [
            "trust-constr",
        ]

        # mixed old and new
        coni.append([{
            'type': 'ineq',
            'fun': lambda x: x[0] - 2 * x[1] + 2
        },
                     NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])

        coni.append([
            LinearConstraint([1, -2, 0], -2, np.inf),
            NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)
        ])

        coni.append([
            NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf),
            NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)
        ])

        for con in coni:
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['trust-constr'], .8, rtol=1e-4)
Example #7
    def test_args(self):
        prob = MaratosTestArgs("a", 234)

        result = minimize(prob.fun,
                          prob.x0, ("a", 234),
                          method='trust-constr',
                          jac=prob.grad,
                          hess=prob.hess,
                          bounds=prob.bounds,
                          constraints=prob.constr)

        if prob.x_opt is not None:
            assert_array_almost_equal(result.x, prob.x_opt, decimal=2)

        # gtol
        if result.status == 1:
            assert_array_less(result.optimality, 1e-8)
        # xtol
        if result.status == 2:
            assert_array_less(result.tr_radius, 1e-8)
            if result.method == "tr_interior_point":
                assert_array_less(result.barrier_parameter, 1e-8)
        # max iter
        if result.status in (0, 3):
            raise RuntimeError("Invalid termination condition.")
def test_no_constraints():
    res = minimize(rosenbrock_function,
                   np.array([0.1, -0.5, -5.0]),
                   options={"disp": False})

    assert res.niter < 61  # SciPy version 1.5.0 needs 56 iterations
    assert_allclose(res.x, [1, 1, 1], rtol=1e-4)
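For reference, trust-constr's status codes as documented in SciPy: 1 means the gtol termination condition was satisfied; 2 means the xtol condition was satisfied; 0 means the iteration limit was exceeded; 3 means the callback requested termination. The checks above therefore treat 0 and 3 as invalid outcomes.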
Example #9
    def test_respect_maxiter(self, method):
        # Check that the number of iterations equals maxiter, assuming
        # convergence has not been reached before then
        MAXITER = 4

        x0 = np.zeros(10)

        sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
                            optimize.rosen_hess, None, None)

        # Set options
        kwargs = {'method': method, 'options': dict(maxiter=MAXITER)}

        if method in ('Newton-CG', ):
            kwargs['jac'] = sf.grad
        elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
                        'trust-constr'):
            kwargs['jac'] = sf.grad
            kwargs['hess'] = sf.hess

        sol = optimize.minimize(sf.fun, x0, **kwargs)
        assert sol.nit == MAXITER
        assert sol.nfev >= sf.nfev
        if hasattr(sol, 'njev'):
            assert sol.njev >= sf.ngev

        # method specific tests
        if method == 'SLSQP':
            assert sol.status == 9  # Iteration limit reached
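The ScalarFunction bookkeeping above can be approximated without SciPy internals; a hedged sketch that counts evaluations with a plain wrapper:

from scipy import optimize

calls = {'n': 0}

def counted_rosen(x):
    calls['n'] += 1
    return optimize.rosen(x)

sol = optimize.minimize(counted_rosen, [0.0] * 10,
                        method='BFGS', jac=optimize.rosen_der,
                        options={'maxiter': 4})
assert sol.nit == 4            # far from converged after 4 iterations
assert sol.nfev == calls['n']  # every objective call is counted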
Example #10
    def test_list_of_problems(self):
        list_of_problems = [
            Maratos(),
            Maratos(constr_hess='2-point'),
            Maratos(constr_hess=SR1()),
            Maratos(constr_jac='2-point', constr_hess=SR1()),
            MaratosGradInFunc(),
            HyperbolicIneq(),
            HyperbolicIneq(constr_hess='3-point'),
            HyperbolicIneq(constr_hess=BFGS()),
            HyperbolicIneq(constr_jac='3-point', constr_hess=BFGS()),
            Rosenbrock(),
            IneqRosenbrock(),
            EqIneqRosenbrock(),
            BoundedRosenbrock(),
            Elec(n_electrons=2),
            Elec(n_electrons=2, constr_hess='2-point'),
            Elec(n_electrons=2, constr_hess=SR1()),
            Elec(n_electrons=2, constr_jac='3-point', constr_hess=SR1())
        ]

        for prob in list_of_problems:
            for grad in (prob.grad, '3-point', False):
                for hess in (prob.hess, '3-point', SR1(),
                             BFGS(exception_strategy='damp_update'),
                             BFGS(exception_strategy='skip_update')):

                    # Remove exceptions
                    if grad in ('2-point', '3-point', 'cs', False) and \
                       hess in ('2-point', '3-point', 'cs'):
                        continue
                    if prob.grad is True and grad in ('3-point', False):
                        continue
                    with suppress_warnings() as sup:
                        sup.filter(UserWarning, "delta_grad == 0.0")
                        result = minimize(prob.fun,
                                          prob.x0,
                                          method='trust-constr',
                                          jac=grad,
                                          hess=hess,
                                          bounds=prob.bounds,
                                          constraints=prob.constr)

                    if prob.x_opt is not None:
                        assert_array_almost_equal(result.x,
                                                  prob.x_opt,
                                                  decimal=5)
                        # gtol
                        if result.status == 1:
                            assert_array_less(result.optimality, 1e-8)
                    # xtol
                    if result.status == 2:
                        assert_array_less(result.tr_radius, 1e-8)

                        if result.method == "tr_interior_point":
                            assert_array_less(result.barrier_parameter, 1e-8)
                    # max iter
                    if result.status in (0, 3):
                        raise RuntimeError("Invalid termination condition.")
Example #11
    def test_minimize_automethod(self):
        def f(x):
            return x**2

        def cons(x):
            return x - 2

        x0 = np.array([10.])
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.*")

            sol_0 = optimize.minimize(f, x0)
            sol_1 = optimize.minimize(f,
                                      x0,
                                      constraints=[{
                                          'type': 'ineq',
                                          'fun': cons
                                      }])
            sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
            sol_3 = optimize.minimize(f,
                                      x0,
                                      constraints=[{
                                          'type': 'ineq',
                                          'fun': cons
                                      }],
                                      bounds=[(5, 10)])
            sol_4 = optimize.minimize(f,
                                      x0,
                                      constraints=[{
                                          'type': 'ineq',
                                          'fun': cons
                                      }],
                                      bounds=[(1, 10)])

        for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
            assert_(sol.success)
        assert_allclose(sol_0.x, 0, atol=1e-7)
        # These last four were opened up from atol=1e-7 for scipy 1.6.0,
        # likely due to a different algorithm being used. Only the methods
        # SLSQP, L-BFGS-B, and BFGS are selected automatically by minimize.
        # Here, we're forced to use trust-constr for everything since
        # that's all that is available.
        assert_allclose(sol_1.x, 2, atol=1e-4, rtol=1e-4)
        assert_allclose(sol_2.x, 5, atol=1e-4, rtol=1e-4)
        assert_allclose(sol_3.x, 5, atol=1e-4, rtol=1e-4)
        assert_allclose(sol_4.x, 2, atol=1e-4, rtol=1e-4)
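The selection rule the comment refers to, summarized from the SciPy documentation (when method=None): BFGS for unconstrained problems, L-BFGS-B when only bounds are present, and SLSQP when constraints are given. A quick sketch of the bounds-only case:

from scipy import optimize

res = optimize.minimize(lambda x: (x[0] - 3)**2, [0.0], bounds=[(0, 10)])
# with bounds and no constraints, SciPy's minimize dispatches to L-BFGS-B
print(res.x)  # approximately [3.]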
Example #12
    def test_gh10771(self):
        # check that minimize passes bounds and constraints to a custom
        # minimizer without altering them.
        bounds = [(-2, 2), (0, 3)]
        constraints = 'constraints'

        def custmin(fun, x0, **options):
            assert options['bounds'] is bounds
            assert options['constraints'] is constraints
            return optimize.OptimizeResult()

        x0 = [1, 1]
        optimize.minimize(optimize.rosen,
                          x0,
                          method=custmin,
                          bounds=bounds,
                          constraints=constraints)
Example #13
    def test_no_constraints(self):
        prob = Rosenbrock()
        result = minimize(prob.fun,
                          prob.x0,
                          method='trust-constr',
                          jac=prob.grad,
                          hess=prob.hess)
        assert_array_almost_equal(result.x, prob.x_opt, decimal=5)
def test_bounds_class():
    res = minimize(
        rosenbrock_function,
        np.array([0.1, -0.5, -5.0]),
        bounds=Bounds([-10, -10, -np.inf], [10, np.inf, np.inf]),
        method="trust-constr",
        options={"disp": False},
    )

    assert res.niter < 110  # SciPy version 1.5.0 needs 105 iterations
    assert_allclose(res.x, [1, 1, 1], rtol=1e-4)
Example #15
    def test_duplicate_evaluations(self, method):
        # check that there are no duplicate evaluations for any method
        jac = hess = None
        if method in ('newton-cg', 'trust-krylov', 'trust-exact', 'trust-ncg',
                      'dogleg'):
            jac = self.grad
        if method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg'):
            hess = self.hess

        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            # for trust-constr
            sup.filter(UserWarning, "delta_grad == 0.*")
            optimize.minimize(self.func,
                              self.startparams,
                              method=method,
                              jac=jac,
                              hess=hess)

        for i in range(1, len(self.trace)):
            if np.array_equal(self.trace[i - 1], self.trace[i]):
                raise RuntimeError(
                    "Duplicate evaluations made by {}".format(method))
Example #16
    def test_custom(self):
        # This function comes from the documentation example.
        def custmin(fun,
                    x0,
                    args=(),
                    maxfev=None,
                    stepsize=0.1,
                    maxiter=100,
                    callback=None,
                    **options):
            bestx = x0
            besty = fun(x0)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for dim in range(np.size(x0)):
                    for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
                        testx = np.copy(bestx)
                        testx[dim] = s
                        testy = fun(testx, *args)
                        funcalls += 1
                        if testy < besty:
                            besty = testy
                            bestx = testx
                            improved = True
                    if callback is not None:
                        callback(bestx)
                    if maxfev is not None and funcalls >= maxfev:
                        stop = True
                        break

            return optimize.OptimizeResult(fun=besty,
                                           x=bestx,
                                           nit=niter,
                                           nfev=funcalls,
                                           success=(niter > 1))

        x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
        res = optimize.minimize(optimize.rosen,
                                x0,
                                method=custmin,
                                options=dict(stepsize=0.05))
        assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)
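The only contract SciPy documents for a callable passed as method is that it accepts the objective, x0, args, and the collected options, and returns an OptimizeResult; a minimal sketch with a hypothetical do-nothing minimizer:

from scipy import optimize

def noop_min(fun, x0, args=(), **options):
    # hypothetical minimizer: evaluate once and return x0 unchanged
    return optimize.OptimizeResult(x=x0, fun=fun(x0, *args),
                                   nit=0, nfev=1, success=True)

res = optimize.minimize(optimize.rosen, [1.0, 1.0], method=noop_min)
print(res.fun)  # 0.0 at the Rosenbrock minimum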
Example #17
    def test_nan_values(self, method):
        # Check that nan values result in a failed exit status
        np.random.seed(1234)

        count = [0]

        def func(x):
            return np.nan

        def func2(x):
            count[0] += 1
            if count[0] > 2:
                return np.nan
            else:
                return np.random.rand()

        def grad(x):
            return np.array([1.0])

        def hess(x):
            return np.array([[1.0]])

        x0 = np.array([1.0])

        needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact',
                                'trust-ncg', 'dogleg')
        needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg',
                                'dogleg')

        funcs = [func, func2]
        grads = [grad] if needs_grad else [grad, None]
        hesss = [hess] if needs_hess else [hess, None]

        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.*")
            sup.filter(RuntimeWarning, ".*does not use Hessian.*")
            sup.filter(RuntimeWarning, ".*does not use gradient.*")

            for f, g, h in itertools.product(funcs, grads, hesss):
                count = [0]
                sol = optimize.minimize(f,
                                        x0,
                                        jac=g,
                                        hess=h,
                                        method=method,
                                        options=dict(maxiter=20))
                assert_equal(sol.success, False)
    def test_constraint_dictionary_2(self):
        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
        cons = {
            'type': 'eq',
            'fun': lambda x, p1, p2: p1 * x[0] - p2 * x[1],
            'args': (1, 1.1),
            'jac': lambda x, p1, p2: np.array([[p1, -p2]])
        }
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            res = minimize(fun,
                           self.x0,
                           method=self.method,
                           bounds=self.bnds,
                           constraints=cons)
        assert_allclose(res.x, [1.7918552, 1.62895927])
        assert_allclose(res.fun, 1.3857466063348418)
    def test_constraint_dictionary_3(self):
        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
        cons = [{
            'type': 'ineq',
            'fun': lambda x: x[0] - 2 * x[1] + 2
        },
                NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)]

        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            res = minimize(fun,
                           self.x0,
                           method=self.method,
                           bounds=self.bnds,
                           constraints=cons)
        assert_allclose(res.x, [1.75, 1.75], rtol=1e-4)
        assert_allclose(res.fun, 1.125, rtol=1e-4)
Example #20
    def test_issue_9044(self):
        # https://github.com/scipy/scipy/issues/9044
        # Test that the returned `OptimizeResult` contains keys consistent with
        # other solvers.

        def callback(x, info):
            assert_('nit' in info)
            assert_('niter' in info)

        result = minimize(lambda x: x**2, [0],
                          jac=lambda x: 2 * x,
                          hess=lambda x: 2,
                          callback=callback,
                          method='trust-constr')
        assert_(result.get('success'))
        assert_(result.get('nit', -1) == 1)

        # Also check existence of the 'niter' attribute, for backward
        # compatibility
        assert_(result.get('niter', -1) == 1)
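As the test shows, trust-constr (unlike most methods) invokes the callback as callback(xk, state), where state is an intermediate OptimizeResult; a minimal sketch (the quadratic here is arbitrary):

from scipy.optimize import minimize

def cb(xk, state):
    # state carries per-iteration bookkeeping such as 'nit' and 'niter'
    print(state.nit, state.niter)

minimize(lambda x: (x[0] - 1)**2, [5.0], method='trust-constr', callback=cb)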
Example #21
    def test_respect_maxiter_trust_constr_ineq_constraints(self):
        # special case of minimization with trust-constr and inequality
        # constraints to check maxiter limit is obeyed when using internal
        # method 'tr_interior_point'
        MAXITER = 4
        f = optimize.rosen
        jac = optimize.rosen_der
        hess = optimize.rosen_hess

        fun = lambda x: np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
        cons = ({'type': 'ineq', 'fun': fun}, )

        x0 = np.zeros(10)
        sol = optimize.minimize(f,
                                x0,
                                constraints=cons,
                                jac=jac,
                                hess=hess,
                                method='trust-constr',
                                options=dict(maxiter=MAXITER))
        assert sol.nit == MAXITER
    def test_constraint_dictionary_1(self):
        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
        cons = ({
            'type': 'ineq',
            'fun': lambda x: x[0] - 2 * x[1] + 2
        }, {
            'type': 'ineq',
            'fun': lambda x: -x[0] - 2 * x[1] + 6
        }, {
            'type': 'ineq',
            'fun': lambda x: -x[0] + 2 * x[1] + 2
        })

        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            res = minimize(fun,
                           self.x0,
                           method=self.method,
                           bounds=self.bnds,
                           constraints=cons)
        assert_allclose(res.x, [1.4, 1.7], rtol=1e-4)
        assert_allclose(res.fun, 0.8, rtol=1e-4)
Example #23
    def test_empty_constraint(self):
        def function(x):
            return x[0]**2 + x[1]**2

        def functionjacobian(x):
            return np.array([2. * x[0], 2. * x[1]])

        def functionhvp(x, v):
            return 2. * v

        def constraint(x):
            return np.array([x[0]**2 - x[1]**2])

        def constraintjacobian(x):
            return np.array([[2 * x[0], -2 * x[1]]])

        def constraintlcoh(x, v):
            return np.array([[2., 0.], [0., -2.]]) * v[0]

        constraint = NonlinearConstraint(constraint, 1., np.inf,
                                         constraintjacobian, constraintlcoh)

        startpoint = [1., 2.]

        bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf])

        result = minimize(function,
                          startpoint,
                          method='trust-constr',
                          jac=functionjacobian,
                          hessp=functionhvp,
                          constraints=[constraint],
                          bounds=bounds)

        # Note that the SciPy 1.6.0 version of trust-constr can achieve
        # decimal=4, likely due to differences in the factorization method;
        # this version does not support the NormalEquation or
        # QRFactorization methods.
        assert_array_almost_equal(abs(result.x), np.array([1, 0]), decimal=2)
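The hessp argument above supplies Hessian-vector products in place of a full Hessian matrix; an unconstrained sketch of the same pattern:

import numpy as np
from scipy.optimize import minimize

def f(x):
    return x[0]**2 + x[1]**2

def jac(x):
    return 2.0 * x

def hessp(x, v):
    return 2.0 * v  # Hessian is 2*I, so H @ v == 2*v

res = minimize(f, [1.0, 2.0], method='trust-constr', jac=jac, hessp=hessp)
np.testing.assert_allclose(res.x, [0, 0], atol=1e-6)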
def test_generate_pareto_data(data, factorization_method, rtol, iter_buffer):
    inputs = np.array(data["inputs"])
    outputs = np.array(data["outputs"])
    y_axis_index = data["y_axis_index"]
    y_axis_goal = data["y_axis_goal"]
    x_axis_index = data["x_axis_index"]
    x_axis_goal = data["x_axis_goal"]
    bounds = data["bounds"]
    output_targets = data["output_targets"]
    baseline_pareto_points = np.array(data["pareto_points"])
    baseline_total_iterations = data["total_iterations"]

    terms, response_surfaces = get_response_surface(inputs, outputs)

    # check analytical gradients against numerical gradients
    for i in range(len(response_surfaces)):
        test_points = uniform(
            low=[pair[0] for pair in bounds],
            high=[pair[1] for pair in bounds],
            size=(10, len(bounds)),
        )
        for test_point in test_points:
            difference = check_grad(
                partial(evaluate_response_surface, terms,
                        response_surfaces[i]),
                partial(evaluate_response_surface_grad, terms,
                        response_surfaces[i]),
                test_point,
            )
            assert (difference / evaluate_response_surface(
                terms, response_surfaces[i], test_point) < 1e-4)

    # add equality constraints for any outputs with targets
    constraints = []
    for index, target in enumerate(output_targets):
        if target:
            constraints.append(
                NonlinearConstraint(
                    partial(evaluate_response_surface, terms,
                            response_surfaces[index]),
                    target,
                    target,
                    jac=partial(evaluate_response_surface_grad, terms,
                                response_surfaces[index]),
                ))

    def objective_func(x, index, sign=1.0):
        return evaluate_response_surface(terms,
                                         response_surfaces[index],
                                         x,
                                         factor=sign)

    def objective_func_grad(x, index, sign=1.0):
        return evaluate_response_surface_grad(terms,
                                              response_surfaces[index],
                                              x,
                                              factor=sign)

    x0 = np.array([(pair[0] + pair[1]) / 2 for pair in bounds])

    # get the x-axis range for the pareto optimization
    with suppress_warnings() as sup:
        sup.filter(UserWarning, "delta_grad == 0.*")
        # purposely leaving out jac here, will test below
        res = minimize(
            objective_func,
            x0,
            args=(x_axis_index, -1.0),
            bounds=bounds,
            constraints=constraints,
            options={
                "disp": False,
                "factorization_method": factorization_method
            },
        )
        x_max = -res.fun

        # x_min_starting_point = X[y[:,x_axis_index].argmin(),:]
        res = minimize(
            objective_func,
            x0,
            args=(x_axis_index, 1.0),
            bounds=bounds,
            constraints=constraints,
            method="trust-constr",
            options={
                "disp": False,
                "factorization_method": factorization_method
            },
        )
    x_min = res.fun

    pareto_points = np.linspace(x_min,
                                x_max,
                                num=baseline_pareto_points.shape[0])

    # find the actual pareto points
    pareto_input_values = []

    y_axis_sign = 1 if y_axis_goal == "min" else -1

    total_iterations = 0
    for x_value in pareto_points:
        if x_axis_goal == "min":
            limits = (-np.inf, x_value)
        else:
            limits = (x_value, np.inf)

        current_constraint = NonlinearConstraint(
            partial(evaluate_response_surface, terms,
                    response_surfaces[x_axis_index]),
            *limits,
            jac=partial(evaluate_response_surface_grad, terms,
                        response_surfaces[x_axis_index]))

        res = minimize(
            objective_func,
            x0,
            args=(y_axis_index, y_axis_sign),
            bounds=bounds,
            constraints=constraints + [
                current_constraint,
            ],
            jac=objective_func_grad,
            method="trust-constr",
            options={
                "disp": False,
                "factorization_method": factorization_method
            },
        )

        total_iterations += res.nit

        pareto_input_values.append(res.x)

    pareto_output_values = []
    for current_input in pareto_input_values:
        current_output = []
        for i in range(outputs.shape[1]):
            current_output.append(
                evaluate_response_surface(terms, response_surfaces[i],
                                          current_input))

        pareto_output_values.append(current_output)

    pareto_output_values = np.array(pareto_output_values)

    assert total_iterations < baseline_total_iterations + iter_buffer
    assert_allclose(pareto_output_values, baseline_pareto_points, rtol=rtol)
Example #25
    def routine(*a, **kw):
        kw['method'] = method
        return optimize.minimize(*a, **kw)
    def test_individual_constraint_objects(self):
        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
        x0 = [2, 0, 1]

        cone = []  # with equality constraints (can't use cobyla)
        coni = []  # only inequality constraints (can use cobyla)
        methods = [
            "trust-constr",
        ]

        # nonstandard data types for constraint equality bounds
        cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1))
        cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21]))
        cone.append(
            NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.array([1.21])))

        # multiple equalities
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]], 1.21,
                                1.21))  # two same equalities
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, 1.4],
                                [1.21, 1.4]))  # two different equalities
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, 1.21],
                                1.21))  # equality specified two ways
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, -np.inf],
                                [1.21, np.inf]))  # equality + unbounded

        # nonstandard data types for constraint inequality bounds
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf))
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf))
        coni.append(
            NonlinearConstraint(lambda x: x[0] - x[1], 1.21,
                                np.array([np.inf])))
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3))
        coni.append(
            NonlinearConstraint(lambda x: x[0] - x[1], np.array(-np.inf), -3))

        # multiple inequalities/equalities
        coni.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]], 1.21,
                                np.inf))  # two same inequalities
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, -np.inf],
                                [1.21, 1.4]))  # mixed equality/inequality
        coni.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.1, .8],
                                [1.2, 1.4]))  # bounded above and below
        coni.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [-1.2, -1.4],
                                [-1.1, -.8]))  # - bounded above and below

        # quick check of LinearConstraint class (very little new code to test)
        cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21))
        cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21))
        cone.append(
            LinearConstraint([[1, -1, 0], [0, 1, -1]], [1.21, -np.inf],
                             [1.21, 1.4]))

        # Solutions for SciPy 1.6.0 trust-constr algorithm
        solutions = [
            3.672050010240001, 3.672050010240001, 3.672050010240001,
            1.1250000102400013, 1.1250000102400013, 4.114869685317061,
            3.4616666889075245, 3.7616666954827016
        ]
        for i, con in enumerate(coni):
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['trust-constr'], solutions[i], rtol=1e-3)

        # Solutions from SciPy 1.6.0 trust-constr algorithm
        solutions = [
            3.125000000000001, 3.67205, 3.67205, 4.114866666666668,
            4.345399999999999, 4.114866666666668, 3.67205, 3.67205,
            3.672050000000001, 4.114866666666666, 3.6720500000000005
        ]
        for i, con in enumerate(cone):
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['trust-constr'], solutions[i], rtol=1e-5)
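As the equality cases above suggest, scalar and one-element array bounds are interchangeable for NonlinearConstraint; a closing sketch, assuming the broadcasting behavior described in the SciPy docs:

import numpy as np
from scipy.optimize import NonlinearConstraint, minimize

fun = lambda x: x[0]**2 + x[1]**2
con_scalar = NonlinearConstraint(lambda x: x[0] - x[1], 1, 1)
con_array = NonlinearConstraint(lambda x: x[0] - x[1], [1], np.array([1.0]))

res_a = minimize(fun, [2.0, 0.0], method='trust-constr', constraints=con_scalar)
res_b = minimize(fun, [2.0, 0.0], method='trust-constr', constraints=con_array)
np.testing.assert_allclose(res_a.fun, res_b.fun, rtol=1e-6)  # both ~0.5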