Example #1
    def _optimize(self, XP0):
        if self.optimizer.upper() == 'IPOPT':
            res = ipopt.minimize_ipopt(self._action,
                                       XP0,
                                       jac=self._grad_action,
                                       bounds=self.bounds,
                                       options=self.opt_options)
            xstar = res.get('x')

        elif self.optimizer.upper() == 'SNOPT':
            m = 1
            n = len(self.bounds[:, 0])
            x0 = np.zeros(m + n)
            x0[:n] = XP0
            J = np.zeros(n).reshape(1, -1)
            J[0] = 100
            bl = -np.inf * np.ones(n + m)
            bl[:n] = self.bounds[:, 0]
            bu = np.inf * np.ones(n + m)
            bu[:n] = self.bounds[:, 1]

            res = snoptb(self._snopt_obj,
                         self._snopt_con,
                         nnObj=n,
                         nnCon=0,
                         nnJac=0,
                         iObj=0,
                         x0=x0,
                         bl=bl,
                         bu=bu,
                         J=J,
                         name='action',
                         options=self.opt_options)
            xstar = res.x[:-1]

        else:
            raise ValueError(
                "Unknown optimizer %r; expected 'IPOPT' or 'SNOPT'"
                % self.optimizer)

        return xstar, self._action(xstar)
Example #2
    def minimize(
            self,
            problem: Problem,
            x0: np.ndarray,
            id: str,
            history_options: HistoryOptions = None,
    ) -> OptimizerResult:

        if ipopt is None:
            raise ImportError(
                "This optimizer requires an installation of ipopt."
            )

        objective = problem.objective

        bounds = np.array([problem.lb, problem.ub]).T

        ret = ipopt.minimize_ipopt(
            fun=objective.get_fval,
            x0=x0,
            method=None,  # ipopt does not use this argument for anything
            jac=objective.get_grad,
            hess=None,  # ipopt does not support Hessian yet
            hessp=None,  # ipopt does not support Hessian vector product yet
            bounds=bounds,
            tol=None,  # can be set via options
            options=self.options,
        )

        # the ipopt return object is a scipy.optimize.OptimizeResult
        return OptimizerResult(
            x=ret.x,
            exitflag=ret.status,
            message=ret.message
        )
Example #3
File: core.py Project: andim/pyneqsys
    def _solve_ipopt(self, intern_x0, **kwargs):
        import warnings
        from ipopt import minimize_ipopt
        warnings.warn("ipopt interface untested at the moment")

        def f_cb(x):
            f_cb.nfev += 1
            return np.sum(np.abs(self.f_cb(x, self.internal_params)))
        f_cb.nfev = 0

        if self.j_cb is not None:
            def j_cb(x):
                j_cb.njev += 1
                return self.j_cb(x, self.internal_params)
            j_cb.njev = 0
            kwargs['jac'] = j_cb

        return minimize_ipopt(f_cb, intern_x0, **kwargs)
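The counter-attached-to-the-wrapper pattern used for `f_cb.nfev` above is generic; a minimal self-contained sketch of it (using `scipy.optimize.minimize` as a stand-in so it runs without an ipopt installation):

import numpy as np
from scipy.optimize import minimize  # stand-in for minimize_ipopt here

def make_counted(fun):
    """Wrap fun so every call is tallied on the wrapper's nfev attribute."""
    def wrapper(x):
        wrapper.nfev += 1
        return fun(x)
    wrapper.nfev = 0
    return wrapper

f = make_counted(lambda x: float(np.sum(x ** 2)))
res = minimize(f, np.ones(3))
print(res.x, f.nfev)  # solution near zero, plus the evaluation count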
Example #4
from scipy.optimize import rosen, rosen_der
from ipopt import minimize_ipopt
x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
res = minimize_ipopt(rosen, x0, jac=rosen_der)
print(res)
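Note that the `ipopt` module name used throughout these examples is the pre-1.0 package name; in cyipopt >= 1.0 the import is `from cyipopt import minimize_ipopt`. A sketch of the same Rosenbrock call against the newer name, with a scipy-style inequality constraint added (the constraint here is illustrative, not part of the original example):

import numpy as np
from scipy.optimize import rosen, rosen_der
from cyipopt import minimize_ipopt  # module renamed from `ipopt` in cyipopt 1.0

x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
# Illustrative scipy-style constraint: sum(x) <= 10
cons = [{"type": "ineq",
         "fun": lambda x: 10.0 - np.sum(x),
         "jac": lambda x: -np.ones(len(x))}]
res = minimize_ipopt(rosen, x0, jac=rosen_der, constraints=cons)
print(res.x, res.fun)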
Example #5
def volume_inequality_fun(rho):
    return max_volume - eval_volume(rho)


constraints = [{
    "type": "ineq",
    "fun": volume_inequality_fun,
    "jac": lambda x: jax.grad(volume_inequality_fun)(x),
}]

x0 = np.ones(C.dim()) * max_volume / (L * h)
res = minimize_ipopt(
    min_f,
    x0,
    jac=True,
    bounds=((0.0, 1.0), ) * C.dim(),
    constraints=constraints,
    options={
        "print_level": 5,
        "max_iter": 100
    },
)

rho_opt_final = numpy_to_fenics(res.x, fa.Function(C))

c = fn.plot(rho_opt_final)
plt.colorbar(c)
plt.show()

# Save optimal solution for visualizing with ParaView
# with XDMFFile("1_dist_load/control_solution_1.xdmf") as f:
#     f.write(rho_opt)
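The pattern in this example (a scipy-style constraint dict whose Jacobian comes from `jax.grad`) also works outside FEniCS; a self-contained sketch on a toy volume-style budget, assuming the newer `cyipopt` import:

import jax
import jax.numpy as jnp
import numpy as np
from cyipopt import minimize_ipopt  # assumed cyipopt >= 1.0 import

max_volume = 2.0

def volume_inequality_fun(rho):
    # feasible while the total "volume" stays under the budget
    return max_volume - jnp.sum(rho)

def objective(rho):
    return jnp.sum((rho - 0.7) ** 2)

constraints = [{
    "type": "ineq",
    "fun": volume_inequality_fun,
    "jac": lambda x: np.asarray(jax.grad(volume_inequality_fun)(x)),
}]

x0 = np.full(4, 0.5)
res = minimize_ipopt(objective,
                     x0,
                     jac=lambda x: np.asarray(jax.grad(objective)(x)),
                     bounds=((0.0, 1.0),) * 4,
                     constraints=constraints)
print(res.x)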
Example #6
File: core.py Project: bjodah/pyneqsys
    def _solve_ipopt(self, intern_x0, **kwargs):
        import warnings
        from ipopt import minimize_ipopt
        warnings.warn("ipopt interface untested at the moment")
        return minimize_ipopt(self.f_callback, intern_x0,
                              jac=self.j_callback, **kwargs)
Example #7
dname = os.path.dirname(abspath)
os.chdir(dname)
f0 = np.inf
for I in range(iterations):
    if initpts is not None:
        u = np.loadtxt(initpts, delimiter='\t').flatten()
    else:
        u = np.random.random((N, dim)).flatten()
    #
    # Bounds
    lb = np.zeros_like(u)
    ub = np.ones_like(u)
    bnds = tuple([(lb[i], ub[i]) for i in range(N * dim)])
    #
    res = minimize_ipopt(lambda X: gaussian_scp(X, C, dim), u,
                         jac=lambda X: gaussian_scp_grad(X, C, dim),
                         bounds=bnds, options={'maxiter': 1000})
    print("Status: %s\nEnergy: %10.4f\n" % (res.success, res.fun))
    if res.fun < f0:
        f0 = res.fun
        x0 = res.x
        # prompt = input("Save the config (y/[n])?\n")
        if save:
            fname = ('../out/G_' + str(C) + '_dim_' + str(dim) + '_N_'
                     + str(N) + '.out')
            # create the output directory if needed, then always save
            if not os.path.isdir("../out"):
                os.mkdir("../out")
            np.savetxt(fname, x0.reshape((-1, dim)), fmt='%.18f',
                       delimiter='\t')
        else:
Example #8
def estimate_mpec_ipopt(
    disc_fac,
    num_states,
    maint_func,
    maint_func_dev,
    num_params,
    scale,
    decision_mat,
    trans_mat,
    state_mat,
    optimizer_options,
    transition_results,
):
    """
    Estimation function of Mathematical Programming with Equilibrium Constraints
    (MPEC) in ruspy.


    Parameters
    ----------
    disc_fac : numpy.float
        see :ref:`disc_fac`
    num_states : int
        The size of the state space.
    maint_func: func
        see :ref: `maint_func`
    maint_func_dev: func
        see :ref: `maint_func_dev`
    num_params : int
        The number of parameters to be estimated.
    scale : numpy.float
        see :ref:`scale`
    decision_mat : numpy.array
        see :ref:`decision_mat`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    optimizer_options : dict
        The options chosen for the optimization algorithm in the initialization
        dictionairy.
    transition_results : dict
        The results from ``estimate_transitions``.

    Returns
    -------
    transition_results : dictionary
        see :ref:`result_trans`
    mpec_cost_parameters : dictionary
        see :ref:`result_costs`


    """

    if not optional_package_is_available:
        raise NotImplementedError(
            """To use this you need to install cyipopt. If you are a Mac or
            Linux user, the command is $ conda install -c conda-forge cyipopt.
            On Windows you have to install from source; a description can be
            found here: https://github.com/matthias-k/cyipopt""")

    del optimizer_options["algorithm"]
    gradient = optimizer_options.pop("derivative")
    params = optimizer_options.pop("params")
    lower_bounds = optimizer_options.pop("set_lower_bounds")
    upper_bounds = optimizer_options.pop("set_upper_bounds")
    bounds = np.vstack((lower_bounds, upper_bounds)).T
    bounds = list(map(tuple, bounds))
    if "get_expected_values" in optimizer_options:
        get_expected_values = optimizer_options.pop("get_expected_values")
    else:
        get_expected_values = "No"

    n_evaluations, neg_criterion = wrap_ipopt_likelihood(
        mpec_loglike_cost_params,
        args=(
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            state_mat,
            decision_mat,
            disc_fac,
            scale,
        ),
    )

    constraint_func = wrap_ipopt_constraint(
        mpec_constraint,
        args=(
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            trans_mat,
            disc_fac,
            scale,
        ),
    )

    if gradient == "No":

        def approx_gradient(params):
            fun = approx_derivative(neg_criterion, params, method="2-point")
            return fun

        gradient_func = approx_gradient

        def approx_jacobian(params):
            fun = approx_derivative(constraint_func, params, method="2-point")
            return fun

        jacobian_func = approx_jacobian
    else:
        gradient_func = partial(
            mpec_loglike_cost_params_derivative,
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            disc_fac,
            scale,
            decision_mat,
            state_mat,
        )
        jacobian_func = partial(
            mpec_constraint_derivative,
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            disc_fac,
            scale,
            trans_mat,
        )

    constraints = {
        "type": "eq",
        "fun": constraint_func,
        "jac": jacobian_func,
    }

    tic = time.perf_counter()
    if get_expected_values == "Yes":
        obs_costs = calc_obs_costs(num_states, maint_func, params, scale)
        ev = calc_fixp(trans_mat, obs_costs, disc_fac)[0]
        params = np.concatenate((ev, params))
    results_ipopt = minimize_ipopt(
        neg_criterion,
        params,
        bounds=bounds,
        jac=gradient_func,
        constraints=constraints,
        **optimizer_options,
    )
    toc = time.perf_counter()
    timing = toc - tic

    mpec_cost_parameters = {}
    mpec_cost_parameters["x"] = results_ipopt["x"]
    mpec_cost_parameters["fun"] = results_ipopt["fun"]
    if results_ipopt["success"] is True:
        mpec_cost_parameters["status"] = True
    else:
        mpec_cost_parameters["status"] = False
    mpec_cost_parameters["n_iterations"] = results_ipopt["nit"]
    mpec_cost_parameters["n_evaluations"] = results_ipopt["nfev"]
    mpec_cost_parameters["time"] = timing
    mpec_cost_parameters["n_evaluations_total"] = n_evaluations[0]

    return transition_results, mpec_cost_parameters
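The `gradient == "No"` branch above falls back on scipy's finite differences; a self-contained sketch of that `approx_derivative` pattern (the toy criterion is illustrative):

import numpy as np
from scipy.optimize._numdiff import approx_derivative

def neg_criterion(params):
    # toy scalar criterion standing in for mpec_loglike_cost_params
    return float(np.sum((params - 2.0) ** 2))

def approx_gradient(params):
    # two-point finite differences, as in the fallback branch above
    return approx_derivative(neg_criterion, params, method="2-point")

print(approx_gradient(np.zeros(3)))  # approximately [-4., -4., -4.]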
Example #9
    n_dev = to_gpu(n)
    cnf_dev = to_gpu(cnf.astype('float32'))
    grad_dev = to_gpu(grad.astype('float32'))
    pt_dev = to_gpu(pt.astype('float32'))

    # Bounds
    lb = np.zeros_like(cnf)
    ub = np.ones_like(cnf)
    bnds = tuple([(lb[i], ub[i]) for i in range(N * dim)])

    f0 = np.inf
    # for I in range(iterations):

    res = minimize_ipopt(
        lambda X: gaussian(X, pt_dev, cnf_dev, c_dev, pt, n),
        cnf,
        jac=lambda X: gaussian_grad(X, grad_dev, cnf_dev, c_dev, n),
        bounds=bnds,
        options={'maxiter': 1000})
    print("Status: %s\nEnergy: %10.4f\n" % (res.success, res.fun))
    if res.fun < f0:
        f0 = res.fun
        x0 = res.x
    if save:
        if not os.path.isdir("../out"):
            os.mkdir("../out")
        fname = ('../out/G_' + str(C) + '_dim_' + str(dim) + '_N_' + str(N) +
                 '.out')
        np.savetxt(fname, x0.reshape((-1, dim)), fmt='%.18f', delimiter='\t')
    else:
        pplot(x0, dim)
Example #10
        x0 = np.reshape(mat, (cov_dim * cov_dim, ))
    elif run_id == 7:
        mat = 25 * np.eye(cov_dim)
        x0 = np.reshape(mat, (cov_dim * cov_dim, ))
    elif run_id == 8:
        mat = 50 * np.eye(cov_dim)
        x0 = np.reshape(mat, (cov_dim * cov_dim, ))
    elif run_id == 9:
        mat = 75 * np.eye(cov_dim)
        x0 = np.reshape(mat, (cov_dim * cov_dim, ))
    elif run_id == 10:
        mat = 100 * np.eye(cov_dim)
        x0 = np.reshape(mat, (cov_dim * cov_dim, ))

    # solve problem
    res = minimize_ipopt(cost_fcn, x0, jac=None)

    # grab results
    A = np.reshape(res.x, (cov_dim, cov_dim))
    print("A: \n{}".format(A))

    # Save the result
    with open("ekf_matrix_A_{}.pkl".format(run_id), "wb") as fid:
        pickle.dump(A, fid)

elif mode == "collect":
    with open("ekf_matrix_A_{}.pkl".format(run_id), "rb") as fid:
        A = pickle.load(fid)

    simdata = data_dict['test_simdata'][0]
    for i in range(simdata.nruns):
Example #11
  """


#for cov_type in cov_types:
print("COV TYPE: {}".format(args.cov_type))

# Parse covariance type
(ind_l, ind_u) = state_indices(args.cov_type)
cov_dim = ind_u - ind_l

# Initial guess
x0 = np.random.randn(cov_dim*cov_dim)

# Solve optimization problem
if args.estimate_jac:
  res = minimize_ipopt(cost_fcn, x0, jac=None)
else:
  res = minimize_ipopt(cost_fcn, x0, jac=gradient_fcn)

# grab results
A = np.reshape(res.x, (cov_dim,cov_dim))
print("A: \n{}".format(A))

# Save the result
for est in estimators:
  est.add_param("linear_cov_scale_{}".format(args.cov_type), A)
for est in test_estimators:
  est.add_param("linear_cov_scale_{}".format(args.cov_type), A)
print("")

# add computed matrices to output file
Example #12
def nonlinear_basis_pursuit(func,
                            func_jac,
                            func_hess,
                            init_guess,
                            options,
                            eps=0,
                            return_full=False):
    nunknowns = init_guess.shape[0]
    nslack_variables = nunknowns

    def obj(x):
        val = np.sum(x[nunknowns:])
        grad = np.zeros(x.shape[0])
        grad[nunknowns:] = 1.0
        return val, grad

    def hessp(x, p):
        matvec = np.zeros(x.shape[0])
        return matvec

    I = sp.identity(nunknowns)
    tmp = np.array([[1, -1], [-1, -1]])
    A_con = sp.kron(tmp, I)
    #A_con = A_con.A#dense
    lb_con = -np.inf * np.ones(nunknowns + nslack_variables)
    ub_con = np.zeros(nunknowns + nslack_variables)
    #print(A_con.A)
    linear_constraint = LinearConstraint(A_con,
                                         lb_con,
                                         ub_con,
                                         keep_feasible=False)
    constraints = [linear_constraint]

    def constraint_obj(x):
        val = func(x[:nunknowns])
        if func_jac == True:
            return val[0]
        return val

    def constraint_jac(x):
        if func_jac == True:
            jac = func(x[:nunknowns])[1]
        else:
            jac = func_jac(x[:nunknowns])

        if jac.ndim == 1:
            jac = jac[np.newaxis, :]
        jac = sp.hstack(
            [jac,
             sp.csr_matrix((jac.shape[0], jac.shape[1]), dtype=float)])
        jac = sp.csr_matrix(jac)
        return jac

    if func_hess is not None:

        def constraint_hessian(x, v):
            # see https://prog.world/scipy-conditions-optimization/
            # for example how to define NonlinearConstraint hess
            H = func_hess(x[:nunknowns])
            hess = sp.lil_matrix((x.shape[0], x.shape[0]), dtype=float)
            hess[:nunknowns, :nunknowns] = H * v[0]
            return hess
    else:
        constraint_hessian = BFGS()

    # Experimental parameter: eps does not enforce exact interpolation but
    # allows some deviation.
    nonlinear_constraint = NonlinearConstraint(constraint_obj,
                                               0,
                                               eps,
                                               jac=constraint_jac,
                                               hess=constraint_hessian,
                                               keep_feasible=False)
    constraints.append(nonlinear_constraint)

    lbs = np.zeros(nunknowns + nslack_variables)
    lbs[:nunknowns] = -np.inf
    ubs = np.inf * np.ones(nunknowns + nslack_variables)
    bounds = Bounds(lbs, ubs)
    x0 = np.concatenate([init_guess, np.absolute(init_guess)])
    method = get_method(options)
    #method = options.get('method','slsqp')
    if 'method' in options:
        del options['method']
    if method != 'ipopt':
        res = minimize(obj,
                       x0,
                       method=method,
                       jac=True,
                       hessp=hessp,
                       options=options,
                       bounds=bounds,
                       constraints=constraints)
    else:
        from ipopt import minimize_ipopt
        from scipy.optimize._constraints import new_constraint_to_old
        con = new_constraint_to_old(constraints[0], x0)
        ipopt_bounds = []
        for ii in range(len(bounds.lb)):
            ipopt_bounds.append([bounds.lb[ii], bounds.ub[ii]])
        res = minimize_ipopt(obj,
                             x0,
                             method=method,
                             jac=True,
                             options=options,
                             constraints=con,
                             bounds=ipopt_bounds)

    if return_full:
        return res.x[:nunknowns], res
    else:
        return res.x[:nunknowns]
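Both ipopt branches in these routines convert new-style scipy constraints with the private helper `new_constraint_to_old`; a minimal sketch of that conversion (a private API, so subject to change across scipy versions):

import numpy as np
from scipy.optimize import LinearConstraint
from scipy.optimize._constraints import new_constraint_to_old

x0 = np.zeros(2)
# x_i <= 0 expressed as a new-style constraint
lin = LinearConstraint(np.eye(2), -np.inf, 0.0)
old = new_constraint_to_old(lin, x0)
print(old[0]["type"])  # old-style dict constraints, here 'ineq'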
Example #13
def lasso(func, func_jac, func_hess, init_guess, lamda, options):
    nunknowns = init_guess.shape[0]
    nslack_variables = nunknowns

    def obj(lamda, x):
        vals = func(x[:nunknowns])
        if func_jac == True:
            grad = vals[1]
            vals = vals[0]
        else:
            grad = func_jac(x[:nunknowns])
        vals += lamda * np.sum(x[nunknowns:])
        grad = np.concatenate([grad, lamda * np.ones(nslack_variables)])
        return vals, grad

    def hess(x):
        H = sp.lil_matrix((x.shape[0], x.shape[0]), dtype=float)
        H[:nunknowns, :nunknowns] = func_hess(x[:nunknowns])
        return H

    if func_hess is None:
        hess = None

    I = sp.identity(nunknowns)
    tmp = np.array([[1, -1], [-1, -1]])
    A_con = sp.kron(tmp, I)
    lb_con = -np.inf * np.ones(nunknowns + nslack_variables)
    ub_con = np.zeros(nunknowns + nslack_variables)
    linear_constraint = LinearConstraint(A_con,
                                         lb_con,
                                         ub_con,
                                         keep_feasible=False)
    constraints = [linear_constraint]
    #print(A_con.A)

    lbs = np.zeros(nunknowns + nslack_variables)
    lbs[:nunknowns] = -np.inf
    ubs = np.inf * np.ones(nunknowns + nslack_variables)
    bounds = Bounds(lbs, ubs)
    x0 = np.concatenate([init_guess, np.absolute(init_guess)])
    method = get_method(options)
    #method = options.get('method','slsqp')
    if 'method' in options:
        del options['method']
    if method != 'ipopt':
        res = minimize(partial(obj, lamda),
                       x0,
                       method=method,
                       jac=True,
                       hess=hess,
                       options=options,
                       bounds=bounds,
                       constraints=constraints)
    else:
        #jac_structure_old = lambda : np.nonzero(np.tile(np.eye(nunknowns), (2, 2)))
        def jac_structure():
            rows = np.repeat(np.arange(2 * nunknowns), 2)
            cols = np.empty_like(rows)
            cols[::2] = np.hstack([np.arange(nunknowns)] * 2)
            cols[1::2] = np.hstack([np.arange(nunknowns, 2 * nunknowns)] * 2)
            return rows, cols

        #assert np.allclose(jac_structure()[0],jac_structure_old()[0])
        #assert np.allclose(jac_structure()[1],jac_structure_old()[1])

        #jac_structure=None
        def hess_structure():
            h = np.zeros((2 * nunknowns, 2 * nunknowns))
            h[:nunknowns, :nunknowns] = np.tril(np.ones(
                (nunknowns, nunknowns)))
            return np.nonzero(h)

        if hess is None:
            hess_structure = None

        from ipopt import minimize_ipopt
        from scipy.optimize._constraints import new_constraint_to_old
        con = new_constraint_to_old(constraints[0], x0)
        res = minimize_ipopt(partial(obj, lamda),
                             x0,
                             method=method,
                             jac=True,
                             options=options,
                             constraints=con,
                             jac_structure=jac_structure,
                             hess_structure=hess_structure,
                             hess=hess)

    return res.x[:nunknowns], res
Example #14
def basis_pursuit_denoising(func, func_jac, func_hess, init_guess, eps,
                            options):

    t = np.zeros_like(init_guess)

    method = get_method(options)
    nunknowns = init_guess.shape[0]

    def constraint_obj(x):
        val = func(x)
        if func_jac == True:
            return val[0]
        return val

    def constraint_jac(x):
        if func_jac == True:
            jac = func(x)[1]
        else:
            jac = func_jac(x)
        return jac

    # if func_hess is None:
    #    constraint_hessian = BFGS()
    # else:
    #    def constraint_hessian(x,v):
    #        H = func_hess(x)
    #        return H*v.sum()
    constraint_hessian = BFGS()

    nonlinear_constraint = NonlinearConstraint(constraint_obj,
                                               0,
                                               eps**2,
                                               jac=constraint_jac,
                                               hess=constraint_hessian,
                                               keep_feasible=False)
    constraints = [nonlinear_constraint]

    # Maximum Number Outer Iterations
    maxiter = options.get('maxiter', 100)
    # Maximum Number Inner Iterations
    maxiter_inner = options.get('maxiter_inner', 1000)
    # Desired Dual Tolerance
    ttol = options.get('dualtol', 1e-6)
    # Verbosity Level
    verbose = options.get('verbose', 1)
    # Initial Penalty Parameter
    r = options.get('r0', 1)
    # Max Penalty Parameter
    rmax = options.get('rmax', 1e6)
    # Optimization Tolerance Update Factor
    tfac = options.get('tfac', 1e-1)
    # Penalty Parameter Update Factor
    rfac = options.get('rfac', 2)
    # Desired Feasibility Tolerance
    ctol = options.get('ctol', 1e-8)
    # Desired Optimality Tolerance
    gtol = options.get('gtol', 1e-8)
    # Initial Dual Tolerance
    ttol0 = options.get('ttol0', 1)
    # Initial Feasibility Tolerance
    ctol0 = options.get('ctol0', 1e-2)
    # Initial Optimality Tolerance
    gtol0 = options.get('gtol0', 1e-2)
    # Tolerance for termination for change in objective
    ftol = options.get('ftol', 1e-8)

    niter = 0
    x0 = init_guess
    f0 = np.inf
    nfev, njev, nhev = 0, 0, 0
    constr_nfev, constr_njev, constr_nhev = 0, 0, 0
    while True:
        obj = partial(kouri_smooth_l1_norm, t, r)
        jac = partial(kouri_smooth_l1_norm_gradient, t, r)
        #hessp = partial(kouri_smooth_l1_norm_hessp,t,r)
        hessp = None

        if method == 'slsqp':
            options0 = {
                'ftol': gtol0,
                'verbose': max(0, verbose - 2),
                'maxiter': maxiter_inner,
                'disp': (verbose > 2)
            }
        elif method == 'trust-constr':
            options0 = {
                'gtol': gtol0,
                'tol': gtol0,
                'verbose': max(0, verbose - 2),
                'barrier_tol': ctol,
                'maxiter': maxiter_inner,
                'disp': (verbose > 2)
            }
        elif method == 'cobyla':
            options0 = {
                'tol': gtol0,
                'verbose': max(0, verbose - 2),
                'maxiter': maxiter_inner,
                'rhoend': gtol0,
                'rhobeg': 1,
                'disp': (verbose > 2),
                'catol': ctol0
            }
        if method != 'ipopt':
            #init_guess=x0
            res = minimize(obj,
                           init_guess,
                           method=method,
                           jac=jac,
                           hessp=hessp,
                           options=options0,
                           constraints=constraints)
        else:
            from ipopt import minimize_ipopt
            options0 = {
                'tol': gtol0,
                'print_level': max(0, verbose - 1),
                'maxiter': int(maxiter_inner),
                'acceptable_constr_viol_tol': ctol0,
                'derivative_test': 'first-order',
                'nlp_scaling_constr_target_gradient': 1.
            }
            from scipy.optimize._constraints import new_constraint_to_old
            con = new_constraint_to_old(constraints[0], init_guess)
            res = minimize_ipopt(obj,
                                 init_guess,
                                 method=method,
                                 jac=jac,
                                 hessp=hessp,
                                 options=options0,
                                 constraints=con)
            #assert res.success, res

        if method == 'trust-constr':
            assert res.status == 1 or res.status == 2
        elif method == 'slsqp':
            assert res.status == 0
        assert res.success == True

        fdiff = np.linalg.norm(f0 - res.fun)
        xdiff = np.linalg.norm(x0 - res.x)
        t0 = t.copy()
        t = np.maximum(-1, np.minimum(1, t0 + r * res.x))

        tdiff = np.linalg.norm(t0 - t)
        niter += 1
        x0 = res.x.copy()
        f0 = res.fun

        nfev += res.nfev
        if hasattr(res, 'njev'):
            njev += res.njev
        if hasattr(res, 'nhev'):
            nhev += res.nhev

        if verbose > 1:
            #print('  i = %d  tdiff = %11.10e  r = %11.10e  ttol = %3.2e  ctol = %3.2e  gtol = %3.2e  iter = %d'%(niter,tdiff,r,ttol0,ctol0,gtol0,0))
            print(
                '  i = %d  tdiff = %11.10e  fdiff = %11.10e  xdiff = %11.10e  r = %11.10e  ttol = %3.2e  gtol = %3.2e  nfev = %d'
                % (niter, tdiff, fdiff, xdiff, r, ttol0, gtol0, nfev))

        if tdiff < ttol:
            msg = f'ttol {ttol} reached'
            status = 0
            #break

        if fdiff < ftol:
            msg = f'ftol {ftol} reached'
            status = 0
            break

        if niter >= maxiter:
            msg = f'maxiter {maxiter} reached'
            status = 1
            break

        if tdiff > ttol0:
            r = min(r * 2, rmax)
        ttol0, gtol0 = max(tfac * ttol0, ttol), max(tfac * gtol0, gtol)
        ctol0 = max(tfac * ctol0, ctol)

        #constr_nfev only for trust-constr
        #constr_nfev+=res.constr_nfev[0];constr_njev+=res.constr_njev[0];constr_nhev+=res.constr_nhev[0]

    if verbose > 0:
        print(msg)

    res = OptimizeResult(
        fun=res.fun,
        x=res.x,
        nit=niter,
        msg=msg,
        nfev=nfev,
        njev=njev,
        status=status)  #constr_nfev=constr_nfev,constr_njev=constr_njev)
    return res
Example #15
    #                     ))
    # print(norm(grad_dev.get() - approx_fprime(cnf,
    #                     lambda X: riesz(X, pt_dev, cnf_dev, s_dev, pt, n),
    #                     1e-8
    #                     )))

    # print(riesz(cnf, pt_dev, cnf_dev, s_dev, pt, n))
    # print(riesz_grad(cnf, grad_dev, grad3_dev, cnf_dev, s_dev, n)[-1])

    for I in range(iterations):
        if I > 0:
            cnf = 2 * pi * np.random.random((dim - 1) * N)
            cnf_dev = to_gpu(cnf)
        res = minimize_ipopt(lambda X: riesz(X, pt_dev, cnf_dev, s_dev, pt, n),
                             cnf,
                             jac=lambda X: riesz_grad(X, grad_dev, grad3_dev,
                                                      cnf_dev, s_dev, n),
                             bounds=bnds,
                             options={'maxiter': 1000})
        if res.fun < f0:
            print("Status: %s\nEnergy: %10.4f\n" % (res.success, res.fun))
            f0 = res.fun
            x0 = res.x
    if save:
        if not os.path.isdir("out"):
            os.mkdir("out")
        fname = ('out/G_' + str(S) + '_dim_' + str(dim) + '_N_' + str(N) +
                 '.out')
        np.savetxt(fname,
                   sph2cart(x0.reshape((-1, dim - 1))),
                   fmt='%.18f',
                   delimiter='\t')