def getNLP(self):
    """
    Prepares and returns the optimization problem for Ipopt (an instance of
    ipyopt.Problem). For convenience it also does other preprocessing; it must
    be called after all functions are set. Do not destroy the driver after
    obtaining the problem.
    """
    ConstrainedOptimizationDriver.preprocess(self)

    conLowerBound = np.zeros((self._nCon,))
    conUpperBound = np.zeros((self._nCon,))
    i = len(self._constraintsEQ)
    conUpperBound[i:(i + len(self._constraintsGT))] = 1e20

    # assume row-major storage for gradient sparsity
    rg = range(self._nVar * self._nCon)
    self._sparseIndices = (np.array([i // self._nVar for i in rg], dtype=int),
                           np.array([i % self._nVar for i in rg], dtype=int))

    # create the optimization problem
    self._nlp = opt.Problem(self._nVar, self.getLowerBound(), self.getUpperBound(),
                            self._nCon, conLowerBound, conUpperBound,
                            self._sparseIndices, 0,
                            self._eval_f, self._eval_grad_f,
                            self._eval_g, self._eval_jac_g)
    return self._nlp
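# Hedged usage sketch for getNLP. Only getNLP/solve come from the code above;
# the driver construction and function-registration steps are hypothetical
# placeholders for whatever the surrounding driver framework provides:
#
#   driver = IpoptDriver()           # hypothetical concrete driver
#   ...                              # register objective/constraint functions
#   nlp = driver.getNLP()            # call only after all functions are set
#   x, obj, status = nlp.solve(x0)   # ipyopt.Problem.solve; keep `driver` alive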
def generic_problem(module, with_hess: bool = False, **kwargs):
    n = module.n
    eval_jac_g_sparsity_indices = sparsity_g(n)
    eval_h_sparsity_indices = sparsity_h(n)
    if with_hess:
        kwargs["eval_h"] = module.h
    _x_L = x_L(n)
    _x_U = x_U(n)
    g_L = numpy.array([0.0])
    g_U = numpy.array([4.0])
    p = ipyopt.Problem(
        n, _x_L, _x_U, 1, g_L, g_U,
        eval_jac_g_sparsity_indices, eval_h_sparsity_indices,
        module.f, module.grad_f, module.g, module.jac_g,
        **kwargs,
    )
    p.set(print_level=0, sb="yes")
    return p
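# A hedged usage sketch for generic_problem: `module` can be any namespace
# exposing n, f, grad_f, g, jac_g (and h when with_hess=True). The helpers
# sparsity_g, sparsity_h, x_L, x_U belong to the surrounding test module and
# are not shown here; this toy assumes sparsity_g(n) describes a dense
# single-row Jacobian. All names below are illustrative, not from the source.
from types import SimpleNamespace

import numpy

def _f(x):
    return float(numpy.sum(x ** 2))

def _grad_f(x, out):
    out[:] = 2.0 * x
    return out

def _g(x, out):
    out[0] = numpy.sum(x)  # single constraint, bounded by g_L = 0, g_U = 4
    return out

def _jac_g(x, out):
    out[:] = 1.0  # constant dense Jacobian row
    return out

toy_module = SimpleNamespace(n=4, f=_f, grad_f=_grad_f, g=_g, jac_g=_jac_g)
# p = generic_problem(toy_module)
# x, obj, status = p.solve(numpy.full(4, 0.5))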
def main():
    # verbose logging
    ipyopt.set_loglevel(ipyopt.LOGGING_DEBUG)

    # define the parameters and their box constraints
    nvar = 2
    x_L = numpy.array([-3, -3], dtype=float)
    x_U = numpy.array([3, 3], dtype=float)

    # define the inequality constraints
    ncon = 0
    g_L = numpy.array([], dtype=float)
    g_U = numpy.array([], dtype=float)

    # create the nonlinear programming model
    nlp = ipyopt.Problem(
        nvar,
        x_L,
        x_U,
        ncon,
        g_L,
        g_U,
        eval_jac_g.sparsity_indices,
        eval_h.sparsity_indices,
        eval_f,
        eval_grad_f,
        eval_g,
        eval_jac_g,
        eval_h,
        apply_new,
    )

    # define the initial guess
    x0 = numpy.array([-1.2, 1], dtype=float)

    # compute the results using ipopt
    results = nlp.solve(x0)

    # report the results
    print(results)
def main():  # pylint: disable=missing-function-docstring
    # define the parameters and their box constraints
    nvar = 2
    x_L = numpy.array([-3, -3], dtype=float)
    x_U = numpy.array([3, 3], dtype=float)

    # define the inequality constraints
    ncon = 0
    g_L = numpy.array([], dtype=float)
    g_U = numpy.array([], dtype=float)

    # create the nonlinear programming model
    nlp = ipyopt.Problem(
        nvar,
        x_L,
        x_U,
        ncon,
        g_L,
        g_U,
        eval_jac_g_sparsity_indices,
        eval_h_sparsity_indices,
        eval_f,
        eval_grad_f,
        eval_g,
        eval_jac_g,
        eval_h,
    )

    # define the initial guess
    x0 = numpy.array([-1.2, 1], dtype=float)

    # compute the results using ipopt
    results = nlp.solve(x0)

    # report the results
    print(results)
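# A minimal, self-contained sketch of the callbacks the two main() variants
# above assume. The original objective is not shown in this excerpt; the
# Rosenbrock function is used here as a stand-in (its classic start point,
# (-1.2, 1), matches the initial guess above). ipyopt's in-place convention:
# gradient, constraint, Jacobian, and Hessian callbacks write into a
# preallocated `out` array.
import numpy

def eval_f(x):
    # f(x, y) = 100 (y - x^2)^2 + (1 - x)^2
    return 100.0 * (x[1] - x[0] ** 2) ** 2 + (1.0 - x[0]) ** 2

def eval_grad_f(x, out):
    out[0] = -400.0 * x[0] * (x[1] - x[0] ** 2) - 2.0 * (1.0 - x[0])
    out[1] = 200.0 * (x[1] - x[0] ** 2)
    return out

def eval_g(x, out):
    return out  # ncon == 0: nothing to evaluate

def eval_jac_g(x, out):
    return out  # ncon == 0: empty Jacobian

def eval_h(x, lagrange, obj_factor, out):
    # lower triangle of obj_factor * Hess(f); no constraint terms (ncon == 0)
    out[0] = obj_factor * (1200.0 * x[0] ** 2 - 400.0 * x[1] + 2.0)  # (0, 0)
    out[1] = obj_factor * (-400.0 * x[0])                            # (1, 0)
    out[2] = obj_factor * 200.0                                      # (1, 1)
    return out

# empty Jacobian pattern and dense lower-triangular Hessian pattern
eval_jac_g_sparsity_indices = (numpy.array([], dtype=int), numpy.array([], dtype=int))
eval_h_sparsity_indices = (numpy.array([0, 1, 1]), numpy.array([0, 0, 1]))
# the first main() variant looks these up as function attributes instead:
eval_jac_g.sparsity_indices = eval_jac_g_sparsity_indices
eval_h.sparsity_indices = eval_h_sparsity_indices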
def _minimize(self, loss, params, init):
    import ipyopt

    if init:
        assign_values(params=params, values=init)
    evaluator = self.create_evaluator()
    criterion = self.create_criterion()

    # initial values as array
    xvalues = np.array(run(params))

    # get and set the limits
    lower = np.array([p.lower for p in params])
    upper = np.array([p.upper for p in params])

    nconstraints = 0
    empty_array = np.array([])
    nparams = len(params)
    hessian_sparsity_indices = np.meshgrid(range(nparams), range(nparams))

    minimizer_options = self.minimizer_options.copy()

    def gradient_inplace(x, out):
        gradient = evaluator.gradient(x)
        out[:] = gradient

    ipopt_options = minimizer_options.pop("ipopt").copy()

    print_level = self.verbosity
    if print_level == 8:
        print_level = 9
    elif print_level == 9:
        print_level = 11
    elif print_level == 10:
        if "print_timing_statistics" not in ipopt_options:
            ipopt_options["print_timing_statistics"] = "yes"
    ipopt_options["print_level"] = print_level

    ipopt_options["tol"] = self.tol
    ipopt_options["max_iter"] = self.get_maxiter()

    hessian = minimizer_options.pop("hessian")
    minimizer_kwargs = dict(
        n=nparams,
        xL=lower,
        xU=upper,
        m=nconstraints,
        gL=empty_array,
        gU=empty_array,  # no constraints
        sparsity_indices_jac_g=(empty_array, empty_array),
        sparsity_indices_hess=hessian_sparsity_indices,
        eval_f=evaluator.value,
        eval_grad_f=gradient_inplace,
        eval_g=lambda x, out: None,
        eval_jac_g=lambda x, out: None,
    )
    if hessian == "zfit":

        def hessian_inplace(x, out):
            hessian = evaluator.hessian(x)
            out[:] = hessian

        minimizer_kwargs["eval_h"] = hessian_inplace
    else:
        if hessian == "exact":
            ipopt_options["hessian_approximation"] = hessian
        else:
            ipopt_options["hessian_approximation"] = "limited-memory"
            ipopt_options["limited_memory_update_type"] = hessian
    # ipopt_options['dual_inf_tol'] = TODO?
    minimizer = ipyopt.Problem(**minimizer_kwargs)

    minimizer.set(**{k: v for k, v in ipopt_options.items() if v is not None})

    init_tol = min([
        math.sqrt(loss.errordef * self.tol),
        loss.errordef * self.tol * 1e2,
    ])
    # init_tol **= 0.5
    internal_tol = self._internal_tol
    internal_tol = {
        tol: init_tol if init is None else init
        for tol, init in internal_tol.items()
    }

    valid = True
    edm = None
    criterion_value = None
    valid_message = ""
    warm_start_options = (
        "warm_start_init_point",
        # "warm_start_same_structure",
        "warm_start_entire_iterate",
    )
    # minimizer.set_intermediate_callback(lambda *a, **k: print(a, k) or True)
    fmin = -999
    status = -999
    converged = False
    for i in range(self._internal_maxiter):
        minimizer.set(**internal_tol)

        # run the minimization
        try:
            xvalues, fmin, status = minimizer.solve(
                xvalues,
                # mult_g=constraint_multipliers,
                # mult_x_L=zl,
                # mult_x_U=zu
            )
        except MaximumIterationReached:
            maxiter_reached = True
            valid = False
            valid_message = "Maxiter reached, terminated without convergence"
        else:
            maxiter_reached = evaluator.maxiter_reached
        assign_values(params, xvalues)
        with evaluator.ignore_maxiter():
            result_prelim = FitResult.from_ipopt(
                loss=loss,
                params=params,
                values=xvalues,
                minimizer=self,
                problem=minimizer,
                fmin=fmin,
                converged=converged,
                status=status,
                edm=CRITERION_NOT_AVAILABLE,
                evaluator=evaluator,
                valid=valid,
                niter=None,
                criterion=criterion,
                message=valid_message,
            )
            converged = criterion.converged(result_prelim)
            criterion_value = criterion.last_value
            if isinstance(criterion, EDM):
                edm = criterion.last_value
            else:
                edm = CRITERION_NOT_AVAILABLE

        if self.verbosity > 5:
            print_minimization_status(
                converged=converged,
                criterion=criterion,
                evaluator=evaluator,
                i=i,
                fmin=fmin,
                internal_tol=internal_tol,
            )

        if converged or maxiter_reached:
            break

        # prepare for next run
        minimizer.set(**{option: "yes" for option in warm_start_options})

        # update the tolerances
        self._update_tol_inplace(criterion_value=criterion_value,
                                 internal_tol=internal_tol)
    else:
        valid = False
        valid_message = f"Invalid, criterion {criterion.name} is {criterion_value}, target {self.tol} not reached."

    # cleanup of convergence
    minimizer.set(**{option: "no" for option in warm_start_options})
    assign_values(params=params, values=xvalues)
    return FitResult.from_ipopt(
        loss=loss,
        params=params,
        minimizer=self,
        values=xvalues,
        problem=minimizer,
        fmin=fmin,
        status=status,
        edm=edm,
        criterion=criterion,
        niter=None,
        converged=converged,
        evaluator=evaluator,
        valid=valid,
        message=valid_message,
    )
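# Hedged sketch of how this _minimize is reached in practice. The public
# minimizer class name is not shown in this excerpt and is hypothetical here;
# minimize(loss) is zfit's standard entry point:
#
#   minimizer = IpyoptMinimizer(tol=1e-3, verbosity=5)  # hypothetical name
#   result = minimizer.minimize(loss)
#   print(result.fmin, result.converged, result.valid)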
# Hessian sparsity pattern: upper bidiagonal,
# / * * 0 ... 0 \
# | 0 * * ... 0 |
# |     ...     |
# | 0 ... 0 * * |
# \ 0 ... 0 0 * /
eval_h_sparsity_indices = (
    numpy.repeat(numpy.arange(nvar), 2)[:2 * nvar - 1],
    numpy.array([numpy.arange(nvar),
                 numpy.arange(1, nvar + 1)]).T.flatten()[:2 * nvar - 1],
)

nlp = ipyopt.Problem(
    nvar,
    x_L,
    x_U,
    ncon,
    g_L,
    g_U,
    eval_jac_g_sparsity_indices,
    eval_h_sparsity_indices,
    eval_f,
    eval_grad_f,
    eval_g,
    eval_jac_g,
    eval_h,
)

print("Going to call solve with x0 = {}".format(x0))

zl = numpy.zeros(nvar)
zu = numpy.zeros(nvar)
constraint_multipliers = numpy.zeros(ncon)
_x, obj, status = nlp.solve(x0, mult_g=constraint_multipliers, mult_x_L=zl, mult_x_U=zu)
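# On return, solve has filled the arrays passed via mult_g / mult_x_L /
# mult_x_U with the final constraint and bound multipliers, so the dual
# information can be inspected directly:
print(f"x* = {_x}")
print(f"f(x*) = {obj}, status = {status}")
print(f"bound multipliers: zl = {zl}, zu = {zu}")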
x = array_sym("x", n)
f = x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2]
g = [
    x[0] * x[1] * x[2] * x[3],
    x[0] * x[0] + x[1] * x[1] + x[2] * x[2] + x[3] * x[3],
]
c_api = SymNlp(f, g).compile()

nlp = ipyopt.Problem(
    n=n,
    x_l=numpy.ones(n),
    x_u=numpy.full(n, 5.0),
    m=m,
    g_l=numpy.array([25.0, 40.0]),
    g_u=numpy.array([2.0e19, 40.0]),
    **c_api,
)

x0 = numpy.array([1.0, 5.0, 5.0, 1.0])
print(f"Going to call solve with x0 = {x0}")
zl = numpy.zeros(n)
zu = numpy.zeros(n)
constraint_multipliers = numpy.zeros(m)
_x, obj, status = nlp.solve(x0, mult_g=constraint_multipliers, mult_x_L=zl, mult_x_U=zu)
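# Note: Ipopt treats bounds at or beyond its nlp_upper_bound_inf option
# (1e19 by default) as +infinity, so g_u[0] = 2.0e19 above means the first
# constraint has no upper bound — the same idiom as the 1e20 used in getNLP
# near the top of this section.
print(f"x* = {_x}, f(x*) = {obj}, status = {status}")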
# Hessian sparsity pattern: dense lower triangle,
# / * 0 0 0 \
# | * * 0 0 |
# | * * * 0 |
# \ * * * * /
eval_h_sparsity_indices = (
    array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]),
    array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]),
)

nlp = ipyopt.Problem(
    nvar,
    x_L,
    x_U,
    ncon,
    g_L,
    g_U,
    eval_jac_g_sparsity_indices,
    eval_h_sparsity_indices,
    capsules["f"],
    capsules["grad_f"],
    capsules["g"],
    capsules["jac_g"],
    capsules["h"],
)

x0 = array([1.0, 5.0, 5.0, 1.0])
print(f"Going to call solve with x0 = {x0}")
zl = zeros(nvar)
zu = zeros(nvar)
constraint_multipliers = zeros(ncon)
_x, obj, status = nlp.solve(x0, mult_g=constraint_multipliers, mult_x_L=zl, mult_x_U=zu)
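# `capsules` (built earlier in this example, not shown in this excerpt) maps
# callback names to PyCapsule objects wrapping C implementations; ipyopt
# accepts such capsules in place of Python callables, avoiding Python-call
# overhead on every evaluation.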
def _minimize(self, loss, params, init):
    try:
        import ipyopt
    except ImportError as error:
        raise ImportError("This requires the ipyopt library (https://gitlab.com/g-braeunlich/ipyopt)"
                          " to be installed. On a 'Linux' environment, you can install zfit with"
                          " `pip install zfit[ipyopt]` (or install ipyopt with pip). For MacOS, there are"
                          " currently no wheels (but they will come in the future). In this case, please"
                          " install ipyopt manually to use this minimizer,"
                          " or install zfit on a 'Linux' environment.") from error
    if init:
        assign_values(params=params, values=init)

    evaluator = self.create_evaluator()
    # initial values as array
    xvalues = np.array(run(params))

    # get and set the limits
    lower = np.array([p.lower for p in params])
    upper = np.array([p.upper for p in params])

    nconstraints = 0
    empty_array = np.array([])
    nparams = len(params)
    hessian_sparsity_indices = np.meshgrid(range(nparams), range(nparams))

    minimizer_options = self.minimizer_options.copy()

    def gradient_inplace(x, out):
        gradient = evaluator.gradient(x)
        out[:] = gradient

    ipopt_options = minimizer_options.pop('ipopt').copy()

    print_level = self.verbosity
    if print_level == 8:
        print_level = 9
    elif print_level == 9:
        print_level = 11
    elif print_level == 10:
        if 'print_timing_statistics' not in ipopt_options:
            ipopt_options['print_timing_statistics'] = 'yes'
    ipopt_options['print_level'] = print_level

    ipopt_options['tol'] = self.tol
    ipopt_options['max_iter'] = self.get_maxiter()

    hessian = minimizer_options.pop('hessian')
    minimizer_kwargs = dict(n=nparams,
                            xL=lower,
                            xU=upper,
                            m=nconstraints,
                            gL=empty_array,
                            gU=empty_array,  # no constraints
                            sparsity_indices_jac_g=(empty_array, empty_array),
                            sparsity_indices_hess=hessian_sparsity_indices,
                            eval_f=evaluator.value,
                            eval_grad_f=gradient_inplace,
                            eval_g=lambda x, out: None,
                            eval_jac_g=lambda x, out: None)
    if hessian == 'zfit':

        def hessian_inplace(x, out):
            hessian = evaluator.hessian(x)
            out[:] = hessian

        minimizer_kwargs['eval_h'] = hessian_inplace
    else:
        if hessian == 'exact':
            ipopt_options['hessian_approximation'] = hessian
        else:
            ipopt_options['hessian_approximation'] = 'limited-memory'
            ipopt_options['limited_memory_update_type'] = hessian
    # ipopt_options['dual_inf_tol'] = TODO?
    minimizer = ipyopt.Problem(**minimizer_kwargs)

    minimizer.set(**{k: v for k, v in ipopt_options.items() if v is not None})
    criterion = self.create_criterion()
    init_tol = min([math.sqrt(loss.errordef * self.tol), loss.errordef * self.tol * 1e2])
    # init_tol **= 0.5
    internal_tol = self._internal_tol
    internal_tol = {tol: init_tol if init is None else init for tol, init in internal_tol.items()}

    valid = True
    edm = None
    criterion_value = None
    valid_message = ""
    warm_start_options = (
        'warm_start_init_point',
        # 'warm_start_same_structure',
        'warm_start_entire_iterate'
    )
    # minimizer.set_intermediate_callback(lambda *a, **k: print(a, k) or True)
    fmin = -999
    status = -999
    converged = False
    for i in range(self._internal_maxiter):
        minimizer.set(**internal_tol)

        # run the minimization
        try:
            xvalues, fmin, status = minimizer.solve(xvalues,
                                                    # mult_g=constraint_multipliers,
                                                    # mult_x_L=zl,
                                                    # mult_x_U=zu
                                                    )
        except MaximumIterationReached:
            maxiter_reached = True
            valid = False
            valid_message = "Maxiter reached, terminated without convergence"
        else:
            maxiter_reached = evaluator.maxiter_reached
        assign_values(params, xvalues)
        with evaluator.ignore_maxiter():
            result_prelim = FitResult.from_ipopt(loss=loss,
                                                 params=params,
                                                 values=xvalues,
                                                 minimizer=self,
                                                 problem=minimizer,
                                                 fmin=fmin,
                                                 converged=converged,
                                                 status=status,
                                                 edm=CRITERION_NOT_AVAILABLE,
                                                 evaluator=evaluator,
                                                 valid=valid,
                                                 niter=None,
                                                 criterion=criterion,
                                                 message=valid_message)
            converged = criterion.converged(result_prelim)
            criterion_value = criterion.last_value
            if isinstance(criterion, EDM):
                edm = criterion.last_value
            else:
                edm = CRITERION_NOT_AVAILABLE

        if self.verbosity > 5:
            print_minimization_status(converged=converged,
                                      criterion=criterion,
                                      evaluator=evaluator,
                                      i=i,
                                      fmin=fmin,
                                      internal_tol=internal_tol)

        if converged or maxiter_reached:
            break

        # prepare for next run
        minimizer.set(**{option: 'yes' for option in warm_start_options})

        # update the tolerances
        self._update_tol_inplace(criterion_value=criterion_value, internal_tol=internal_tol)
    else:
        valid = False
        valid_message = f"Invalid, criterion {criterion.name} is {criterion_value}, target {self.tol} not reached."

    # cleanup of convergence
    minimizer.set(**{option: 'no' for option in warm_start_options})
    assign_values(params=params, values=xvalues)
    return FitResult.from_ipopt(loss=loss,
                                params=params,
                                minimizer=self,
                                values=xvalues,
                                problem=minimizer,
                                fmin=fmin,
                                status=status,
                                edm=edm,
                                criterion=criterion,
                                niter=None,
                                converged=converged,
                                evaluator=evaluator,
                                valid=valid,
                                message=valid_message)