Example #1
def IPOPTrunINIT(vars0,mission,includeDrag):
    import ipopt
    import constraints as ct
    import costFunction as CF
    nvar = len(vars0)
    
    
    con1= ct.eval_g(vars0,mission)
    g_L,g_U = ct.bounds(vars0,mission)
    ncon = len(g_L)
    
    jac = ct.eval_jac_g(vars0,mission,'1d')
    
    #print np.shape(jac),ncon,len(vars0)
    check  = CF.fprimeObjectiveMassINIT(vars0,mission)
    x_L,x_U = ct.getXLXU(vars0,mission)
    
    # start IPOPT classes
    class trajectory(object):
        def __init__(self,mission,includeDrag):
            self._mission = mission
            self._includeDrag = includeDrag
        def objective(self,x):
            ret = CF.ObjectiveMassINIT(x,self._mission)
            return ret
        def gradient(self,x):
            ret = CF.fprimeObjectiveMassINIT(x,self._mission)
            return ret
        def constraints(self,x):
            ret = ct.eval_g(x,self._mission,self._includeDrag)
            return ret
        def jacobian(self,x):
            ret = ct.eval_jac_g(x,self._mission,'1d',self._includeDrag)
            return ret
    nlp = ipopt.problem(
                        n=len(vars0),
                        m=ncon,
                        problem_obj=trajectory(mission,includeDrag),
                        lb=x_L,
                        ub=x_U,
                        cl=g_L,
                        cu=g_U
                        )
    
    
    #
    # Set solver options
    #
    nlp.addOption('mu_strategy', 'adaptive')
    nlp.addOption('tol', 5e-6)
    nlp.addOption('max_iter',1000)
    nlp.addOption('acceptable_constr_viol_tol',5e-6)
    nlp.addOption('constr_viol_tol',5e-6)
    #nlp.addOption('derivative_test','first-order')
    nlp.addOption('acceptable_tol',5e-6)
    nlp.addOption('expect_infeasible_problem','yes')
    
    x,info = nlp.solve(vars0)
    return x,info
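The pattern above recurs throughout these examples: ipopt.problem (the legacy cyipopt module) takes a problem_obj exposing objective, gradient, constraints, and jacobian callbacks, plus variable bounds (lb/ub) and constraint bounds (cl/cu), with addOption and solve driving the run. A minimal self-contained sketch of that protocol, using a made-up two-variable problem purely for illustration:

import numpy as np
import ipopt  # legacy cyipopt module name, as used throughout this listing

class MinimalProblem(object):
    # toy problem: minimize x0^2 + x1^2  subject to  x0 + x1 >= 1
    def objective(self, x):
        return x[0]**2 + x[1]**2

    def gradient(self, x):
        return np.array([2.0 * x[0], 2.0 * x[1]])

    def constraints(self, x):
        return np.array([x[0] + x[1]])

    def jacobian(self, x):
        # entries of the 1x2 constraint Jacobian, row-major
        return np.array([1.0, 1.0])

nlp = ipopt.problem(n=2, m=1, problem_obj=MinimalProblem(),
                    lb=[-10.0, -10.0], ub=[10.0, 10.0],
                    cl=[1.0], cu=[2.0e19])
nlp.addOption('hessian_approximation', 'limited-memory')  # no hessian() provided
x, info = nlp.solve(np.array([2.0, 2.0]))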
Example #2
 def optimizeSFA(self, print_level=0, max_iter=50):
     # create problem
     if self.constraint_matrix is None:
         handle = ipopt.problem(n=self.k,
                                m=0,
                                problem_obj=sfaObj(self),
                                lb=self.uprior[0],
                                ub=self.uprior[1])
     else:
         handle = ipopt.problem(n=self.k,
                                m=self.constraint_matrix.shape[0],
                                problem_obj=sfaObj(self),
                                lb=self.uprior[0],
                                ub=self.uprior[1],
                                cl=self.constraint_values[0],
                                cu=self.constraint_values[1])
     # add options
     handle.addOption('print_level', print_level)
     if max_iter is not None: handle.addOption('max_iter', max_iter)
     # initial point
     if self.soln is None:
         beta0 = np.linalg.solve(self.X.T.dot(self.X), self.X.T.dot(self.Y))
         gama0 = np.repeat(0.01, self.k_gama)
         deta0 = np.repeat(0.01, self.k_deta)
         x0 = np.hstack((beta0, gama0, deta0))
     else:
         x0 = self.soln
     # solve the problem
     soln, info = handle.solve(x0)
     # extract the solution
     self.soln = soln
     self.info = info
     self.beta_soln = soln[self.id_beta]
     self.gama_soln = soln[self.id_gama]
     self.deta_soln = soln[self.id_deta]
Example #3
    def solve(self, x0=None, tee=False):

        if not self._is_composite:
            cyipopt_solver = ipopt.problem(n=self._nlp.nx,
                                           m=self._nlp.ng,
                                           problem_obj=self._problem,
                                           lb=self._nlp.xl(),
                                           ub=self._nlp.xu(),
                                           cl=self._nlp.gl(),
                                           cu=self._nlp.gu())
        else:
            xl = self._nlp.xl()
            xu = self._nlp.xu()
            gl = self._nlp.gl()
            gu = self._nlp.gu()
            nx = int(self._nlp.nx)
            ng = int(self._nlp.ng)
            cyipopt_solver = ipopt.problem(n=nx,
                                           m=ng,
                                           problem_obj=self._problem,
                                           lb=xl.flatten(),
                                           ub=xu.flatten(),
                                           cl=gl.flatten(),
                                           cu=gu.flatten())
        if x0 is None:
            xstart = self._nlp.x_init()
            if self._is_composite:
                xstart = xstart.flatten()
        else:
            assert isinstance(x0, np.ndarray)
            assert x0.size == self._nlp.nx
            xstart = x0

        # this is needed until NLP hessian takes obj_factor as an input
        if not self._nlp._future_libraries:
            cyipopt_solver.addOption('nlp_scaling_method', 'none')

        # add options
        for k, v in self._options.items():
            cyipopt_solver.addOption(k, v)

        if tee:
            x, info = cyipopt_solver.solve(xstart)
        else:
            newstdout = redirect_stdout()
            x, info = cyipopt_solver.solve(xstart)
            os.dup2(newstdout, 1)

        return x, info
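redirect_stdout() is not defined anywhere in this listing. Judging from how it is used (its return value is later passed to os.dup2(newstdout, 1) to restore the stream), a plausible minimal sketch is:

import os

def redirect_stdout():
    # duplicate the current stdout file descriptor so it can be restored later
    saved_fd = os.dup(1)
    # point fd 1 at the null device to silence Ipopt's console output
    devnull_fd = os.open(os.devnull, os.O_WRONLY)
    os.dup2(devnull_fd, 1)
    os.close(devnull_fd)
    return saved_fd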
Example #4
    def solve(self, x0=None, tee=False):
        xl = self._problem.x_lb()
        xu = self._problem.x_ub()
        gl = self._problem.g_lb()
        gu = self._problem.g_ub()

        if x0 is None:
            x0 = self._problem.x_init()
        xstart = x0
        
        nx = len(xstart)
        ng = len(gl)

        cyipopt_solver = ipopt.problem(n=nx,
                                       m=ng,
                                       problem_obj=self._problem,
                                       lb=xl,
                                       ub=xu,
                                       cl=gl,
                                       cu=gu
        )

        # add options
        for k, v in self._options.items():
            cyipopt_solver.addOption(k, v)

        if tee:
            x, info = cyipopt_solver.solve(xstart)
        else:
            newstdout = redirect_stdout()
            x, info = cyipopt_solver.solve(xstart)
            os.dup2(newstdout, 1)

        return x, info
Example #5
 def solve(self, prob):
     cur_conds = prob.conditions
     comps = prob.pure_elements
     nlp = ipopt.problem(
         n=prob.num_vars,
         m=prob.num_constraints,
         problem_obj=prob,
         lb=prob.xl,
         ub=prob.xu,
         cl=prob.cl,
         cu=prob.cu
     )
     length_scale = np.min(np.abs(prob.cl))
     length_scale = max(length_scale, 1e-9)
     nlp.addOption(b'print_level', 0)
     if not self.verbose:
         # suppress the "This program contains Ipopt" banner
         nlp.addOption(b'sb', b'yes')
     nlp.addOption(b'tol', 1e-1)
     nlp.addOption(b'constr_viol_tol', 1e-12)
     # This option improves convergence when using L-BFGS
     nlp.addOption(b'limited_memory_max_history', 100)
     nlp.addOption(b'max_iter', 200)
     x, info = nlp.solve(prob.x0)
     dual_inf = np.max(np.abs(info['mult_g']*info['g']))
     if dual_inf > MAX_SOLVE_DRIVING_FORCE:
         if self.verbose:
             print('Trying to improve poor solution')
         # Constraints are getting tiny; need to be strict about bounds
         if length_scale < 1e-6:
             nlp.addOption(b'compl_inf_tol', 1e-15)
             nlp.addOption(b'bound_relax_factor', 1e-12)
             # This option ensures any bounds failures will fail "loudly"
             # Otherwise we are liable to have subtle mass balance errors
             nlp.addOption(b'honor_original_bounds', b'no')
         else:
             nlp.addOption(b'dual_inf_tol', MAX_SOLVE_DRIVING_FORCE)
         accurate_x, accurate_info = nlp.solve(x)
         if accurate_info['status'] >= 0:
             x, info = accurate_x, accurate_info
     chemical_potentials = -np.array(info['mult_g'])[-len(set(comps) - {'VA'}):]
     if info['status'] == -10:
         # Not enough degrees of freedom; nothing to do
         if len(prob.composition_sets) == 1:
             converged = True
             chemical_potentials[:] = prob.composition_sets[0].energy
         else:
             converged = False
     elif info['status'] < 0:
         if self.verbose:
             print('Calculation Failed: ', cur_conds, info['status_msg'])
         converged = False
     else:
         converged = True
     if self.verbose:
         print('Chemical Potentials', chemical_potentials)
         print(info['mult_x_L'])
         print(x)
         print('Status:', info['status'], info['status_msg'])
     return SolverResult(converged=converged, x=x, chemical_potentials=chemical_potentials)
Example #6
    def optimize(self, x0=None, print_level=0, max_iter=100, tol=1e-8):
        if x0 is None:
            x0 = np.hstack((self.beta, self.gamma, self.delta))
            if self.use_lprior:
                x0 = np.hstack((x0, np.zeros(self.k)))

        assert x0.size == self.k_total

        opt_problem = ipopt.problem(n=self.k_total,
                                    m=self.num_constraints,
                                    problem_obj=self,
                                    lb=self.uprior[0],
                                    ub=self.uprior[1],
                                    cl=self.cl,
                                    cu=self.cu)

        opt_problem.addOption('print_level', print_level)
        opt_problem.addOption('max_iter', max_iter)
        opt_problem.addOption('tol', tol)
        # opt_problem.addOption('bound_push', 1e-15)
        # opt_problem.addOption('bound_frac', 1e-15)

        soln, info = opt_problem.solve(x0)

        self.soln = soln
        self.info = info
        self.beta = soln[self.idx_beta]
        self.gamma = soln[self.idx_gamma]
        self.delta = soln[self.idx_delta]
Example #7
def solve_direct(self):

    #start values
    x0 = np.concatenate([
        np.ones((self.par['n_b'])) * self.bar_diam,
        np.zeros((self.par['n_dl']))
    ])

    #parameter bounds - bar diameters and all dislocations
    lb = self.bounds()[0]
    ub = self.bounds()[1]

    #constraints
    cl = self.limits()[0]
    cu = self.limits()[1]

    #define problem for ipopt
    problem_ipopt = ipopt.problem(n=self.par['n_var'],
                                  m=len(cl),
                                  problem_obj=self,
                                  lb=lb,
                                  ub=ub,
                                  cl=cl,
                                  cu=cu)

    #add options for ipopt
    add_option_ipopt(self, problem=problem_ipopt)

    #solve
    out_opt, out_info = problem_ipopt.solve(x0)

    return out_opt, out_info
Example #8
def IPOPTrunINIT(vars0, mission, includeDrag):
    import ipopt
    import constraints as ct
    import costFunction as CF
    nvar = len(vars0)

    con1 = ct.eval_g(vars0, mission)
    g_L, g_U = ct.bounds(vars0, mission)
    ncon = len(g_L)

    jac = ct.eval_jac_g(vars0, mission, '1d')

    #print np.shape(jac),ncon,len(vars0)
    check = CF.fprimeObjectiveMassINIT(vars0, mission)
    x_L, x_U = ct.getXLXU(vars0, mission)

    # start IPOPT classes
    class trajectory(object):
        def __init__(self, mission, includeDrag):
            self._mission = mission
            self._includeDrag = includeDrag

        def objective(self, x):
            ret = CF.ObjectiveMassINIT(x, self._mission)
            return ret

        def gradient(self, x):
            ret = CF.fprimeObjectiveMassINIT(x, self._mission)
            return ret

        def constraints(self, x):
            ret = ct.eval_g(x, self._mission, self._includeDrag)
            return ret

        def jacobian(self, x):
            ret = ct.eval_jac_g(x, self._mission, '1d', self._includeDrag)
            return ret

    nlp = ipopt.problem(n=len(vars0),
                        m=ncon,
                        problem_obj=trajectory(mission, includeDrag),
                        lb=x_L,
                        ub=x_U,
                        cl=g_L,
                        cu=g_U)

    #
    # Set solver options
    #
    nlp.addOption('mu_strategy', 'adaptive')
    nlp.addOption('tol', 5e-6)
    nlp.addOption('max_iter', 1000)
    nlp.addOption('acceptable_constr_viol_tol', 5e-6)
    nlp.addOption('constr_viol_tol', 5e-6)
    #nlp.addOption('derivative_test','first-order')
    nlp.addOption('acceptable_tol', 5e-6)
    nlp.addOption('expect_infeasible_problem', 'yes')

    x, info = nlp.solve(vars0)
    return x, info
Example #9
    def optimize(self,
                 parameters,
                 loss,
                 max_iters=1,
                 verbose=False,
                 *args,
                 **kwargs):
        """
        Optimize the given loss function with respect to the given parameters.

        Args:
            parameters (np.array): parameters to optimize.
            loss (callable): callable objective / loss function to minimize.
            bounds (tuple, list, np.array): parameter bounds. E.g. bounds=[0, np.inf]
            max_iters (int): number of maximum iterations.
            verbose (bool): if True, it will display information during the optimization process.
            *args: list of arguments to give to the loss function if callable.
            **kwargs: dictionary of arguments to give to the loss function if callable.

        Returns:
            float, torch.Tensor, np.array: loss scalar value.
            object: best parameters
        """
        N = len(parameters)

        # define initial value
        x0 = parameters  # important that the initial value != 0 for the computation of the grad!

        # define (lower and upper) bound constraints
        lb = [-1] * N
        ub = [1] * N

        # define constraints; if upper and lower constraints (resp. cu and cl) are equal then equality constraint
        cl = [1] + [0] * (N - 1)
        cu = [1] + [0] * (N - 1)

        # create ipopt problem object (contains the objective function, its gradient, and the constraints)
        opt = _IPopt(verbose=False)
        opt.add_constraint(NormConstraint())
        # NOTE: the original snippet passed undefined names `x` and `i` below;
        # here the current parameters stand in for `x`, and the full constraint
        # bound lists are used.
        opt.add_constraint(OrthogonalConstraint(x0))

        # define the nonlinear optimization problem
        self.optimizer = ipopt.problem(n=N,
                                       m=len(cl),
                                       problem_obj=opt,
                                       lb=lb,
                                       ub=ub,
                                       cl=cl,
                                       cu=cu)

        # solve problem
        x, info = self.optimizer.solve(x0)

        # save the results
        self.best_parameters = x
        self.best_result = info['obj_val']

        return self.best_result, self.best_parameters
Example #10
 def solve(self, prob):
     cur_conds = prob.conditions
     comps = prob.components
     nlp = ipopt.problem(n=prob.num_vars,
                         m=prob.num_constraints,
                         problem_obj=prob,
                         lb=prob.xl,
                         ub=prob.xu,
                         cl=prob.cl,
                         cu=prob.cu)
     length_scale = np.min(np.abs(prob.cl))
     length_scale = max(length_scale, 1e-9)
     nlp.addOption(b'print_level', 0)
     nlp.addOption(b'tol', 1e-1)
     nlp.addOption(b'constr_viol_tol', 1e-12)
     # This option improves convergence when using L-BFGS
     nlp.addOption(b'limited_memory_max_history', 100)
     nlp.addOption(b'max_iter', 200)
     x, info = nlp.solve(prob.x0)
     dual_inf = np.max(np.abs(info['mult_g'] * info['g']))
     if dual_inf > MAX_SOLVE_DRIVING_FORCE:
         if self.verbose:
             print('Trying to improve poor solution')
         # Constraints are getting tiny; need to be strict about bounds
         if length_scale < 1e-6:
             nlp.addOption(b'compl_inf_tol', 1e-15)
             nlp.addOption(b'bound_relax_factor', 1e-12)
             # This option ensures any bounds failures will fail "loudly"
             # Otherwise we are liable to have subtle mass balance errors
             nlp.addOption(b'honor_original_bounds', b'no')
         else:
             nlp.addOption(b'dual_inf_tol', MAX_SOLVE_DRIVING_FORCE)
         accurate_x, accurate_info = nlp.solve(x)
         if accurate_info['status'] >= 0:
             x, info = accurate_x, accurate_info
     chemical_potentials = -np.array(
         info['mult_g'])[-len(set(comps) - {'VA'}):]
     if info['status'] == -10:
         # Not enough degrees of freedom; nothing to do
         if len(prob.composition_sets) == 1:
             converged = True
             chemical_potentials[:] = prob.composition_sets[0].energy
         else:
             converged = False
     elif info['status'] < 0:
         if self.verbose:
             print('Calculation Failed: ', cur_conds, info['status_msg'])
         converged = False
     else:
         converged = True
     if self.verbose:
         print('Chemical Potentials', chemical_potentials)
         print(info['mult_x_L'])
         print(x)
         print('Status:', info['status'], info['status_msg'])
     return SolverResult(converged=converged,
                         x=x,
                         chemical_potentials=chemical_potentials)
Example #11
def main():
    #
    # Define the problem
    #
    x0 = [1.0, 5.0, 5.0, 1.0]

    lb = [1.0, 1.0, 1.0, 1.0]
    ub = [5.0, 5.0, 5.0, 5.0]

    cl = [25.0, 40.0]
    cu = [2.0e19, 40.0]

    nlp = ipopt.problem(
                n=len(x0),
                m=len(cl),
                problem_obj=hs071(),
                lb=lb,
                ub=ub,
                cl=cl,
                cu=cu
                )

    #
    # Set solver options
    #
    #nlp.addOption('derivative_test', 'second-order')
    nlp.addOption('mu_strategy', 'adaptive')
    nlp.addOption('tol', 1e-7)

    #
    # Scale the problem (Just for demonstration purposes)
    #
    nlp.setProblemScaling(
        obj_scaling=2,
        x_scaling=[1, 1, 1, 1]
        )

    nlp.addOption('nlp_scaling_method', 'user-scaling')
    nlp.addOption('linear_solver', 'mumps')
    # nlp.addOption('linear_solver', 'ma57')
    # nlp.addOption('print_level', 5)
    nlp.addOption('file_print_level', 5)
    nlp.addOption('output_file', 'py_ipopt2.out')

    #
    # Solve the problem
    #
    x, info = nlp.solve(x0)

    print("Solution of the primal variables: x=%s\n" % repr(x))

    print("Solution of the dual variables: lambda=%s\n" % repr(info['mult_g']))

    print("Objective=%s\n" % repr(info['obj_val']))

    print("All done")
Example #12
    def __init__(self, params):

        beta = params['beta']
        k0 = params['k0']
        f0 = params['f0']
        rmin = params['rmin']
        max_iter = params['max_iter']
        self.tag = params['tag']
        self.debug = params['debug']
        self.cantilever_key = params['cantilever']
        self.dir = params['dir']
        to_connect = params['to_connect']
        obj_scale = params['obj_scale']
        pmu = params['pmu']

        self.material = materials.PiezoMumpsMaterial()
        self.cantilever = self.select_cantilever()
        self.la = LaminateAnalysis(self.cantilever, self.material, to_connect,
                                   pmu)
        self.analyser = analysers.CantileverAnalyser(self.la.fem)

        self.sym = symmetry.Symmetry(self.la.fem)
        self.density_filter = density_filter.DensityFilter(self.la.fem, rmin)
        self.projection = projection.Projection(beta)
        self.x0 = self.sym.initial(self.la.fem.mesh.get_densities())

        # Attributes not set in constructor.
        self.xs_prev = None
        self.neta1 = 0.0
        self.k1 = 0.0
        self.f1 = 0.0
        self.solution = None
        self.info = None
        self.dneta_dp = None
        self.df1_dp = None
        self.dk1_dp = None
        self.records = []

        # Initialise the nonlinear optimizer.
        inf = 10e19
        self.nlp = ipopt.problem(n=self.sym.dimension,
                                 m=2,
                                 problem_obj=self,
                                 lb=1e-4 * np.ones(self.sym.dimension),
                                 ub=np.ones(self.sym.dimension),
                                 cl=np.array((-inf, f0)),
                                 cu=np.array((k0, inf)))

        # Configure the nonlinear optimizer.
        log_file = ''.join((self.dir, '/', self.tag, '-log.txt')).encode()
        self.nlp.addOption(b'max_iter', max_iter)
        self.nlp.addOption(b'tol', 1e-5)
        self.nlp.addOption(b'acceptable_tol', 1e-3)
        self.nlp.addOption(b'obj_scaling_factor', obj_scale)
        self.nlp.addOption(b'output_file', log_file)
        self.nlp.addOption(b'expect_infeasible_problem', b'yes')
Example #13
 def fit(self,
         x_init: np.ndarray,
         data: Optional[Data] = None,
         options: Optional[Dict[str, Any]] = None):
     problem_obj = _IPOPTProblem(self.model, data)
     if has_bounds(self.model) and has_constraints(self.model):
         problem = ipopt.problem(
             n=len(x_init),
             m=len(self.model.C),
             problem_obj=problem_obj,
             lb=self.model.lb,
             ub=self.model.ub,
             cl=self.model.c_lb,
             cu=self.model.c_ub,
         )
     elif has_bounds(self.model):
         problem_obj.constraints = None
         problem_obj.jacobian = None
         problem = ipopt.problem(
             n=len(x_init),
             m=0,
             problem_obj=problem_obj,
             lb=self.model.lb,
             ub=self.model.ub,
         )
     elif has_constraints(self.model):
         problem = ipopt.problem(
             n=len(x_init),
             m=len(self.model.C),
             problem_obj=problem_obj,
             cl=self.model.c_lb,
             cu=self.model.c_ub,
         )
     else:
         problem_obj.constraints = None
         problem_obj.jacobian = None
         problem = ipopt.problem(n=len(x_init),
                                 m=0,
                                 problem_obj=problem_obj)
      for name, val in options['solver_options'].items():
          problem.addOption(name, val)
     self.x_opt, self.info = problem.solve(x_init)
     self.fun_val_opt = problem_obj.objective(self.x_opt)
Example #14
def convex_formulation_hard_constraints():
    ''' Definition of the bilinear problem '''
    x0 = [2, 4, 40.0, 10.0]
    
    lb = [1.0, 0.0, -1000.0, -1000.0]
    ub = [3.0, 20.0, 1000.0, 1000.0]
    
    cl = [6.0, 0.0, 0.0, 0.0, 0.0]
    cu = [6.0, +2e19, +2e19, +2e19, +2e19]
    
    nlpConvex = ipopt.problem(
                n=len(x0),
                m=len(cl),
                problem_obj=buildConvexFormulationHardConstraint(x0),
                lb=lb,
                ub=ub,
                cl=cl,
                cu=cu
                )

    #
    # Set solver options
    #
    #nlp.addOption('derivative_test', 'second-order')
    nlpConvex.addOption('mu_strategy', 'adaptive')
    nlpConvex.addOption('jacobian_approximation', 'finite-difference-values')
    nlpConvex.addOption('hessian_approximation', 'limited-memory')    
    nlpConvex.addOption('tol', 1e-6)
    #nlpConvex.addOption('acceptable_tol', 1e-9)
    nlpConvex.addOption('dual_inf_tol', 1e-6)
    nlpConvex.addOption('constr_viol_tol', 1e-6)
    nlpConvex.addOption('compl_inf_tol', 1e-6)
    #
    # Scale the problem (Just for demonstration purposes)
    #
    nlpConvex.setProblemScaling(
        obj_scaling=1,
        x_scaling=[1.0,1.0,1.0,1.0]
        )
    nlpConvex.addOption('nlp_scaling_method', 'user-scaling')
    
    #
    # Solve the problem
    #
    x, info = nlpConvex.solve(x0)
    
    print "Solution of the primal variables: x=%s\n" % repr(x)
    
    print "Solution of the dual variables: lambda=%s\n" % repr(info['mult_g'])
    
    print "Objective=%s\n" % repr(info['obj_val'])
    
    print colored('torque check: ', 'red'), x[0]*x[1]
Example #15
def main():
    #
    # Define the problem
    #
    x0 = [1.0, 5.0, 5.0, 1.0]

    lb = [1.0, 1.0, 1.0, 1.0]
    ub = [5.0, 5.0, 5.0, 5.0]

    cl = [25.0, 40.0]
    cu = [2.0e19, 40.0]

    nlp = ipopt.problem(
                n=len(x0),
                m=len(cl),
                problem_obj=hs071(),
                lb=lb,
                ub=ub,
                cl=cl,
                cu=cu
                )

    #
    # Set solver options
    #
    #nlp.addOption('derivative_test', 'second-order')
    nlp.addOption('mu_strategy', 'adaptive')
    nlp.addOption('tol', 1e-7)

    #
    # Scale the problem (Just for demonstration purposes)
    #
    nlp.setProblemScaling(
        obj_scaling=2,
        x_scaling=[1, 1, 1, 1]
        )
    nlp.addOption('nlp_scaling_method', 'user-scaling')

    #
    # Solve the problem
    #
    x, info = nlp.solve(x0)

    print "Solution of the primal variables: x=%s\n" % repr(x)

    print "Solution of the dual variables: lambda=%s\n" % repr(info['mult_g'])

    print "Objective=%s\n" % repr(info['obj_val'])
Example #16
    def solve(self, x0=None, tee=False):
        xl = self._problem.x_lb()
        xu = self._problem.x_ub()
        gl = self._problem.g_lb()
        gu = self._problem.g_ub()

        if x0 is None:
            x0 = self._problem.x_init()
        xstart = x0

        nx = len(xstart)
        ng = len(gl)

        cyipopt_solver = ipopt.problem(n=nx,
                                       m=ng,
                                       problem_obj=self._problem,
                                       lb=xl,
                                       ub=xu,
                                       cl=gl,
                                       cu=gu)

        # check if we need scaling
        obj_scaling, x_scaling, g_scaling = self._problem.scaling_factors()
        if obj_scaling is not None or x_scaling is not None or g_scaling is not None:
            # need to set scaling factors
            if obj_scaling is None:
                obj_scaling = 1.0
            if x_scaling is None:
                x_scaling = np.ones(nx)
            if g_scaling is None:
                g_scaling = np.ones(ng)

            cyipopt_solver.setProblemScaling(obj_scaling, x_scaling, g_scaling)

        # add options
        for k, v in self._options.items():
            cyipopt_solver.addOption(k, v)

        if tee:
            x, info = cyipopt_solver.solve(xstart)
        else:
            newstdout = redirect_stdout()
            x, info = cyipopt_solver.solve(xstart)
            os.dup2(newstdout, 1)

        return x, info
Example #17
def main():
    #
    # Define the problem
    #
    x0 = [1.0, 5.0, 5.0, 1.0]

    lb = [1.0, 1.0, 1.0, 1.0]
    ub = [5.0, 5.0, 5.0, 5.0]

    cl = [25.0, 40.0]
    cu = [2.0e19, 40.0]

    nlp = ipopt.problem(n=len(x0),
                        m=len(cl),
                        problem_obj=hs071(),
                        lb=lb,
                        ub=ub,
                        cl=cl,
                        cu=cu)

    #
    # Set solver options
    #
    #nlp.addOption('derivative_test', 'second-order')
    nlp.addOption('mu_strategy', 'adaptive')
    nlp.addOption('jacobian_approximation', 'finite-difference-values')
    nlp.addOption('hessian_approximation', 'limited-memory')
    nlp.addOption('tol', 1e-7)

    #
    # Scale the problem (Just for demonstration purposes)
    #
    nlp.setProblemScaling(obj_scaling=2, x_scaling=[1, 1, 1, 1])
    nlp.addOption('nlp_scaling_method', 'user-scaling')

    #
    # Solve the problem
    #
    x, info = nlp.solve(x0)

    print "Solution of the primal variables: x=%s\n" % repr(x)

    print "Solution of the dual variables: lambda=%s\n" % repr(info['mult_g'])

    print "Objective=%s\n" % repr(info['obj_val'])
Example #18
    def initialize(self, beta0=None, print_level=0, max_iter=20):
        if beta0 is None:
            beta0 = np.repeat(0.1, self.k)

        assert beta0.size == self.k

        opt_problem = ipopt.problem(n=self.k,
                                    m=2 * self.N,
                                    problem_obj=self,
                                    cl=self.c[0],
                                    cu=self.c[1])

        opt_problem.addOption('print_level', print_level)
        opt_problem.addOption('max_iter', max_iter)

        beta_soln, info = opt_problem.solve(beta0)
        self.beta_soln = beta_soln
        self.info = info

        return beta_soln
Example #19
    def optimize(self,
                 x0=None,
                 print_level=0,
                 max_iter=100,
                 tol=1e-8,
                 acceptable_tol=1e-6,
                 nlp_scaling_method=None,
                 nlp_scaling_min_value=None):
        if x0 is None:
            x0 = np.hstack((self.beta, self.gamma, self.delta))
            if self.use_lprior:
                x0 = np.hstack((x0, np.zeros(self.k)))

        assert x0.size == self.k_total

        opt_problem = ipopt.problem(n=int(self.k_total),
                                    m=int(self.num_constraints),
                                    problem_obj=self,
                                    lb=self.uprior[0],
                                    ub=self.uprior[1],
                                    cl=self.cl,
                                    cu=self.cu)

        opt_problem.addOption('print_level', print_level)
        opt_problem.addOption('max_iter', max_iter)
        opt_problem.addOption('tol', tol)
        opt_problem.addOption('acceptable_tol', acceptable_tol)
        if nlp_scaling_method is not None:
            opt_problem.addOption('nlp_scaling_method', nlp_scaling_method)
        if nlp_scaling_min_value is not None:
            opt_problem.addOption('nlp_scaling_min_value',
                                  nlp_scaling_min_value)

        soln, info = opt_problem.solve(x0)

        self.soln = soln
        self.info = info
        self.beta = soln[self.idx_beta]
        self.gamma = soln[self.idx_gamma]
        self.delta = soln[self.idx_delta]
Example #20
    def solve(self):
        nlp = ipopt.problem(n=self.n_,
                            m=self.m_,
                            problem_obj=self,
                            lb=self.lowerBounds,
                            ub=self.upperBounds,
                            cl=self.constraintsLowerBounds,
                            cu=self.constraintsUpperBounds)

        nlp.addOption(b'print_level', 5)
        nlp.addOption(b'file_print_level', 12)
        nlp.addOption(b'output_file', b'ipopt_logs.txt')
        # nlp.addOption(b'derivative_test_tol',5.0e-5)
        # nlp.addOption(b'derivative_test', b'first-order')
        # nlp.addOption(b'derivative_test_perturbation', 1.0e-6)
        nlp.addOption(b'tol', 5.0e-6)
        nlp.addOption(b'max_iter', 500)
        nlp.addOption(b'hessian_approximation', b'limited-memory')
        nlp.addOption(b'jac_c_constant', b'yes')

        x0 = self.cost_.get_first_guess()
        return nlp.solve(x0)
Example #21
def subproblem_ALM(self):

    #parameter bounds - bar diameters and all dislocations
    lb = self.bounds()[0]
    ub = self.bounds()[1]

    #constraints
    cl = self.limits()[0]
    cu = self.limits()[1]

    #shortcut ipopt
    self.par_ALM['m'] = len(cl)

    #define problem for ipopt
    problem_ipopt = ipopt.problem(n=self.par_ALM['n'],
                                  m=self.par_ALM['m'],
                                  problem_obj=self,
                                  lb=lb,
                                  ub=ub,
                                  cl=cl,
                                  cu=cu)

    add_option_ipopt(self, problem=problem_ipopt)

    opt, info = problem_ipopt.solve(self.par_ALM['x'])

    self.par_ALM['x'] = opt
    self.par_ALM['mult_sub'] = info['mult_g']
    self.par_ALM['mult_lb'] = info['mult_x_L']
    self.par_ALM['mult_ub'] = info['mult_x_U']

    if self.verbose:
        print(info['status_msg'])
        print('x\t', self.par_ALM['x'])

    return
Example #22
        return x[0]

    def jacobian(self, x):
        return [1.0, 0.0]


lb = None  # np.ones([N,1])*(-3)
ub = None  # np.ones([N,1])*(3)
cl = [1.5]
cu = None

p = ipopt.problem(
    n=2,
    m=1,
    problem_obj=nlp(),
    lb=lb,
    ub=ub,
    cl=cl,
    cu=cu
)
p.addOption('print_level', 0)
p.addOption('max_iter', 20)

x0 = [0,0]
xstar, info = p.solve(x0)

print(xstar)



Example #23

cy_theta = np.linspace(0.5 * pi, -1.5 * pi, Nnode)
cy_thetad = np.zeros_like(cy_theta) + (cy_theta[1] - cy_theta[0]) / interval

model = VirtualCyclingSimulator(cy_theta, Nnode, nDoF, interval, lamda,
                                Human_const, Exo_const, Bike_param)

# the joint angles reference from inverse kinematics
joints_ref = generate_joints(model, cy_theta, Bike_param, Exo_const, Nnode,
                             interval)

nlp = ipopt.problem(n=Nnode * nDoF + (Nnode - 1) * int(nDoF / 2 - 1),
                    m=nDoF * (Nnode - 1),
                    problem_obj=VirtualCyclingSimulator(
                        cy_theta, Nnode, nDoF, interval, lamda, Human_const,
                        Exo_const, Bike_param),
                    lb=lb,
                    ub=ub,
                    cl=cl,
                    cu=cu)

nlp.addOption(b'linear_solver', b'MA86')
nlp.addOption(b'max_iter', 10000)
nlp.addOption(b'hessian_approximation', b'limited-memory')
nlp.addOption(b'tol', 1e-4)
nlp.addOption(b'acceptable_tol', 1e-3)
nlp.addOption(b'max_cpu_time', 1e+5)

x_init = np.zeros(Nnode * nDoF + (Nnode - 1) * int(nDoF / 2 - 1))

x_init[lhip] = joints_ref[:, 0] + 0.001 * np.random.random(Nnode)
Example #24
    def setup(self, u0):

        N = self.N
        T = self.T
        t0 = self.t0
        x0 = self.x0
        nu = self.nu
        lanes = self.lanes
        obstacle = self.obstacle
        posIdx = self.posIdx
        ns_option = self.ns_option

        if ns == 6:

            lb_Vddot = np.ones([N, 1]) * lb_VddotVal
            lb_Chiddot = np.ones([N, 1]) * lb_ChiddotVal

            ub_Vddot = np.ones([N, 1]) * ub_VddotVal
            ub_Chiddot = np.ones([N, 1]) * ub_ChiddotVal

            lb = np.concatenate([lb_Vddot, lb_Chiddot])
            ub = np.concatenate([ub_Vddot, ub_Chiddot])

        elif ns == 4:

            lb_Vdot = np.ones([N, 1]) * lb_VdotVal
            lb_Chidot = np.ones([N, 1]) * lb_ChidotVal

            ub_Vdot = np.ones([N, 1]) * ub_VdotVal
            ub_Chidot = np.ones([N, 1]) * ub_ChidotVal

            lb = np.concatenate([lb_Vdot, lb_Chidot])
            ub = np.concatenate([ub_Vdot, ub_Chidot])

        lataccel_max = lataccel_maxVal

        # Running Constraints
        # u = u0.flatten(1)
        # x = prob.computeOpenloopSolution(u, N, T, t0, x0)

        if obstacle.Present == True:

            lane1Lines = lanes.lane1Lines
            lane2Lines = lanes.lane2Lines
            acrossLines = lanes.acrossLines

            idx_Vehicle, laneNo = lanes.insideRoadSegment(
                x0[0], x0[1], lane1Lines, lane2Lines, acrossLines)

            if (idx_Vehicle < obstacle.idx_StartSafeZone):
                dyRoadL = delta_yRoad
                dyRoadR = delta_yRoad
            elif (idx_Vehicle >= obstacle.idx_StartSafeZone) and (
                    idx_Vehicle < obstacle.idx_EndSafeZone):
                dyRoadL = delta_yRoad
                dyRoadR = delta_yRoadRelaxed
            elif (idx_Vehicle >= obstacle.idx_StartObstacle) and (
                    idx_Vehicle < obstacle.idx_EndObstacle):
                dyRoadL = delta_yRoad
                dyRoadR = delta_yRoad
            else:
                dyRoadL = delta_yRoad
                dyRoadR = delta_yRoad
        else:
            dyRoadL = delta_yRoad
            dyRoadR = delta_yRoad

        # Running Constraint
        #cl_running = np.concatenate([-1*np.ones(N), 0*np.ones(N)])
        #cu_running = np.concatenate([ 0*np.ones(N), 1*np.ones(N)])
        cl_running = np.concatenate([-100 * np.ones(N), 0 * np.ones(N)])
        cu_running = np.concatenate([0 * np.ones(N), 100 * np.ones(N)])
        cl_tmp1 = np.concatenate([cl_running, [-lataccel_max]])
        cu_tmp1 = np.concatenate([cu_running, [+lataccel_max]])

        # if ns == 6:

        if ns_option == 1:

            # Speed Constraint
            cl_tmp2 = np.concatenate([cl_tmp1, [lb_V]])
            cu_tmp2 = np.concatenate([cu_tmp1, [ub_V]])

            # Terminal Constraint
            cl_tmp3 = np.concatenate([cl_tmp2, [-dyRoadL]])
            cu_tmp3 = np.concatenate([cu_tmp2, [dyRoadR]])

            cl = np.concatenate([cl_tmp3, [-delta_V + V_cmd]])
            cu = np.concatenate([cu_tmp3, [delta_V + V_cmd]])

        elif ns_option == 2:

            # Terminal Constraint
            cl_tmp3 = np.concatenate([cl_tmp1, [-dyRoadL]])
            cu_tmp3 = np.concatenate([cu_tmp1, [dyRoadR]])

            cl = np.concatenate([cl_tmp3, [-delta_V + V_cmd]])
            cu = np.concatenate([cu_tmp3, [delta_V + V_cmd]])

        elif ns_option == 3:

            # Terminal Constraint
            cl = np.concatenate([cl_tmp1, [-dyRoadL]])
            cu = np.concatenate([cu_tmp1, [dyRoadR]])

        # elif ns == 4:
        #
        #     # Terminal Constraint
        #     cl = np.concatenate([cl_tmp1,[-dyRoadL]])
        #     cu = np.concatenate([cu_tmp1,[ dyRoadR]])

        if ncons != len(cl) or ncons != len(cu):
            print('Error: resolve number of constraints')

        nlp = ipopt.problem(n=nu * N,
                            m=len(cl),
                            problem_obj=nlpProb(N, T, t0, x0, ncons, nu, lanes,
                                                obstacle, posIdx, ns_option),
                            lb=lb,
                            ub=ub,
                            cl=cl,
                            cu=cu)
        nlp.addOption('print_level', nlpPrintLevel)
        nlp.addOption('max_iter', nlpMaxIter)
        #nlp.addOption('dual_inf_tol',10.0)  # defaut = 1
        nlp.addOption('constr_viol_tol', 0.1)  # default = 1e-4
        nlp.addOption('compl_inf_tol', 0.1)  # default = 1e-4
        nlp.addOption('acceptable_tol', 0.1)  # default = 0.01
        nlp.addOption('acceptable_constr_viol_tol', 0.1)  # default = 0.01

        return nlp
Example #25
def rw_ipopt(wh, xmat, targets, xlb, xub, crange, max_iter, ccgoal, objgoal,
             quiet):
    r"""
    Build and solve the reweighting NLP.

    Good general settings seem to be:
        get_ccscale - use ccgoal=1, method='mean'
        get_objscale - use xbase=1.2, objgoal=100
        no other options set, besides obvious ones

    Important resources:
        https://pythonhosted.org/ipopt/reference.html#reference
        https://coin-or.github.io/Ipopt/OPTIONS.html
        ..\cyipopt\ipopt\ipopt_wrapper.py to see code from cyipopt author

    Parameters
    ----------
    wh : float
        DESCRIPTION.
    xmat : ndarray
        DESCRIPTION.
    targets : ndarray
        DESCRIPTION.
    xlb : TYPE, optional
        DESCRIPTION. The default is 0.1.
    xub : TYPE, optional
        DESCRIPTION. The default is 100.
    crange : TYPE, optional
        DESCRIPTION. The default is .03.
    max_iter : TYPE, optional
        DESCRIPTION. The default is 100.
    ccgoal : TYPE, optional
        DESCRIPTION. The default is 1.
    objgoal : TYPE, optional
        DESCRIPTION. The default is 100.
    quiet : TYPE, optional
        DESCRIPTION. The default is True.

    Returns
    -------
    x : TYPE
        DESCRIPTION.
    info : TYPE
        DESCRIPTION.

    """
    n = xmat.shape[0]
    m = xmat.shape[1]

    # xlb = 0.1
    # xub = 100
    # crange = .03
    # max_iter = 100
    # ccgoal = 1
    # objgoal = 100
    # quiet = True

    # constraint coefficients (constant)
    cc = (xmat.T * wh).T

    # scale constraint coefficients and targets
    ccscale = get_ccscale(cc, ccgoal=ccgoal, method='mean')
    # ccscale = 1
    cc = cc * ccscale  # mult by scale to have avg derivative meet our goal
    # print(targets)
    # targets_scaled = targets.copy() * ccscale  # djb do I need to copy?
    print(ccscale)
    targets_scaled = targets * ccscale  # djb do I need to copy?
    # print(targets_scaled)

    # IMPORTANT: define callbacks AFTER we have scaled cc and targets
    # because callbacks must be initialized with scaled cc
    callbacks = Reweight_callbacks(cc, quiet)

    # x vector starting values, and lower and upper bounds
    x0 = np.ones(n)
    lb = np.full(n, xlb)
    ub = np.full(n, xub)

    # constraint lower and upper bounds
    cl = targets_scaled - abs(targets_scaled) * crange
    cu = targets_scaled + abs(targets_scaled) * crange

    nlp = ipopt.problem(n=n,
                        m=m,
                        problem_obj=callbacks,
                        lb=lb,
                        ub=ub,
                        cl=cl,
                        cu=cu)

    # objective function scaling
    # djb should I pass n and callbacks???
    objscale = get_objscale(objgoal=objgoal,
                            xbase=1.2,
                            n=n,
                            callbacks=callbacks)
    # print(objscale)
    nlp.addOption('obj_scaling_factor', objscale)  # multiplier

    # define additional options as a dict
    opts = {
        'print_level': 5,
        'file_print_level': 5,
        'jac_d_constant': 'yes',
        'hessian_constant': 'yes',
        'max_iter': max_iter,
        'mumps_mem_percent': 100,  # default 1000
        'linear_solver': 'mumps'
    }

    # TODO: check against already set options, etc. see ipopt_wrapper.py
    for option, value in opts.items():
        nlp.addOption(option, value)

    if (not quiet):
        print(f'\n {"":10} Iter {"":25} obj {"":22} infeas')

    x, info = nlp.solve(x0)
    return x, info
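Reweight_callbacks is likewise not shown in this listing. The options set above (jac_d_constant='yes', hessian_constant='yes') imply linear constraints cc.T @ x with a constant-Hessian objective; a hypothetical sketch along those lines, assuming a squared-deviation-from-one penalty on the weight ratios (the actual objective may differ), could be:

import numpy as np

class Reweight_callbacks(object):
    # hypothetical sketch; cc is the n x m scaled constraint-coefficient matrix
    def __init__(self, cc, quiet=True):
        self.cc = cc
        self.quiet = quiet

    def objective(self, x):
        # assumed form: penalize deviation of each weight ratio from 1
        return np.sum((x - 1.0) ** 2)

    def gradient(self, x):
        return 2.0 * (x - 1.0)

    def constraints(self, x):
        return self.cc.T @ x

    def jacobian(self, x):
        # constant (linear constraints), consistent with jac_d_constant='yes'
        return self.cc.T.flatten()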
Example #26
    def __init__(self, rf):
        self.rf = rf

    def objective(self, x):
        return self.rf.j(x)

    def gradient(self, x):
        return self.rf.dj(x, forget=False)

    def constraints(self, x):
        return [sum(x)]

    def jacobian(self, x):
        return [[1] * len(x)]


m0 = rf.initial_control()
nlp = ipopt.problem(n=len(m0),
                    m=1,  # one constraint (sum(x)), matching cl/cu below
                    problem_obj=Problem(rf),
                    lb=[0] * len(m0),
                    ub=[1] * len(m0),
                    cl=[0],
                    cu=[1000])

nlp.addOption("hessian_approximation", "limited-memory")

nlp.solve(m0)

#maximize(rf, method = "SLSQP", options={'maxiter':100}, bounds=[0, 1], constraints={"type": "ineq", "fun": maxint, "jac": maxint_prime})
Example #27
rf = ReducedFunctional(config, scale=-1e-6)

class Problem(object):
    def __init__(self, rf):
        self.rf = rf

    def objective(self, x):
        return self.rf.j(x)

    def gradient(self, x):
        return self.rf.dj(x, forget=False)

    def constraints(self, x):
        return [sum(x)]

    def jacobian(self, x):
        return [[1] * len(x)]


m0 = rf.initial_control()
nlp = ipopt.problem(n=len(m0),
                    m=1,  # one constraint (sum(x)), matching cl/cu below
                    problem_obj=Problem(rf),
                    lb=[0] * len(m0),
                    ub=[1] * len(m0),
                    cl=[0],
                    cu=[1000])

nlp.addOption("hessian_approximation", "limited-memory")
nlp.solve(m0)
Example #28
    def solve(self, prob):
        """
        Solve a non-linear problem

        Parameters
        ----------
        prob : pycalphad.core.problem.Problem

        Returns
        -------
        SolverResult

        """
        cur_conds = prob.conditions
        comps = prob.pure_elements
        nlp = ipopt.problem(
            n=prob.num_vars,
            m=prob.num_constraints,
            problem_obj=prob,
            lb=prob.xl,
            ub=prob.xu,
            cl=prob.cl,
            cu=prob.cu
        )
        self.apply_options(nlp)
        length_scale = np.min(np.abs(prob.cl))
        length_scale = max(length_scale, 1e-9)
        # Note: Using the ipopt derivative checker can be tricky at the edges of composition space
        # It will not give valid results for the finite difference approximation
        x, info = nlp.solve(prob.x0)
        dual_inf = np.max(np.abs(info['mult_g']*info['g']))
        if dual_inf > self.infeasibility_threshold:
            if self.verbose:
                print('Trying to improve poor solution')
            # Constraints are getting tiny; need to be strict about bounds
            if length_scale < 1e-6:
                nlp.addOption(b'compl_inf_tol', 1e-3 * float(length_scale))
                nlp.addOption(b'bound_relax_factor', MIN_SITE_FRACTION)
                # This option ensures any bounds failures will fail "loudly"
                # Otherwise we are liable to have subtle mass balance errors
                nlp.addOption(b'honor_original_bounds', b'no')
            else:
                nlp.addOption(b'dual_inf_tol', self.infeasibility_threshold)
            accurate_x, accurate_info = nlp.solve(x)
            if accurate_info['status'] >= 0:
                x, info = accurate_x, accurate_info
        chemical_potentials = prob.chemical_potentials(x)
        if info['status'] == -10:
            # Not enough degrees of freedom; nothing to do
            if len(prob.composition_sets) == 1:
                converged = True
                chemical_potentials[:] = prob.composition_sets[0].energy
            else:
                converged = False
        elif info['status'] < 0:
            if self.verbose:
                print('Calculation Failed: ', cur_conds, info['status_msg'])
            converged = False
        else:
            converged = True
        if self.verbose:
            print('Chemical Potentials', chemical_potentials)
            print(info['mult_x_L'])
            print(x)
            print('Status:', info['status'], info['status_msg'])
        return SolverResult(converged=converged, x=x, chemical_potentials=chemical_potentials)
Example #29
def main():
    global x_goal
    global y_goal
    global w_goal

    w_curr1 = check_omega(w_goal)

    # Define the problem constraints on the optimized variable vector
    x0 = [0, 0, 0, 0, 0, 0]

    lb = [0, -3, -3, -3, -3, -3]
    ub = [8, 3, 3, 3, 3, 3]

    #define the inequality constraints limit
    cl = [-0.418, -0.428, -0.428, -0.428, abs(w_goal - w_curr1), -0.4, 0, 0]
    cu = [0.428, 0.428, 0.428, 0.428, abs(w_goal - w_curr1), +0.4, 100, 100]

    #define the non linear optimization problem

    nlp = ipopt.problem(n=len(x0),
                        m=len(cl),
                        problem_obj=hs071(),
                        lb=lb,
                        ub=ub,
                        cl=cl,
                        cu=cu)

    #
    # Set solver options
    #
    #nlp.addOption('derivative_test', 'second-order')
    nlp.addOption(b'mu_strategy', b'adaptive')
    nlp.addOption(b'tol', 1e-7)

    #
    # Scale the problem (Just for demonstration purposes)
    #
    nlp.setProblemScaling(obj_scaling=2, x_scaling=[1, 1, 1, 1, 1, 1])
    nlp.addOption(b'nlp_scaling_method', b'user-scaling')

    #
    # Solve the problem
    #
    x, info = nlp.solve(x0)

    # print("Solution of the primal variables: x=%s\n" % repr(x))
    vel = float(x[0])
    omega = float(x[1])
    vel_msg.x = vel
    vel_msg.y = omega
    vel_msg.z = w_curr1

    #publish the message containing the velocity commands.

    pub.publish(vel_msg)

    goal_msg.x = x_goal
    goal_msg.y = y_goal
    goal_msg.z = w_goal

    pub2.publish(goal_msg)

    if vel < 0.5:
        vel = 0.5

    drive_msg.velocity = vel
    drive_msg.angle = omega * 1.5
    pub3.publish(drive_msg)
Example #30
    def intermediate(self, alg_mod, iter_count, obj_value, inf_pr, inf_du, mu,
                     d_norm, regularization_size, alpha_du, alpha_pr,
                     ls_trials):

        #
        # Example for the use of the intermediate callback.
        #
        print("Objective value at iteration ", iter_count, " is -", obj_value)


x0 = [1.0, 5.0, 5.0, 1.0]

lb = [1.0, 1.0, 1.0, 1.0]
ub = [5.0, 5.0, 5.0, 5.0]

cl = [25.0, 40.0]
cu = [2.0e19, 40.0]

nlp = ipopt.problem(n=len(x0),
                    m=len(cl),
                    problem_obj=hs071(),
                    lb=lb,
                    ub=ub,
                    cl=cl,
                    cu=cu)

nlp.addOption('mu_strategy', 'adaptive')
nlp.addOption('tol', 1e-7)

x, info = nlp.solve(x0)
Example #31
    #    constants_dict['xd']  = (0.9 + 0.2*rand_list[11])*constants_dict['xd']  # ShankInertia
    #    constants_dict['yd']  = (0.9 + 0.2*rand_list[12])*constants_dict['yd']  # ShankInertia

    nlp = ipopt.problem(n=num_states * num_nodes + num_nodes * num_open +
                        num_normal * num_control + num_par,
                        m=num_cons * (num_nodes - 1),
                        problem_obj=gait2dpi_model(x_c_meas_vec,
                                                   x_normal_ref,
                                                   vs_meas,
                                                   num_nodes,
                                                   num_states,
                                                   index_open,
                                                   index_control,
                                                   num_normal,
                                                   num_par,
                                                   interval,
                                                   weight_mt,
                                                   weight_ms,
                                                   weight_ts,
                                                   weight_tm,
                                                   weight_tp,
                                                   index_normal,
                                                   index2_imped,
                                                   constants_dict,
                                                   scaling=Scaling),
                        lb=lb,
                        ub=ub,
                        cl=cl,
                        cu=cu)

    nlp.addOption(b'linear_solver', b'MA86')
    nlp.addOption(b'hessian_approximation', b'limited-memory')
Example #32
    def reweight(self,
                 xlb=0.1,
                 xub=100,
                 crange=.03,
                 max_iter=100,
                 ccgoal=1,
                 objgoal=100,
                 quiet=True):
        r"""
        Build and solve the reweighting NLP.

        Good general settings seem to be:
            get_ccscale - use ccgoal=1, method='mean'
            get_objscale - use xbase=1.2, objgoal=100
            no other options set, besides obvious ones

        Important resources:
            https://pythonhosted.org/ipopt/reference.html#reference
            https://coin-or.github.io/Ipopt/OPTIONS.html
            ..\cyipopt\ipopt\ipopt_wrapper.py to see code from cyipopt author

        Returns
        -------
        x : TYPE
            DESCRIPTION.
        info : TYPE
            DESCRIPTION.

        """
        # constraint coefficients (constant)
        # cc = self._xmat * self._wh[:, None]
        # cc = self._xmat * self._wh
        cc = (self._xmat.T * self._wh).T

        # scale constraint coefficients and targets
        ccscale = self.get_ccscale(cc, ccgoal=ccgoal, method='mean')
        # print(ccscale)
        # ccscale = 1
        cc = cc * ccscale  # mult by scale to have avg derivative meet our goal
        targets = self._targets * ccscale

        # IMPORTANT: define callbacks AFTER we have scaled cc and targets
        # because callbacks must be initialized with scaled cc
        callbacks = Reweight_callbacks(cc, quiet)

        # x vector starting values, and lower and upper bounds
        x0 = np.ones(self._n)
        lb = np.full(self._n, xlb)
        ub = np.full(self._n, xub)

        # constraint lower and upper bounds
        cl = targets - abs(targets) * crange
        cu = targets + abs(targets) * crange

        nlp = ipopt.problem(n=self._n,
                            m=self._m,
                            problem_obj=callbacks,  # the freshly built callbacks with scaled cc
                            lb=lb,
                            ub=ub,
                            cl=cl,
                            cu=cu)

        # objective function scaling
        objscale = self.get_objscale(objgoal=objgoal, xbase=1.2)
        # print(objscale)
        nlp.addOption('obj_scaling_factor', objscale)  # multiplier

        # define additional options as a dict
        opts = {
            'print_level': 5,
            'file_print_level': 5,
            'jac_d_constant': 'yes',
            'hessian_constant': 'yes',
            'max_iter': max_iter,
            'mumps_mem_percent': 100,  # default 1000
            'linear_solver': 'mumps',
        }

        # TODO: check against already set options, etc. see ipopt_wrapper.py
        for option, value in opts.items():
            nlp.addOption(option, value)

        # outfile = 'test4.out'
        # if os.path.exists(outfile):
        #     os.remove(outfile)
        # nlp.addOption('output_file', outfile)
        # nlp.addOption('derivative_test', 'first-order')  # second-order

        # nlp_scaling_method: default gradient-based
        # equilibration-based needs MC19
        # nlp.addOption('nlp_scaling_method', 'equilibration-based')
        # nlp.addOption('nlp_scaling_max_gradient', 1e-4)  # 100 default
        # nlp.addOption('mu_strategy', 'adaptive')  # not good
        # nlp.addOption('mehrotra_algorithm', 'yes')  # not good
        # nlp.addOption('mumps_mem_percent', 100)  # default 1000
        # nlp.addOption('mumps_pivtol', 1e-4)  # default 1e-6; 1e-2 is SLOW
        # nlp.addOption('mumps_scaling', 8)  # 77 default

        x, info = nlp.solve(x0)
        return x, info
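Both reweighting examples scale the constraint coefficients with get_ccscale before building the NLP, but its implementation is not included here. From the comment "mult by scale to have avg derivative meet our goal", a plausible sketch computes one scale per constraint column:

import numpy as np

def get_ccscale(cc, ccgoal=1, method='mean'):
    # hypothetical sketch: choose per-constraint scales so the mean (or max)
    # absolute coefficient of each constraint equals ccgoal
    if method == 'mean':
        denom = np.abs(cc).mean(axis=0)
    else:
        denom = np.abs(cc).max(axis=0)
    denom = np.where(denom == 0, 1.0, denom)  # guard all-zero columns
    return ccgoal / denom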
Example #33
def bilinear_formulation():
    ''' Definition the bilinear problem '''

    math = Math()

    pos0 = np.array([2.5, 2.5, 2.5])
    force0 = np.array([-10.0, 10.0, 10.0])

    posDesired = np.array([2.0, 2.0, 2.0])
    forceDesired = np.array([0.0, 0.0, 10.0])
    tauDesired = np.dot(math.skew(posDesired), forceDesired)
    print "initial contact position: ", pos0
    print "initial force: ", force0
    print "Desired torque: ", tauDesired

    x0 = np.hstack([pos0, force0])

    lb = [1.0, 1.0, 1.0, -20.0, -20.0, 0.0]
    ub = [3.0, 3.0, 3.0, 20.0, 20.0, 20.0]

    tau_x_constraints_lb = np.array([20.0])
    tau_x_constraints_ub = np.array([20.0])

    tau_y_constraints_lb = np.array([-20.0])
    tau_y_constraints_ub = np.array([-20.0])

    tau_z_constraints_lb = np.array([0.0])
    tau_z_constraints_ub = np.array([0.0])

    cl = np.hstack(
        [tau_x_constraints_lb, tau_y_constraints_lb, tau_z_constraints_lb])
    cu = np.hstack(
        [tau_x_constraints_ub, tau_y_constraints_ub, tau_z_constraints_ub])

    nlpConvex = ipopt.problem(n=len(x0),
                              m=len(cl),
                              problem_obj=buildBilinearFormulation(x0),
                              lb=lb,
                              ub=ub,
                              cl=cl,
                              cu=cu)

    #
    # Set solver options
    #
    #nlp.addOption('derivative_test', 'second-order')
    nlpConvex.addOption('mu_strategy', 'adaptive')
    nlpConvex.addOption('jacobian_approximation', 'finite-difference-values')
    nlpConvex.addOption('hessian_approximation', 'limited-memory')
    nlpConvex.addOption('tol', 1e-2)

    #
    # Scale the problem (Just for demonstration purposes)
    #
    nlpConvex.setProblemScaling(obj_scaling=1,
                                x_scaling=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    nlpConvex.addOption('nlp_scaling_method', 'user-scaling')

    #
    # Solve the problem
    #
    sol, info = nlpConvex.solve(x0)

    print "Solution of the primal variables: x=%s\n" % repr(sol)

    print "Solution of the dual variables: lambda=%s\n" % repr(info['mult_g'])

    print "Objective=%s\n" % repr(info['obj_val'])
    l = sol[0:3]
    f = sol[3:6]
    tau = np.dot(math.skew(l), f)
    print colored('-----------> Results check: ', 'red')
    print colored('desired torque: ',
                  'blue'), tauDesired[0], tauDesired[1], tauDesired[2]
    print colored('actual torque: ', 'green'), tau
    print colored('torque error: ', 'red'), np.subtract(tau, tauDesired)
    print colored('foot pos: ', 'green'), l
    print colored('force: ', 'green'), f
Example #34
    def jacobian(self, xi):
        r, c, d = c_x(xi)
        return coo_matrix((d, (r, c))).toarray()


x0 = nlp_x
lb = [-1e19] * len(x0)
ub = [ 1e19] * len(x0)

cl = [0, 0, 0] + [0, 0, 0, 0] * 10 + [0] + [0, 0]
cu = [0, 0, 0] + [1e19, 0, 0, 0] * 10 + [1e19] + [0, 0]

nlp = ipopt.problem(
            n=len(x0),
            m=len(cl),
            problem_obj=stryk(),
            lb=lb,
            ub=ub,
            cl=cl,
            cu=cu)

#
# Set solver options
#
#nlp.addOption('derivative_test', 'second-order')
nlp.addOption('mu_strategy', 'adaptive')
nlp.addOption('tol', 1e-7)

#
# Scale the problem (Just for demonstration purposes)
#
Example #35
def linear_mpc_control4(xref, xbar, z0, dref):
    z0 = z0[:NX]
    # [:-4] removes the last point, keeping T points
    zref = np.concatenate((z0, xref.reshape(-1, 1, order='F')[:, 0][:-8]))

    x0 = zref.tolist()

    # lb = [1.0, 1.0, 1.0, 1.0]
    # ub = [5.0, 5.0, 5.0, 5.0]

    # cl = [25.0, 40.0]
    # cu = [2.0e19, 40.0]

    ub = z0.copy()
    lb = z0.copy()

    cl = [0 for i in range((T - 1) * NX)]
    cu = [0 for i in range((T - 1) * NX)]
    '''
    x0 is the initial value of the variable to be solved
    '''
    # upper and lower bounds on v
    for i in range(T - 1):
        ub.extend([10e9, 10e9, MAX_SPEED, 10e9])
        lb.extend([-10e9, -10e9, MIN_SPEED, -10e9])
        # x0 += [0,0,0,0]

    # upper and lower bounds on a and steer
    for i in range(T - 1):
        ub.extend([MAX_ACCEL, MAX_STEER])
        lb.extend([-MAX_ACCEL, -MAX_STEER])
        x0.extend([0, 0])

    # upper and lower bounds on dsteer
    for i in range(T - 2):
        cl.append(-DT * MAX_DSTEER)
        cu.append(DT * MAX_DSTEER)

    # print("x is",len(x0))
    nlp = ipopt.problem(n=len(x0),
                        m=len(cl),
                        problem_obj=nlmpc_problem.nlmpc(zref, T),
                        lb=lb,
                        ub=ub,
                        cl=cl,
                        cu=cu)
    nlp.addOption('mu_strategy', 'adaptive')
    nlp.addOption('tol', 1e-3)
    nlp.addOption('print_level', 0)
    nlp.addOption("max_iter", 100)
    nlp.addOption("warm_start_init_point", "yes")
    # nlp.addOption("mehrotra_algorithm", "yes")

    x, info = nlp.solve(x0)

    z = x[:T * NX]
    u = x[T * NX:]

    ox = z[0::NX]
    oy = z[1::NX]
    ov = z[2::NX]
    oyaw = z[3::NX]

    oa = u[0::2]
    od = u[1::2]
    # print("z0 is\n", z0)
    # print("lb is\n", lb)
    # print("ub is\n", ub )
    # print("zref is\n", zref)
    # print(z)
    # print(u)
    return oa, od, ox, oy, oyaw, ov
Example #36
def runstub(AGI_STUB, tolerances, nzcc, pufbase_state, log_dir, interim_results_dir):

    stub = AGI_STUB

    constraint_value = 1000  # each constraint will be this number when scaled
    constraints_unscaled = tolerances[tolerances.AGI_STUB == stub].target
    constraint_scales = np.where(constraints_unscaled == 0, 1, abs(constraints_unscaled) / constraint_value)
    constraints = constraints_unscaled / constraint_scales
    # constraints

    # create nzcc for the stub, and on each record create i to index constraints and j to index variables (the x elements)

    nzcc_stub = nzcc[nzcc.AGI_STUB == stub].sort_values(by = ['constraint_name', 'RECID'])

    # NOTE!!: create i and j, each of which will be consecutive integers, where
        #   i gives the index for constraints
        #   j gives the index for the RECID (for the variables)
        #   TODO: There should be better ways to do this in python (like group_indices in R)
        #   Note that the indices start from 0 here in python (they start from 1 in R)
    #nzcc_stub
    nzcc_stub['i'] = nzcc_stub.constraint_name
    nzcc_stub['j'] = nzcc_stub.RECID
    nzcc_stub.set_index(['i', 'j'], inplace = True)

    rename_dic1 = dict(zip(nzcc_stub.constraint_name.unique(), list(range(len(nzcc_stub.constraint_name.unique())))))
    rename_dic2 = dict(zip(np.sort(nzcc_stub.RECID.unique()), list(range(len(nzcc_stub.RECID.unique())))))

    nzcc_stub.rename(index = rename_dic1, level = 'i', inplace = True)
    nzcc_stub.rename(index = rename_dic2, level = 'j', inplace = True)
    nzcc_stub.reset_index(inplace = True)
    # nzcc_stub[nzcc_stub.i == 1].loc[:, ['i', 'j', 'constraint_name', 'RECID']]

    nzcc_stub['nzcc_unscaled'] = nzcc_stub.nzcc
    nzcc_stub['nzcc'] = nzcc_stub.nzcc_unscaled / np.take(constraint_scales, nzcc_stub.i)


    # Inputs

    inputs = {}
    inputs['p'] = 2
    inputs['wt'] = pufbase_state[pufbase_state.AGI_STUB == stub].sort_values(by = 'RECID').weight_initial # note this is a pd.Series
    inputs['RECID'] = pufbase_state[pufbase_state.AGI_STUB == stub].sort_values(by = 'RECID').RECID
    inputs['constraint_coefficients_sparse'] = nzcc_stub
    inputs['n_variables'] = len(inputs['wt'])
    inputs['n_constraints'] = len(constraints)
    inputs['objscale'] = 1e6 # scaling constant used in the various objective function functions
    inputs['constraint_scales'] = constraint_scales

    xlb = np.repeat(0,   inputs['n_variables']) # arbitrary
    xub = np.repeat(100, inputs['n_variables']) # arbitrary
    x0  = np.repeat(1,   inputs['n_variables'])

    tol = tolerances[tolerances.AGI_STUB == stub].tol_default

    clb = constraints - abs(constraints) * tol
    cub = constraints + abs(constraints) * tol

    clb.fillna(0, inplace = True)
    cub.fillna(0, inplace = True)


    nlp_puf = ipopt.problem(
            n=len(x0),
            m=len(clb),
            problem_obj=puf_obj(inputs),
            lb=xlb,
            ub=xub,
            cl=clb,
            cu=cub
            )


    logfile_name = log_dir + "stub_" + str(stub) + ".out"

    nlp_puf.addOption('print_level', 0)
    nlp_puf.addOption('file_print_level', 5)
    # nlp_puf.addOption('linear_solver', 'ma27') # cyipopt uses MUMPS solver as its default solver 
    # TODO: Try other solvers
    nlp_puf.addOption('max_iter', 100)
    nlp_puf.addOption('mu_strategy', 'adaptive')
    nlp_puf.addOption('output_file', logfile_name)


    x, info = nlp_puf.solve(x0)
    print(info['status_msg'])

    return pd.DataFrame({'AGI_STUB' : stub, 'RECID': inputs['RECID'], 'wt_int' : inputs['wt'], 'x' : x})