Example #1
        df_iter=4,
        legend=solver,
        show=False,
        contol=contol,
        maxTime=maxTime,
        maxFunEvals=maxFunEvals,
        name="NLP_5",
    )

    if solver == "algencan":
        p.gtol = 1e-2
    elif solver == "ralg":
        pass
        # p.debug = 1

    p.debug = 1

    r = p.solve(solver)
    for fn in ("h", "c"):
        if fn not in r.evals:
            r.evals[fn] = 0  # if no c or h are used in problem
    results[solver] = (
        r.ff,
        p.getMaxResidual(r.xf),
        r.elapsed["solver_time"],
        r.elapsed["solver_cputime"],
        r.evals["f"],
        r.evals["c"],
        r.evals["h"],
    )
    if PLOT:
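
The snippet above stores, for each solver, the objective value, the maximum constraint residual, wall/CPU time and the f/c/h evaluation counts. A minimal sketch (not part of the original script) of how such a results dictionary could be summarised afterwards:

# hypothetical summary loop over the results collected above
for solver, (ff, max_res, t, cpu_t, n_f, n_c, n_h) in results.items():
    print("%-10s  f*=%-12g  max residual=%-8g  time=%.2fs  evals f/c/h: %d/%d/%d"
          % (solver, ff, max_res, t, n_f, n_c, n_h))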
Example #2
    def minimize(self, **kwargs):
        """ solve the nonlinear problem using OpenOpt

        Returns:
        obj_value, solution

        obj_value -- value of the objective function at the discovered solution
        solution  -- the solution flux vector (indexed like matrix columns)
        """
        if self.iterator is None:
            nlp = NLP(self.obj,
                      self.x0,
                      df=self.d_obj,
                      c=self.nlc,
                      dc=self.d_nlc,
                      A=self.Aineq,
                      Aeq=self.Aeq,
                      b=self.bineq,
                      beq=self.beq,
                      lb=self.lb,
                      ub=self.ub,
                      **kwargs)
            nlp.debug = 1
            nlp.plot = False
            nlp.checkdf()  # compare the user-supplied objective gradient with finite differences
            if self.nlc is not None:
                nlp.checkdc()

            r = nlp.solve(self.solver)

        else:
            self.rlist = []
            for x0 in self.iterator:
                nlp = NLP(self.obj,
                          x0,
                          df=self.d_obj,
                          c=self.nlc,
                          dc=self.d_nlc,
                          A=self.Aineq,
                          Aeq=self.Aeq,
                          b=self.bineq,
                          beq=self.beq,
                          lb=self.lb,
                          ub=self.ub,
                          **kwargs)
                r = nlp.solve(self.solver)
                if r.istop > 0 and r.ff == r.ff:  # converged and objective value is not NaN
                    self.rlist.append(r)

            if self.rlist:
                r = min(self.rlist, key=lambda x: x.ff)  # keep the best (lowest objective) result

        if r.istop <= 0 or r.ff != r.ff:  # solver failed or objective is NaN
            self.obj_value = 0.
            self.solution = []
            self.istop = r.istop
        else:
            self.obj_value = r.ff
            self.solution = r.xf
            self.istop = r.istop

        return self.obj_value, self.solution
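
A minimal usage sketch for the method above. The wrapper class name FluxOptimizer and its construction are assumptions for illustration; only the minimize() call and the returned (obj_value, solution) pair follow the code shown.

# hypothetical wrapper instance; in the original code self.obj, self.x0,
# the bounds and the constraints are set up elsewhere in the class
opt = FluxOptimizer(model)              # assumed constructor, not shown above
obj_value, solution = opt.minimize()    # extra kwargs are forwarded to NLP(...)
if opt.istop > 0:
    print("objective at optimum:", obj_value)
    print("flux vector:", solution)
else:
    print("solver stopped with istop =", opt.istop)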
Example #3
# that are connected to / used in lincher and some other solvers

# optional: check of user-supplied derivatives
p.checkdf()
p.checkdc()
p.checkdh()
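
# (sketch, not in the original script) the derivative checks above compare the
# user-supplied df/dc/dh against finite differences; the tolerances they use,
# which appear in the typical output at the end of this file, can be adjusted
# on the problem before calling the checks, e.g.:
# p.diffInt = 1e-7               # step for numerical differentiation
# p.check.maxViolation = 1e-5    # discrepancies above this value are reported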

# last but not least:
# please don't forget,
# Python indexing starts from ZERO!!

p.plot = 0
p.iprint = 0
p.df_iter = 4
p.maxTime = 4000
p.debug = 1
#r = p.solve('algencan')

r = p.solve('ralg')
#r = p.solve('lincher')

"""
typical output:
OpenOpt checks user-supplied gradient df (size: (50,))
according to:
prob.diffInt = 1e-07
prob.check.maxViolation = 1e-05
max(abs(df_user - df_numerical)) = 2.50111104094e-06
(is registered in df number 41)
sum(abs(df_user - df_numerical)) = 4.45203815948e-05
========================