Example #1
###############################################################

lines, results = [], {}
for j, solver in enumerate(solvers):
    p = NLP(
        ff, startPoint, xlabel=Xlabel, gtol=gtol, diffInt=diffInt, ftol=ftol,
        maxIter=1390, plot=PLOT, color=colors[j], iprint=10, df_iter=4,
        legend=solver, show=False, contol=contol, maxTime=maxTime,
        maxFunEvals=maxFunEvals, name='NLP_bench_1',
    )
    p.constraints = [c1 < 0, c2 < 0, h1.eq(0), h2.eq(0), x > lb, x < ub]
    # p.constraints = h1.eq(0)

    # p._Prepare()
    # print(p.dc(p.x0))
    # print(h1.D(startPoint))
    # print(h2.D(startPoint))
    # continue
    
    if solver =='algencan':
        p.gtol = 1e-2
    elif solver == 'ralg':
        pass
        # p.debug = 1
    
    # p.debug = 1
    
    r = p.solve(solver)
    for fn in ('h', 'c'):
        if fn not in r.evals:  # dict.has_key() was removed in Python 3
            r.evals[fn] = 0    # if no c or h constraints are used in the problem
    results[solver] = (r.ff, p.getMaxResidual(p.xk), r.elapsed['solver_time'],
                       r.elapsed['solver_cputime'], r.evals['f'], r.evals['c'],
                       r.evals['h'])
    if PLOT:
        subplot(2, 1, 1)  # subplot/plot come from pylab
        F0 = ff(startPoint)
        lines.append(plot([0, 1e-15], [F0, F0], color=colors[j]))
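The loop above is a fragment of a larger benchmark file: ff, c1, c2, h1, h2, startPoint, solvers, colors and the various tolerances are all defined earlier in that file. A minimal, hypothetical preamble in the same FuncDesigner/OpenOpt style is sketched below; every concrete value and expression is a placeholder, not the benchmark's own definition.

from numpy import arange, cos
from FuncDesigner import oovar
from openopt import NLP

N = 4
x = oovar('x', size=N)                   # decision variable vector
startPoint = {x: cos(arange(N))}         # FuncDesigner start points are dicts

# placeholder objective and constraints (smooth, so gradients exist)
ff = (x[0] - 1)**2 + (x[1] - 2)**2 + (x[2] + 3)**2 + x[3]**2
c1 = x[0]**4 - 32                        # used as c1 < 0
c2 = x[1]**2 + x[2]**2 - 8               # used as c2 < 0
h1 = x[-1] + x[-2] - 1                   # used as h1.eq(0)
h2 = x[0]**3 - 1                         # used as h2.eq(0)
lb, ub = -6.0, 6.0                       # box bounds

solvers = ['ralg', 'scipy_slsqp']        # any installed NLP solvers
colors = ['b', 'g']                      # one plot color per solver
Xlabel = 'time'
gtol = ftol = contol = 1e-6
diffInt = 1e-7                           # finite-difference step
maxTime, maxFunEvals = 150, 1e7
PLOT = False                             # pylab's plot/subplot are needed if True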
Example #2
p.h = lambda x: (1e4*(x[-1]-1)**4, (x[-2]-1.5)**4)
# dh(x)/dx: gradients of the non-linear equality constraints (optional);
# zeros comes from numpy
def DH(x):
    r = zeros((2, p.n))
    r[0, -1] = 1e4 * 4 * (x[-1] - 1)**3
    r[1, -2] = 4 * (x[-2] - 1.5)**3
    return r
p.dh = DH
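# Aside (not in the original example): DH can be cross-checked by hand
# against a central finite-difference approximation; p.checkdh() below
# automates the same comparison. fd_dh and eps are hypothetical names;
# x is assumed to be a numpy array.
def fd_dh(x, eps=1e-6):
    r = zeros((2, p.n))
    for i in range(p.n):
        xp, xm = x.copy(), x.copy()
        xp[i] += eps
        xm[i] -= eps
        hp, hm = p.h(xp), p.h(xm)          # p.h returns the 2-tuple (h1, h2)
        r[0, i] = (hp[0] - hm[0]) / (2 * eps)
        r[1, i] = (hp[1] - hm[1]) / (2 * eps)
    return r
# fd_dh(p.x0) should closely match DH(p.x0)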

p.contol = 1e-3 # required constraint tolerance; the default for NLP is 1e-6

# for the ALGENCAN solver, gtol is the only stop criterion connected to OpenOpt
# (besides maxFunEvals and maxIter)
# note that in ALGENCAN gtol means the norm of the projected gradient of the
# Augmented Lagrangian, so it should be somewhere around 1e-3...1e-5
p.gtol = 1e-5 # gradient stop criterion (default for NLP is 1e-6)
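# (the benchmark loop in Example #1 applies the same idea per solver:
#  if solver == 'algencan': p.gtol = 1e-2)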


# see also help(NLP) for maxTime, maxCPUTime, ftol and xtol,
# which are connected to / used by lincher and some other solvers

# optional: check of user-supplied derivatives
p.checkdf()
p.checkdc()
p.checkdh()

# last but not least:
# please don't forget,
# Python indexing starts from ZERO!!

p.plot = 0
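Finally, to run the problem configured in this example, the usual OpenOpt pattern is to call solve() with a solver name and inspect the returned result object. A minimal sketch, with 'ralg' as an example solver:

r = p.solve('ralg')                                  # any installed NLP solver
print('x* = %s' % r.xf)                              # the point found
print('f(x*) = %s' % r.ff)                           # objective value there
print('max residual = %s' % p.getMaxResidual(r.xf))  # constraint violation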