Example #1
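# NOTE: this method relies on module-level names not shown here; a
# hypothetical preamble could look like:
# from __future__ import print_function
# import sys
# log = sys.stdout
# try:
#     from openopt import GLP
#     have_openopt = True
# except ImportError:
#     have_openopt = False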
 def optimize_openopt(self, solver='interalg'):
     if not have_openopt:
         raise RuntimeError('OpenOpt not available.')
     p = GLP(self.get_cost_function,
             self.par.get_array(),
             lb=self.par.get_lower_bounds(),
             ub=self.par.get_upper_bounds())
     r = p.solve(solver)
     print(r, file=log)
     self.set_parameters_from_array(r.xf)
     print('=== OPTIMIZED PARAMETER SET ===', file=log)
     self.get_cost_function(r.xf)
     return self.par
Example #2
# define objective
f = (x-1.5)**2 + sin(0.8 * y ** 2 + 15)**4 + cos(0.8 * z ** 2 + 15)**4 + (t-7.5)**4

# define some constraints
constraints = [x<1, x>-1, y<1, y>-1, z<1, z>-1, t<1, t>-1, x+2*y>-1.5, sinh(x)+cosh(z)+sinh(t) < 2.0]

# add some more constraints via Python "for" cycle
M = 10
for i in range(M):
    func = i*x+(M-i)*y+sinh(z)+cosh(t)
    constraints.append(func < i+1)

# define start point. You can use variables with length > 1 as well
startPoint = {x:0, y:0, z:0, t:0}
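# e.g. (illustrative sketch, not part of the original example): for a vector
# variable v = oovar('v'), FuncDesigner infers its length from the start
# value, so startPoint = {v: [0, 0, 0]} would make v a length-3 variable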

# assign prob
p = GLP(f, startPoint, constraints=constraints,  maxIter = 1e3,  maxFunEvals = 1e5,  maxTime = 5,  maxCPUTime = 5)

#optional: graphic output
# p.plot = 1, or pass plot=1 to p.solve(...), or to GLP(...)

# solve
r = p.solve('de', plot=1) # try other solvers: galileo, pswarm

optPoint, optVal = r.xf, r.ff
x, y, z, t = optPoint[x], optPoint[y], optPoint[z], optPoint[t]
# or 
# x, y, z, t = x(optPoint), y(optPoint), z(optPoint), t(optPoint)


Example #3
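# NOTE: this fragment assumes earlier setup not shown here: numpy imported,
# an SIR model `ode` with initial state x0 and initial time t0, a time grid
# t, the numerical solution array `solution`, and the odeLossFunc module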
theta = [0.2, 0.2]

# y = copy.copy(solution[:,1:2])
# y[1::] = y[1::] + numpy.random.normal(loc=0.0,scale=0.1,size=numStep-1)

# odeSIR = odeLossFunc.squareLoss([0.5,1.0/3.0] ,ode,x0,t0,t[1:len(t)],y[1:len(t)],'R')

objSIR = odeLossFunc.squareLoss(theta, ode, x0, t0, t[1::], solution[1::, 1:3],
                                ['I', 'R'])

box = [(0., 2.), (0., 2.)]
npBox = numpy.array(box)
lb = npBox[:, 0]
ub = npBox[:, 1]

pro = GLP(f=objSIR.cost, x0=theta, lb=lb, ub=ub)
pro.plot = True
rGalileo = pro.solve('galileo')

pro = GLP(f=objSIR.cost, x0=theta, lb=lb, ub=ub)
pro.plot = True
rDe = pro.solve('de')

pro = GLP(f=objSIR.cost, x0=theta, lb=lb, ub=ub)
pro.plot = True
rPSwarm = pro.solve('pswarm')

pro = NLP(f=objSIR.cost, df=objSIR.sensitivity, x0=theta, lb=lb, ub=ub)
pro.plot = True
rLincher = pro.solve('lincher')
Example #4
        json.dump(best, fresults)
        fresults.close()

        if DB:
            try:
                DB.results.insert(
                    {
                        "score": best[0],
                        "date": datetime.datetime.now(),
                        "params": vars[beta],
                        "range": rng,
                        "beta": beta,
                        "tested_vars": var_names,
                    }
                )
            except Exception as e:
                print("MONGO INSERT ERROR: %s" % e)

    return ret


p = GLP(getscore, x0=startPoint, lb=lbs, ub=ubs, maxIter=100, maxFunEvals=10000)
p.fOpt = 170000  # the known optimal value, used by OpenOpt as a reference (e.g. in plots)

r = p.maximize("de", iprint=1, plot=0)  # , searchDirectionStrategy="best")
# r = p.maximize('galileo', iprint=1, plot=0, population=5)
# r = p.maximize('gsubg', iprint=1, plot=0)

print "Solution vector: %s" % p.xf
print "Max value: %s" % p.ff
Example #5
from openopt import GLP
from numpy import *

f = lambda x: (x[0]-1.5)**2 + sin(0.8 * x[1] ** 2 + 15)**4 + cos(0.8 * x[2] ** 2 + 15)**4 + (x[3]-7.5)**4
p = GLP(f, lb = -ones(4),  ub = ones(4),  maxIter = 1e3,  maxFunEvals = 1e5,  maxTime = 3,  maxCPUTime = 3)

#optional: graphic output
# p.plot = 1, or pass plot=1 to p.solve(...), or to GLP(...)

r = p.solve('de', plot=1)
x_opt,  f_opt = r.xf,  r.ff
Example #6
from openopt import GLP
from numpy import *

f = lambda x: (x[0] - 1.5)**2 + sin(0.8 * x[1]**2 + 15)**4 + cos(0.8 * x[2]**2 + 15)**4 + (x[3] - 7.5)**4
p = GLP(f,
        lb=-ones(4),
        ub=ones(4),
        maxIter=1e3,
        maxFunEvals=1e5,
        maxTime=3,
        maxCPUTime=3)

#optional: graphic output
#p.plot = 1

r = p.solve('pswarm', x0=[0, 0, 0, 0], plot=0, debug=1, maxIter=200)
x_opt, f_opt = r.xf, r.ff
Example #7
from openopt import GLP
from numpy import *

f = lambda x: (x * x - 10 * cos(2 * pi * x) + 10).sum()  #Rastrigin function
p = GLP(f,
        lb=-ones(10) * 5.12,
        ub=ones(10) * 5.12,
        maxIter=1e3,
        maxFunEvals=1e5,
        maxTime=10,
        maxCPUTime=10)

r = p.solve('de', plot=0)
x_opt, f_opt = r.xf, r.ff
print(x_opt)
Example #8
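# NOTE: this fragment assumes definitions made earlier (not shown); a
# hypothetical preamble in the spirit of the OpenOpt stochastic examples:
# from openopt import GLP
# from FuncDesigner import *
# x, y, z, a, b, c = oovars('x y z a b c')
# A, B, C = <FuncDesigner distributions used as start values for a, b, c>
# objective = <some oofun of x, y, z, a, b, c>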
constraints = [
    P(a**2 - z + b * c < 4.7) < 0.03,  # by default constraint tolerance is 10^-6
    (P(c / b + z > sin(x)) > 0.02)(tol=1e-10),  # use tol 10^-10 instead; especially useful for equality constraints
    mean(b + y) <= 3.5
]

startPoint = {x: 0, y: 0, z: 0, a: A, b: B, c: C}
''' This is a multiextremum problem (due to sin, cos etc.),
thus we have to use a global nonlinear solver capable of handling nonlinear constraints
(moreover, the probability functions P() make it discontinuous when discrete distributions are involved)
'''

p = GLP(objective, startPoint, constraints=constraints)
solver = 'de'  # named after "differential evolution", check http://openopt.org/GLP for other available global solvers
r = p.maximize(solver, maxTime=150, maxDistributionSize=100, iprint=50)
'''
------------------------- OpenOpt 0.45 -------------------------
solver: de   problem: unnamed    type: GLP
 iter   objFunVal   log10(MaxResidual/ConTol)   
    0  6.008e+00                      8.40 
   50  7.436e+00                   -100.00 
   93  7.517e+00                   -100.00 
istop: 11 (Non-Success Number > maxNonSuccess = 15)
Solver:   Time Elapsed = 31.58 	CPU Time Elapsed = 30.07
objFunValue: 7.516546 (feasible, max(residuals/requiredTolerances) = 0)
'''
print(r(x, y, z))  # [0.99771171590186, -0.15952854483416395, 0.8584877921129496]
Example #9
from openopt import GLP
from numpy import *

f = lambda x: (x[0] - 1.5) ** 2 + sin(0.8 * x[1] ** 2 + 15) ** 4 + cos(0.8 * x[2] ** 2 + 15) ** 4 + (x[3] - 7.5) ** 4
p = GLP(f, lb=-ones(4), ub=ones(4), maxIter=1e3, maxFunEvals=1e5, maxTime=3, maxCPUTime=3)

# optional: graphic output
# p.plot = 1, or pass plot=1 to p.solve(...), or to GLP(...)

r = p.solve("de", plot=1)
x_opt, f_opt = r.xf, r.ff
Example #10
from openopt import GLP
from numpy import *
N = 100
aN = arange(N)
f = lambda x: ((x - aN)**2).sum()
p = GLP(f,
        lb=-ones(N),
        ub=N * ones(N),
        maxIter=1e3,
        maxFunEvals=1e5,
        maxTime=10,
        maxCPUTime=300)

#optional: graphic output
#p.plot = 1

r = p.solve('de', plot=1, debug=1, iprint=0)
x_opt, f_opt = r.xf, r.ff
Example #11
from openopt import GLP
from numpy import *

# objective function
# (x0 - 1.5)^2 + sin(0.8 * x1^2 + 15)^4 + cos(0.8 * x2^2 + 15)^4 + (x3 - 7.5)^4 -> min
f = lambda x: (x[0]-1.5)**2 + sin(0.8 * x[1] ** 2 + 15)**4 + cos(0.8 * x[2] ** 2 + 15)**4 + (x[3]-7.5)**4

# box-bound constraints lb <= x <= ub
lb, ub = -ones(4),  ones(4)

# linear inequality constraints
# x0 + x3 <= 0.15
# x1 + x3 <= 1.5
# as Ax <= b

A = mat('1 0 0 1; 0 1 0 1') # tuple, list, numpy array etc are OK as well
b = [0.15, 1.5] # tuple, list, numpy array etc are OK as well
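# A quick sanity check of the Ax <= b encoding above (an illustrative
# assertion, not part of the original example): for a point satisfying both
# linear constraints, every component of A.dot(x) must be <= b.
x_test = array([0.0, 0.5, 0.0, 0.1])
assert (dot(array([[1, 0, 0, 1], [0, 1, 0, 1]]), x_test) <= array(b)).all()  # x0+x3 = 0.1 <= 0.15, x1+x3 = 0.6 <= 1.5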


# non-linear constraints 
# x0^2 + x2^2 <= 0.15
# 1.5 * x0^2 + x1^2 <= 1.5

c = lambda x: (x[0] ** 2 + x[2] ** 2 - 0.15,  1.5 * x[0] ** 2 + x[1] ** 2 - 1.5)


p = GLP(f, lb=lb, ub=ub, A=A, b=b, c=c, maxIter = 250,  maxFunEvals = 1e5,  maxTime = 30,  maxCPUTime = 30)

r = p.solve('de', mutationRate = 0.15, plot=1)
x_opt,  f_opt = r.xf,  r.ff
Example #12
	# print(L_T_tilde(theta_tilde))
	# res = minimize(L_T_tilde, theta_tilde, method = 'COBYLA', constraints = constraints)
	# print(res)
	# assert isfinite(res.fun)
	# theta_tilde = res.x
	# print('p', [p(theta_tilde, i) for i in k_underline])
	# print('alpha', [alpha(theta_tilde, i) for i in k_underline])
	# print('beta', [beta(theta_tilde, i) for i in k_underline])
	
	# # NLopt
	# theta_tilde = opt.optimize(theta_tilde)
	# L_T_tilde_ast = opt.last_optimum_value()
	# print(L_T_tilde_ast)
	# result = opt.last_optimize_result()
	# print(result)
	
	# OpenOpt
	opt_p = GLP(L_T_tilde, theta_tilde, df = grad_L_T_tilde, A = opt_A, b = opt_b, lb = opt_lb, ub = opt_ub)
	res = opt_p.solve('de', maxNonSuccess = 32) # maxNonSuccess = round(exp(len(theta_tilde)))
	print(res.xf, res.ff)
	
	# print('Basin-Hopping')
	# print(L_T_tildeconstrained(theta_tilde))
	# res = basinhopping(L_T_tildeconstrained, theta_tilde)
	# print(res)
	# assert isfinite(res.fun)
	# theta_tilde = res.x
	# print('p', [p(theta_tilde, i) for i in k_underline])
	# print('alpha', [alpha(theta_tilde, i) for i in k_underline])
	# print('beta', [beta(theta_tilde, i) for i in k_underline])
Example #13
def fittingOpenopt(pearr, tmatrix, minR, maxR, lbounds, ubounds, gmaxtime,
                   pfact, res):
    nrgauss = int((len(lbounds) + 1) / 3)
    rvecbins = tmatrix.getMatrix().shape[0]
    myrange = maxR - minR
    xarr = numpy.linspace(minR + myrange / rvecbins / 2,
                          maxR - myrange / rvecbins / 2, rvecbins)
    xxarr = numpy.array([xarr] * nrgauss)

    if pfact == 0:
        print("Will not apply a penalty for gaussian proximity.")
        minfuncwrap = lambda x: gaussSQDiff(x, tmatrix.getMatrix(), pearr,
                                            xxarr)
    else:
        print("Will penalize gaussians which are closer than %d times the sum "
              "of both sigmas." % pfact)
        minfuncwrap = lambda x: penalizeCloseGauss(x, tmatrix.getMatrix(),
                                                   pearr, xxarr, pfact)

    lines_distance, lines_efficiency, g_lines, chsql, chisqs, chisqax = createLivePlot(
        nrgauss, pearr, tmatrix, xarr, lbounds, ubounds)

    mycallback = lambda p: plotCallback(p, lines_distance, lines_efficiency,
                                        g_lines, xxarr, tmatrix, chsql, chisqs,
                                        chisqax)

    print "Starting openopt ##########################"
    prob = GLP(minfuncwrap,
               lb=lbounds,
               ub=ubounds,
               callback=mycallback,
               maxFunEvals=1e15,
               maxNonSuccess=200,
               maxIter=1e5,
               maxTime=gmaxtime,
               fEnough=res)
    result = prob.solve('de', population=1000 * len(lbounds))

    # result=prob.solve('asa')
    # result=prob.solve('galileo') # not good
    # result=prob.solve('pswarm')
    # prob = GLP(minfuncwrap,lb=lbounds,ub=ubounds,callback=mycallback,maxNonSuccess=200,maxIter=1e5,maxTime=gmaxtime)
    # result=prob.solve('isres',population=100*len(lbounds))
    # prob = NLP(minfuncwrap,lb=lbounds,ub=ubounds,callback=mycallback,maxNonSuccess=200,maxIter=1e5,maxTime=gmaxtime)
    # result=prob.solve('scipy_lbfgsb')
    # result=prob.solve('scipy_tnc')
    # result=prob.solve('bobyqa')
    # result=prob.solve('ptn')
    # result=prob.solve('slmvm1')
    # result=prob.solve('slmvm2')
    # result=prob.solve('ralg')
    # result=prob.solve('scipy_cobyla') #good!!
    # result=prob.solve('mma')
    # result=prob.solve('auglag')
    # result=prob.solve('gsubg')

    xopt = result.xf

    #    prob2 = prob = NLP(minfuncwrap, xopt , lb = lbounds, ub = ubounds, callback = mycallback, maxNonSuccess = 20, maxIter = 1e5, maxTime = gmaxtime)
    #    result = prob2.solve('scipy_cobyla')
    #    xopt = result.xf

    print "Minimum function chisq", result.ff

    nrgauss, a_final, r_final, sig_final = x2parms(xopt)

    print "G\tA\t\tx0\t\tsigma"
    nr = 1
    for a, r, sig in zip(a_final, r_final, sig_final):
        print "%d\t%f\t%f\t%f\t" % (nr, a, r, sig)
        nr += 1

    gaussians = (a_final * numpy.exp(-(xxarr.T - r_final)**2 /
                                     (2.0 * sig_final**2)))
    r_prdist = gaussians.sum(axis=1)
    e_fitprdist = numpy.dot(r_prdist, tmatrix.getMatrix())

    r_prdist /= r_prdist.sum()
    e_fitprdist /= e_fitprdist.sum()

    return r_prdist, xarr, e_fitprdist, (a_final, r_final, sig_final)
Example #14
# the latter (using p.scale) is more recommended
# because it affects xtol for those solvers
# that use OO stop criteria
# (ralg, lincher, nsmm, nssolve and maybe some others):
# xtol is compared against the scaled x shift,
# i.e. whether || (x[k] - x[k-1]) * scale || < xtol

# You can define scale and diffInt as
# numpy arrays, matrices, Python lists, tuples

p = NLP(f, x0, c=c, scale=[1, coeff], **someModifiedStopCriteria)
r = p.solve('ipopt')
print(r.ff, r.xf)  # "24.999996490694787 [  1.50000004e+01   8.00004473e+09]" - much better
"""
GLP (GLobal Problem from OpenOpt set) example for FuncDesigner model:
searching for global minimum of the func 
(x-1.5)**2 + sin(0.8 * y ** 2 + 15)**4 + cos(0.8 * z ** 2 + 15)**4 + (t-7.5)**4
subjected to some constraints
See http://openopt.org/GLP for more info and examples.
"""
from openopt import GLP
from FuncDesigner import *

x, y, z, t = oovars(4)

# define objective
f = (x - 1.5)**2 + sin(0.8 * y**2 + 15)**4 + cos(0.8 * z**2 + 15)**4 + (t - 7.5)**4
Example #15
from openopt import GLP
from numpy import *

f = lambda x: (x[0]-1.5)**2 + sin(0.8 * x[1] ** 2 + 15)**4 + cos(0.8 * x[2] ** 2 + 15)**4 + (x[3]-7.5)**4
p = GLP(f, lb = -ones(4),  ub = ones(4),  maxIter = 1e3,  maxFunEvals = 1e5,  maxTime = 3,  maxCPUTime = 3)

#optional: graphic output
#p.plot = 1

r = p.solve('pswarm', x0=[0, 0, 0, 0], plot=0, debug=1, maxIter=200)
x_opt,  f_opt = r.xf,  r.ff
Example #16
          b=b)
pro.plot = True
rSLSQP = pro.solve('ralg')

pro = NLP(f=objLegrand.cost,
          x0=rPswarm.xf,
          df=objLegrand.sensitivity,
          lb=lb,
          ub=ub,
          A=A,
          b=b)
pro.plot = True
rSLSQP = pro.solve('scipy_slsqp')

# GLP
pro = GLP(f=objLegrand.cost, x0=theta, df=objLegrand.sensitivity, lb=lb, ub=ub)
pro.plot = True
rPswarm = pro.solve('pswarm')

pro2 = GLP(f=objLegrand.cost,
           x0=rPswarm.xf,
           df=objLegrand.sensitivity,
           lb=lb,
           ub=ub)
pro2.plot = True
rPswarm2 = pro2.solve('pswarm')

pro3 = NLP(f=objLegrand.cost,
           x0=rPswarm2.xf,
           df=objLegrand.sensitivity,
           lb=lb,
Example #17
pro.plot = True
rLincher = pro.solve('lincher')

pro = NLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro = NLP(f=objLegrand.cost,x0=resDE['x'],df=objLegrand.sensitivity,lb=lb,ub=ub)
pro.plot = True
rSLSQP = pro.solve('scipy_slsqp')

# GLP
pro = GLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro = GLP(f=objLegrand.cost,x0=resDE['x'],df=objLegrand.sensitivity,lb=lb,ub=ub)
pro.plot = True
rPswarm = pro.solve('pswarm',size=50,maxFunEvals=100000,maxIter=1000)


pro2 = GLP(f=objLegrand.cost,x0=rPswarm.xf,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro2.plot = True
rPswarm2 = pro2.solve('pswarm')

pro3 = NLP(f=objLegrand.cost,x0=rPswarm2.xf,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro3.plot = True
rPswarm3 = pro3.solve('ralg')


pro = GLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro.plot = True
rGalileo = pro.solve('galileo')

pro = GLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro.plot = True
rDe = pro.solve('de')
Example #18
pro.plot = True
rSLSQP = pro.solve('ralg')

pro = NLP(f=objLegrand.cost,x0=rPswarm.xf,df=objLegrand.sensitivity,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rSLSQP = pro.solve('scipy_slsqp')



objLegrand.cost(rPswarm.xf)
objLegrand.cost()
gSens,output=objLegrand.sensitivity(full_output=True)
solution = output['sens'][:,:6]

# here we pretend the bounds don't exist
pro2 = GLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro2.plot = True
r2 = pro2.solve('galileo')

pro = GLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rDe = pro.solve('de')

pro = GLP(f=objLegrand.cost,x0=theta,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rPswarm = pro.solve('pswarm',size=100)

pro = GLP(f=objLegrand.cost,x0=rPswarm.xf,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rPswarm = pro.solve('pswarm',size=100,maxIter=1000,maxFunEvals=100000)
Example #19
from openopt import GLP
from numpy import *

f = lambda x: (x[0]-1.5)**2 + sin(0.8 * x[1] ** 2 + 15)**4 + cos(0.8 * x[2] ** 2 + 15)**4 + (x[3]-7.5)**4
p = GLP(f, lb = -ones(4),  ub = ones(4),  maxIter = 1e3,  maxFunEvals = 1e5,  maxTime = 3,  maxCPUTime = 3)

#optional: graphic output
# p.plot = 1, or pass plot=1 to p.solve(...), or to GLP(...)

r = p.solve('de', plot=1)
x_opt,  f_opt = r.xf,  r.ff
Example #20
from openopt import GLP
from numpy import *
N = 100
aN = arange(N)
f = lambda x: ((x-aN)**2).sum()
p = GLP(f, lb = -ones(N),  ub = N*ones(N),  maxIter = 1e3,  maxFunEvals = 1e5,  maxTime = 10,  maxCPUTime = 300)

#optional: graphic output
#p.plot = 1

r = p.solve('de', plot=1, debug=1, iprint=0)
x_opt,  f_opt = r.xf,  r.ff
Example #21
"""
GLP (GLobal Problem from OpenOpt set) example for FuncDesigner model:
searching for global minimum of the func 
(x-1.5)**2 + sin(0.8 * y ** 2 + 15)**4 + cos(0.8 * z ** 2 + 15)**4 + (t-7.5)**4
subjected to some constraints
See http://openopt.org/GLP for more info and examples.
"""
from openopt import GLP
from FuncDesigner import *

x, y, z, t = oovars(4)

# define objective
f = (x - 1.5)**2 + sin(0.8 * y**2 + 15)**4 + cos(0.8 * z**2 + 15)**4 + (t - 7.5)**4

# define some constraints
constraints = [
    x < 1, x > -1, y < 1, y > -1, z < 1, z > -1, t < 1, t > -1,
    x + 2 * y > -1.5,
    sinh(x) + cosh(z) + sinh(t) < 2.0
]

# add some more constraints via Python "for" cycle
M = 10
for i in range(M):
    func = i * x + (M - i) * y + sinh(z) + cosh(t)
    constraints.append(func < i + 1)

# define start point. You can use variables with length > 1 as well
Example #22
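# NOTE: as in Example #8, this fragment assumes oovars x, y, z, a, b, c,
# distributions A, B, C and an oofun f were defined earlier (not shown)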
objective = 0.15 * mean(f + 2 * x) + x * cos(y + 2 * z) + z * var(b) * std(c) + y * P(a - z + b * sin(c) > 5)

constraints = [
    P(a ** 2 - z + b * c < 4.7) < 0.03,  # by default constraint tolerance is 10^-6
    (P(c / b + z > sin(x)) > 0.02)(tol=1e-10),  # use tol 10^-10 instead; especially useful for equality constraints
    mean(b + y) <= 3.5,
]

startPoint = {x: 0, y: 0, z: 0, a: A, b: B, c: C}

""" This is multiextremum problem (due to sin, cos etc),
thus we have to use global nonlinear solver capable of handling nonlinear constraints
(BTW having probability functions P() make it even discontinuous for discrete distribution(s) involved)
"""

p = GLP(objective, startPoint, constraints=constraints)
solver = "de"  # named after "differential evolution", check http://openopt.org/GLP for other available global solvers
r = p.maximize(solver, maxTime=150, maxDistributionSize=100)
""" output for Intel Atom 1.6 GHz (may differ due to random numbers involved in solver "de")
------------------------- OpenOpt 0.39 -------------------------
solver: de   problem: unnamed    type: GLP
 iter   objFunVal   log10(MaxResidual/ConTol)   
    0  6.008e+00                      8.40 
   10  6.638e+00                   -100.00 
   20  7.318e+00                   -100.00 
   30  7.423e+00                   -100.00 
   40  7.475e+00                   -100.00 
   50  7.490e+00                   -100.00 
   53  7.490e+00                   -100.00 
istop: -9 (max time limit has been reached)
Solver:   Time Elapsed = 150.34 	CPU Time Elapsed = 145.24
Example #23
pro = NLP(f=objLegrand.cost,x0=rPswarm.xf,df=objLegrand.sensitivity,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rSLSQP = pro.solve('ralg')

pro = NLP(f=objLegrand.cost,x0=rPswarm.xf,df=objLegrand.sensitivity,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rSLSQP = pro.solve('scipy_slsqp')


# GLP
pro = GLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro.plot = True
rPswarm = pro.solve('pswarm')


pro2 = GLP(f=objLegrand.cost,x0=rPswarm.xf,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro2.plot = True
rPswarm2 = pro2.solve('pswarm')

pro3 = NLP(f=objLegrand.cost,x0=rPswarm2.xf,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro3.plot = True
rPswarm3 = pro3.solve('ralg')


pro2 = GLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub)
pro2.plot = True
r2 = pro2.solve('galileo')

pro = GLP(f=objLegrand.cost,x0=theta,df=objLegrand.sensitivity,lb=lb,ub=ub,A=A,b=b)
pro.plot = True
rDe = pro.solve('de')
Example #24
def fittingOpenopt(pearr, tmatrix, minR, maxR, lbounds, ubounds, gmaxtime, pfact, res):
    nrgauss = int((len(lbounds) + 1) / 3)
    rvecbins = tmatrix.getMatrix().shape[0]
    myrange = maxR - minR
    xarr = numpy.linspace(minR + myrange / rvecbins / 2, maxR - myrange / rvecbins / 2, rvecbins)
    xxarr = numpy.array([xarr] * nrgauss)

    if pfact == 0:
        print("Will not apply a penalty for gaussian proximity.")
        minfuncwrap = lambda x: gaussSQDiff(x, tmatrix.getMatrix(), pearr, xxarr)
    else:
        print("Will penalize gaussians which are closer than %d times the sum of both sigmas." % pfact)
        minfuncwrap = lambda x: penalizeCloseGauss(x, tmatrix.getMatrix(), pearr, xxarr, pfact)

    lines_distance, lines_efficiency, g_lines, chsql, chisqs, chisqax = createLivePlot(nrgauss, pearr, tmatrix, xarr, lbounds, ubounds)

    mycallback = lambda p: plotCallback(p, lines_distance, lines_efficiency, g_lines, xxarr, tmatrix, chsql, chisqs, chisqax)

    print "Starting openopt ##########################"
    prob = GLP(minfuncwrap, lb = lbounds, ub = ubounds, callback = mycallback, maxFunEvals = 1e15, maxNonSuccess = 200, maxIter = 1e5, maxTime = gmaxtime, fEnough = res)
    result = prob.solve('de', population = 1000 * len(lbounds))

    # result=prob.solve('asa')
    # result=prob.solve('galileo') # not good
    # result=prob.solve('pswarm')
    # prob = GLP(minfuncwrap,lb=lbounds,ub=ubounds,callback=mycallback,maxNonSuccess=200,maxIter=1e5,maxTime=gmaxtime)
    # result=prob.solve('isres',population=100*len(lbounds))
    # prob = NLP(minfuncwrap,lb=lbounds,ub=ubounds,callback=mycallback,maxNonSuccess=200,maxIter=1e5,maxTime=gmaxtime)
    # result=prob.solve('scipy_lbfgsb')
    # result=prob.solve('scipy_tnc')
    # result=prob.solve('bobyqa')
    # result=prob.solve('ptn')
    # result=prob.solve('slmvm1')
    # result=prob.solve('slmvm2')
    # result=prob.solve('ralg')
    # result=prob.solve('scipy_cobyla') #good!!
    # result=prob.solve('mma')
    # result=prob.solve('auglag')
    # result=prob.solve('gsubg')

    xopt = result.xf

#    prob2 = prob = NLP(minfuncwrap, xopt , lb = lbounds, ub = ubounds, callback = mycallback, maxNonSuccess = 20, maxIter = 1e5, maxTime = gmaxtime)
#    result = prob2.solve('scipy_cobyla')
#    xopt = result.xf


    print "Minimum function chisq", result.ff

    nrgauss, a_final, r_final, sig_final = x2parms(xopt)

    print "G\tA\t\tx0\t\tsigma"
    nr = 1
    for a, r, sig in zip(a_final, r_final, sig_final):
        print "%d\t%f\t%f\t%f\t" % (nr, a, r, sig)
        nr += 1

    gaussians = (a_final * numpy.exp(-(xxarr.T - r_final) ** 2 / (2.0 * sig_final ** 2)))
    r_prdist = gaussians.sum(axis = 1)
    e_fitprdist = numpy.dot(r_prdist, tmatrix.getMatrix())

    r_prdist /= r_prdist.sum()
    e_fitprdist /= e_fitprdist.sum()

    return r_prdist, xarr, e_fitprdist, (a_final, r_final, sig_final)
Example #25
# box-bound constraints lb <= x <= ub
lb, ub = -ones(4), ones(4)

# linear inequality constraints
# x0 + x3 <= 0.15
# x1 + x3 <= 1.5
# as Ax <= b

A = mat('1 0 0 1; 0 1 0 1')  # tuple, list, numpy array etc are OK as well
b = [0.15, 1.5]  # tuple, list, numpy array etc are OK as well

# non-linear constraints
# x0^2 + x2^2 <= 0.15
# 1.5 * x0^2 + x1^2 <= 1.5

c = lambda x: (x[0]**2 + x[2]**2 - 0.15, 1.5 * x[0]**2 + x[1]**2 - 1.5)

p = GLP(f,
        lb=lb,
        ub=ub,
        A=A,
        b=b,
        c=c,
        maxIter=250,
        maxFunEvals=1e5,
        maxTime=30,
        maxCPUTime=30)

r = p.solve('de', mutationRate=0.15, plot=1)
x_opt, f_opt = r.xf, r.ff
Example #26
    fresults.close()

    if DB:
      try:
        DB.results.insert({
          "score":best[0],
          "date":datetime.datetime.now(),
          "params":vars[beta],
          "range":rng,
          "beta":beta,
          "tested_vars":var_names
        })
      except Exception as e:
        print("MONGO INSERT ERROR: %s" % e)

  return ret



p = GLP(getscore, x0=startPoint, lb=lbs, ub=ubs, maxIter=1000, maxFunEvals=10000)
p.fOpt = 170000  # the known optimal value, used by OpenOpt as a reference (e.g. in plots)

r = p.maximize('de', iprint=1, plot=0) #, population=10) #, searchDirectionStrategy="best")
#r = p.maximize('galileo', iprint=1, plot=1, population=5)
#r = p.maximize('gsubg', iprint=1, plot=0)

#r = p.maximize('asa', iprint=1, plot=1)

print "Solution vector: %s" % p.xf
print "Max value: %s" % p.ff
Example #27
theta = [0.2,0.2]

# y = copy.copy(solution[:,1:2])
# y[1::] = y[1::] + numpy.random.normal(loc=0.0,scale=0.1,size=numStep-1)

# odeSIR = odeLossFunc.squareLoss([0.5,1.0/3.0] ,ode,x0,t0,t[1:len(t)],y[1:len(t)],'R')

objSIR = odeLossFunc.squareLoss(theta,ode,x0,t0,t[1::],solution[1::,1:3],['I','R'])

box = [(0.,2.),(0.,2.)]
npBox = numpy.array(box)
lb = npBox[:,0]
ub = npBox[:,1]

pro = GLP(f=objSIR.cost,x0=theta,lb=lb,ub=ub)
pro.plot = True
rGalileo = pro.solve('galileo')

pro = GLP(f=objSIR.cost,x0=theta,lb=lb,ub=ub)
pro.plot = True
rDe = pro.solve('de')

pro = GLP(f=objSIR.cost,x0=theta,lb=lb,ub=ub)
pro.plot = True
rPSwarm = pro.solve('pswarm')

pro = NLP(f=objSIR.cost,df=objSIR.sensitivity,x0=theta,lb=lb,ub=ub)
pro.plot = True
rLincher = pro.solve('lincher')