def run(self, plot=True):
    """ Solves the optimization problem. """
    # Initial guess
    p0 = self.get_p0()

    # Lower and upper bounds (HARDCODED FOR QUADTANK)
    lbound = N.array([0.0001] * len(p0))
    if self.gridsize == 1:
        ubound = [10.0] * (self.gridsize * self.nbr_us)
    else:
        ubound = [10.0] * (self.gridsize * self.nbr_us) + \
                 [0.20, 0.20, 0.20, 0.20, N.inf] * (self.gridsize - 1)

    # UPPER BOUND FOR VDP
    #ubound = [0.75]*(self.gridsize*self.nbr_us) + \
    #         [N.inf]*((self.gridsize-1)*self.nbr_ys)

    if self.verbosity >= Multiple_Shooting.NORMAL:
        print 'Initial parameter vector: '
        print p0
        print 'Lower bound:', len(lbound)
        print 'Upper bound:', len(ubound)

    # Get OpenOPT handler
    p_solve = NLP(self.f, p0, lb=lbound, ub=ubound,
                  maxFunEvals=self.maxFeval, maxIter=self.maxIter,
                  ftol=self.ftol, maxTime=self.maxTime)

    # If multiple shooting is performed (rather than single shooting)
    if self.gridsize > 1:
        p_solve.h = self.h

    if plot:
        p_solve.plot = 1

    self.opt = p_solve.solve(self.optMethod)

    return self.opt
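# Hypothetical usage sketch: assumes `ms` is an already-constructed
# Multiple_Shooting instance (its constructor is not shown above).
# The returned object is the usual OpenOpt result.
opt = ms.run(plot=False)               # solve without OpenOpt's live plot
print 'Optimal parameters:', opt.xf    # minimizer found by the solver
print 'Objective value:', opt.ff       # objective at that point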
    r[0] += 15  # incorrect derivative
    r[8] += 80  # incorrect derivative
    return r
p.df = df

p.c = lambda x: [2 * x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]
def dc(x):
    r = zeros((2, p.n))
    r[0, 0] = 2 * 4 * x[0]**3
    r[1, 1] = 2 * x[1]
    r[1, 2] = 2 * x[2] + 15  # incorrect derivative
    return r
p.dc = dc

p.h = lambda x: (1e1*(x[-1]-1)**4, (x[-2]-1.5)**4)
def dh(x):
    r = zeros((2, p.n))
    r[0, -1] = 1e1*4*(x[-1]-1)**3
    r[1, -2] = 4*(x[-2]-1.5)**3 + 15  # incorrect derivative
    return r
p.dh = dh

p.checkdf()
p.checkdc()
p.checkdh()
"""
you can use p.checkdF(x) for other point than x0 (F is f, c or h)
p.checkdc(myX)
or
# other valid c:
# p.c = [lambda x: c1(x), lambda x: c2(x), lambda x: c3(x)]
# p.c = (lambda x: c1(x), lambda x: c2(x), lambda x: c3(x))
# p.c = lambda x: numpy.array(c1(x), c2(x), c3(x))
# def c(x):
#     return c1(x), c2(x), c3(x)
# p.c = c

# non-linear equality constraints h(x) = 0
# 1e6*(x[last]-1)**4 = 0
# (x[last-1]-1.5)**4 = 0
#h1 = lambda x: 1e4*(x[-1]-1)**4
#h2 = lambda x: (x[-2]-1.5)**4
#p.h = [h1, h2]
h_args = (h, k, l, fq, fqerr, x, z, cosmat_list)
p.h = [pos_sum, neg_sum, chisq]
# p.h = [pos_sum, neg_sum]
p.args.h = h_args
p.args.f = (h, k, l, fq, fqerr, x, z, cosmat_list)
#p.args.f = h_args

# dh(x)/dx: non-lin eq constraints gradients (optional):
#def DH(x):
#    r = zeros((2, p.n))
#    r[0, -1] = 1e4*4 * (x[-1]-1)**3
#    r[1, -2] = 4 * (x[-2]-1.5)**3
#    return r
#p.dh = DH
# p.dh = [chisq_grad, pos_sum_grad]

p.contol = 1e-2  #3  # required constraints tolerance, default for NLP is 1e-6

# for the ALGENCAN solver, gradtol is the only stop criterion connected to openopt
ff = lambda x: ((x - M)**2).sum()
p = NLP(ff, cos(arange(N)))
p.df = lambda x: 2 * (x - M)

p.c = lambda x: [2 * x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]
def dc(x):
    r = zeros((2, p.n))
    r[0, 0] = 2 * 4 * x[0]**3
    r[1, 1] = 2 * x[1]
    r[1, 2] = 2 * x[2]
    return r
p.dc = dc

h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
p.h = lambda x: (h1(x), h2(x))
def dh(x):
    r = zeros((2, p.n))
    r[0, -1] = 1e1*4*(x[-1]-1)**3
    r[1, -2] = 4*(x[-2]-1.5)**3
    return r
p.dh = dh

p.lb = -6*ones(N)
p.ub = 6*ones(N)
p.lb[3] = 5.5
p.ub[4] = 4.5

#r = p.solve('ipopt', showLS=0, xtol=1e-7, maxIter=1504)
#solver = 'ipopt'
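# The solve call above is commented out; a minimal way to actually run
# this problem is sketched below. The solver choice is an assumption --
# 'ralg' is OpenOpt's built-in NLP solver, so no external install
# (such as ipopt) is required.
r = p.solve('ralg')
print 'istop:', r.istop   # solver stop-reason code
print 'x*:', r.xf         # the point the solver returned
print 'f(x*):', r.ff      # objective value at that point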
""" OpenOpt GUI: function manage() usage example """ from openopt import NLP, manage from numpy import cos, arange, ones, asarray, abs, zeros N = 50 M = 5 p = NLP(lambda x: ((x - M)**2).sum(), cos(arange(N))) p.lb, p.ub = -6 * ones(N), 6 * ones(N) p.lb[3] = 5.5 p.ub[4] = 4.5 p.c = lambda x: [2 * x[0]**4 - 32, x[1]**2 + x[2]**2 - 8] p.h = (lambda x: 1e1 * (x[-1] - 1)**4, lambda x: (x[-2] - 1.5)**4) """ minTime is used here for to provide enough time for user to play with GUI """ minTime = 1.5 # sec p.name = 'GUI_example' p.minTime = minTime """ hence maxIter, maxFunEvals etc will not trigger till minTime only same iter point x_k-1=x_k or some coords = nan can stop calculations
# (except maxfun, maxiter)
# Note that in ALGENCAN gradtol means norm of projected gradient of the
# Augmented Lagrangian, so it should be something like 1e-3...1e-5
p.gradtol = 1e-5  #5  # gradient stop criterion (default for NLP is 1e-6)
#print 'maxiter', p.maxiter
#print 'maxfun', p.maxfun
p.maxIter = 50
# p.maxfun = 100
#p.df_iter = 50
p.maxTime = 4000

h_args = (h, k, l, fq, fqerr, x, z, cosmat_list, coslist, flist)
if 0:
    p.h = [pos_sum, neg_sum]
    p.c = [chisq]
    p.args.h = h_args
    p.args.c = h_args
    p.dh = [pos_sum_grad, neg_sum_grad]
    p.df = chisq_grad
if 1:
    #p.h = [pos_sum, neg_sum, chisq]
    p.c = [chisq]
    p.h = [pos_sum, neg_sum]
    p.args.h = h_args
    p.args.c = h_args
    p.dh = [pos_sum_grad, neg_sum_grad]
    p.dc = chisq_grad
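# The fragment stops before the solver call. Given the ALGENCAN-specific
# comments above, the run plausibly finishes like this sketch; the
# solver string is an assumption.
r = p.solve('algencan')
print 'x*:', r.xf
print 'f(x*):', r.ff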
def run_optimization(self, plot=True, _only_check_gradients=False):
    """Start/run the optimization procedure and return the optimum
    (None if only gradient checking is performed).

    Set the keyword parameter 'plot' to False (default=True) if
    plotting should not be conducted.
    """
    grid = self.get_grid()
    model = self.get_model()

    # Initial guess
    p0 = self.get_p0()

    # Less than (-0.5 < u < 1)
    # TODO: These are currently hard coded. They shouldn't be.
    #NLT = len(grid) * len(model.u)
    #Alt = N.zeros( (NLT, len(p0)) )
    #Alt[:, (len(grid) - 1) * len(model.x):] = -N.eye(len(grid) *
    #                                                 len(model.u))
    #blt = -0.5 * N.ones(NLT)

    # TODO: These are currently hard coded. They shouldn't be.
    #N_xvars = (len(grid) - 1) * len(model.x)
    #N_uvars = len(grid) * len(model.u)
    #N_vars = N_xvars + N_uvars
    #Alt = -N.eye(N_vars)
    #blt = N.zeros(N_vars)
    #blt[0:N_xvars] = -N.ones(N_xvars) * 0.001
    #blt[N_xvars:] = -N.ones(N_uvars) * 1

    # Get OpenOPT handler
    p = NLP(self.f, p0, maxIter=1e3, maxFunEvals=1e3,
            #A=Alt,  # See TODO above
            #b=blt,  # See TODO above
            df=self.df,
            ftol=1e-4, xtol=1e-4, contol=1e-4)
    if len(grid) > 1:
        p.h = self.h
        p.dh = self.dh

    if plot:
        p.plot = 1
    p.iprint = 1

    if _only_check_gradients:
        # Check gradients against finite difference quotients
        p.checkdf(maxViolation=0.05)
        p.checkdh()
        return None

    #opt = p.solve('ralg')  # does not work - serious convergence issues
    opt = p.solve('scipy_slsqp')

    if plot:
        plot_control_solutions(model, grid, opt.xf)

    return opt
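# Hypothetical call pattern (the object name `shooter` and its
# construction are assumptions; only the method above is shown).
# First verify the supplied analytic gradients, then do the real run:
shooter.run_optimization(plot=False, _only_check_gradients=True)
opt = shooter.run_optimization(plot=True)
print 'Optimal objective:', opt.ff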
my_oofun.args = (...)
they will be passed to derivative function as well (if you have supplied it)
"""

from openopt import NLP
from numpy import asfarray

f = lambda x, a: (x**2).sum() + a * x[0]**4
x0 = [8, 15, 80]
p = NLP(f, x0)

# using c(x) <= 0 constraints
p.c = lambda x, b, c: (x[0] - 4)**2 - 1 + b * x[1]**4 + c * x[2]**4

# using h(x) = 0 constraints
p.h = lambda x, d: (x[2] - 4)**2 + d * x[2]**4 - 15

p.args.f = 4  # i.e. here we use a=4
# so it's the same as "a = 4; p.args.f = a" or just "p.args.f = a = 4"
p.args.c = (1, 2)
p.args.h = 15

# Note 1: using the tuple p.args.h = (15,) is valid as well
# Note 2: if all your funcs use the same args, you can just use
#     p.args = (your args)
# Note 3: you could use f = lambda x, a: (...); c = lambda x, a, b: (...);
#     h = lambda x, a: (...)
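# The fragment stops before solving; a minimal completion (the solver
# choice is an assumption). The extra args are forwarded automatically:
# during the solve, f is called as f(x, 4), c as c(x, 1, 2) and
# h as h(x, 15).
r = p.solve('ralg')
print 'solution:', r.xf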
""" OpenOpt GUI: function manage() usage example """ from openopt import NLP, manage from numpy import cos, arange, ones, asarray, abs, zeros N = 50 M = 5 p = NLP(lambda x: ((x-M)**2).sum(), cos(arange(N))) p.lb, p.ub = -6*ones(N), 6*ones(N) p.lb[3] = 5.5 p.ub[4] = 4.5 p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8] p.h = (lambda x: 1e1*(x[-1]-1)**4, lambda x: (x[-2]-1.5)**4) """ minTime is used here for to provide enough time for user to play with GUI """ minTime = 1.5 # sec p.name = 'GUI_example' p.minTime = minTime """ hence maxIter, maxFunEvals etc will not trigger till minTime only same iter point x_k-1=x_k