def max_variance(self, XB, number=1, exclude=[]):
    assert XB.shape[0] == self.ndim, 'wrong dimension'

    # start from a point guaranteed to lie outside the bounds,
    # so the while loop below runs at least once
    X_min = [0.0] * len(XB)
    X_min[0] = XB[0][0] - 1.0

    while not is_in(X_min, XB):
        obj = self.objective
        prob = pyOpt.Optimization('Variance Maximization', obj)
        for ix in range(XB.shape[0]):
            prob.addVar('X%i' % ix, 'c', lower=XB[ix, 0], upper=XB[ix, 1], value=0.)
        prob.addObj('Estimated Variance')

        # global search with ALPSO ...
        opt_ALPSO = pyOpt.ALPSO(pll_type=None)
        #opt_ALPSO = pyOpt.ALPSO(pll_type='MP', args=[1.0])
        opt_ALPSO.setOption('fileout', 0)
        opt_ALPSO.setOption('maxOuterIter', 10)
        opt_ALPSO.setOption('stopCriteria', 1)
        #opt_ALPSO.setOption('SwarmSize', self.ndim*100)
        opt_ALPSO.setOption('SwarmSize', self.ndim*20)

        # ... refined locally with SLSQP
        opt_SLSQP = pyOpt.SLSQP()
        opt_SLSQP.setOption('IPRINT', -1)
        opt_SLSQP.setOption('ACC', 1e-5)

        vec = []
        for index in range(number*10):
            print index+1, ' so far: ', len(vec)
            [YI_min, X_min, Info] = opt_ALPSO(prob)
            [YI_min, X_min, Info] = opt_SLSQP(prob.solution(index), sens_type='FD')
            # keep the point only if it is new and inside the bounds
            if not is_already_in(X_min, vec + exclude) and is_in(X_min, XB):
                vec.append(X_min.tolist())
            if len(vec) >= number:
                break

    if len(vec) == 1:
        return vec[0]
    return vec
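# A minimal, self-contained sketch of the ALPSO-then-SLSQP pattern used in
# max_variance above, on a toy unconstrained problem. The names ('toy_objective',
# 'x0', 'x1') and option values are illustrative, not from the original source;
# pyOpt objective callbacks return the (f, g, fail) triple.
import pyOpt

def toy_objective(x):
    f = (x[0] - 1.0)**2 + (x[1] + 0.5)**2  # simple quadratic bowl
    g = []                                 # no constraints
    fail = 0
    return f, g, fail

prob = pyOpt.Optimization('toy', toy_objective)
prob.addVar('x0', 'c', lower=-5.0, upper=5.0, value=0.0)
prob.addVar('x1', 'c', lower=-5.0, upper=5.0, value=0.0)
prob.addObj('f')

alpso = pyOpt.ALPSO()          # global search
alpso.setOption('fileout', 0)
alpso(prob)

slsqp = pyOpt.SLSQP()          # local gradient-based refinement of the global result
slsqp.setOption('IPRINT', -1)
[fstr, xstr, inform] = slsqp(prob.solution(0), sens_type='FD')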
#                'c', lower=lower_bound[6], upper=upper_bound[6], value=p1[6])
# (the opening of the addVar call above is truncated in the source)
opt_prob.addVar('nua', 'c', lower=lower_bound[7], upper=upper_bound[7], value=p1[7])
opt_prob.addObj('f')
if myrank == 0:
    print opt_prob

# optimize (note: despite the variable name, this is ALPSO, not PSQP)
psqp = pyOpt.ALPSO(pll_type='DPM')
psqp.setOption('printOuterIters', 1)
#psqp.setOption('maxOuterIter', 1)
#psqp.setOption('stopCriteria', 0)
psqp.setOption('SwarmSize', swarmSize)
psqp(opt_prob)
print opt_prob.solution(0)

# collect the optimized parameter values from the first stored solution
popt = numpy.zeros(len(p1))
for i in opt_prob._solutions[0]._variables:
    popt[i] = opt_prob._solutions[0]._variables[i].value

model = func_ex(popt, ns, pts_l)
ll_opt = dadi.Inference.ll_multinom(model, data)
mu = 3.5e-9
if myrank == 0:
    pass  # (remainder of this block truncated in the source)
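# A hedged alternative to the private _solutions/_variables access above:
# the same values can be read through pyOpt's public accessors. Assumes the
# same opt_prob as above; 'sol' is an illustrative name.
sol = opt_prob.solution(0)
popt = numpy.array([sol.getVar(i).value for i in range(len(sol.getVarSet()))])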
def runOptimizer(self, opt_prob):
    # type: (pyOpt.Optimization) -> np._ArrayLike[float]
    ''' call global followed by local optimizer, return solution '''
    import pyOpt

    initial = [v.value for v in list(opt_prob.getVarSet().values())]

    if self.config['useGlobalOptimization']:
        ### optimize using pyOpt (global)
        sr = random.SystemRandom()
        if self.config['globalSolver'] == 'NSGA2':
            if parallel:
                opt = pyOpt.NSGA2(pll_type='POA')  # genetic algorithm
            else:
                opt = pyOpt.NSGA2()
            if self.config['globalOptSize'] % 4:
                raise IOError("globalOptSize needs to be a multiple of 4 for NSGA2")
            opt.setOption('PopSize', self.config['globalOptSize'])       # population size (a multiple of 4)
            opt.setOption('maxGen', self.config['globalOptIterations'])  # maximum number of generations
            opt.setOption('PrintOut', 0)  # output to files (0 - none, 1 - subset, 2 - all)
            opt.setOption('xinit', 1)     # initial solution flag (0 - random population, 1 - use given solution)
            opt.setOption('seed', sr.random())  # random number seed 0..1 (0 - auto based on time clock)
            #pCross_real 0.6   # probability of crossover of real variables (0.6-1.0)
            opt.setOption('pMut_real', 0.5)  # probability of mutation of real variables (1/nreal)
            #eta_c 10.0        # distribution index for crossover (5-20), must be > 0
            #eta_m 20.0        # distribution index for mutation (5-50), must be > 0
            #pCross_bin 0.0    # probability of crossover of binary variables (0.6-1.0)
            #pMut_bin 0.0      # probability of mutation of binary variables (1/nbits)
            self.iter_max = self.config['globalOptSize'] * self.config['globalOptIterations']
        elif self.config['globalSolver'] == 'ALPSO':
            if parallel:
                opt = pyOpt.ALPSO(pll_type='SPM')  # augmented Lagrangian particle swarm optimization
            else:
                opt = pyOpt.ALPSO()
            opt.setOption('stopCriteria', 0)  # stop at max iters
            opt.setOption('dynInnerIter', 1)  # dynamic inner iter number
            opt.setOption('maxInnerIter', 5)
            opt.setOption('maxOuterIter', self.config['globalOptIterations'])
            opt.setOption('printInnerIters', 1)
            opt.setOption('printOuterIters', 1)
            opt.setOption('SwarmSize', self.config['globalOptSize'])
            opt.setOption('xinit', 1)
            opt.setOption('seed', sr.random() * self.mpi_size)  #(self.mpi_rank+1)/self.mpi_size)
            #opt.setOption('vcrazy', 1e-2)
            #TODO: how to properly limit max number of function calls?
            # no. func calls = (SwarmSize * inner) * outer + SwarmSize
            self.iter_max = opt.getOption('SwarmSize') * opt.getOption('maxInnerIter') * \
                opt.getOption('maxOuterIter') + opt.getOption('SwarmSize')
            self.iter_max = self.iter_max // self.mpi_size
        else:
            print("Solver {} not defined".format(self.config['globalSolver']))
            sys.exit(1)

        # run global optimization
        #try:
        #    # reuse history
        #    opt(opt_prob, store_hst=False, hot_start=True)  #, xstart=initial)
        #except NameError:
        if self.config['verbose']:
            print('Running global optimization with {}'.format(self.config['globalSolver']))
        self.is_global = True
        opt(opt_prob, store_hst=False)  #, xstart=initial)
        if self.mpi_rank == 0:
            print(opt_prob.solution(0))
        self.gather_solutions()

    ### pyOpt local
    if self.config['useLocalOptimization']:
        print("Running local gradient-based solver")
        # TODO: run local optimization for e.g. the three last best results (global
        # solutions could be more or less optimal within their local minima)

        # after global optimization, refine the solution with a gradient-based method
        # init optimizer (more or less local)
        if self.config['localSolver'] == 'SLSQP':
            opt2 = pyOpt.SLSQP()  # sequential least squares
            opt2.setOption('MAXIT', self.config['localOptIterations'])
            if self.config['verbose']:
                opt2.setOption('IPRINT', 0)
        elif self.config['localSolver'] == 'IPOPT':
            opt2 = pyOpt.IPOPT()
            opt2.setOption('linear_solver', 'ma57')  # mumps or hsl: ma27, ma57, ma77, ma86, ma97 or mkl: pardiso
            opt2.setOption('max_iter', self.config['localOptIterations'])
            if self.config['verbose']:
                opt2.setOption('print_level', 4)  # 0 none ... 5 max
            else:
                opt2.setOption('print_level', 0)
        elif self.config['localSolver'] == 'PSQP':
            opt2 = pyOpt.PSQP()
            opt2.setOption('MIT', self.config['localOptIterations'])  # max iterations
            #opt2.setOption('MFV', ??)  # max function evaluations
        elif self.config['localSolver'] == 'COBYLA':
            if parallel:
                opt2 = pyOpt.COBYLA(pll_type='POA')
            else:
                opt2 = pyOpt.COBYLA()
            opt2.setOption('MAXFUN', self.config['localOptIterations'])  # max function evaluations
            opt2.setOption('RHOBEG', 0.1)  # initial step size
            if self.config['verbose']:
                opt2.setOption('IPRINT', 2)
        self.iter_max = self.local_iter_max

        # use best constrained solution from last run (might be better than what the solver thinks)
        if len(self.last_best_sol) > 0:
            for i in range(len(opt_prob.getVarSet())):
                opt_prob.getVar(i).value = self.last_best_sol[i]

        if self.config['verbose']:
            print('Running local optimization with {}'.format(self.config['localSolver']))
        self.is_global = False
        if self.config['localSolver'] in ['COBYLA', 'CONMIN']:
            opt2(opt_prob, store_hst=False)
        else:
            if parallel:
                opt2(opt_prob, sens_step=0.1, sens_mode='pgc', store_hst=False)
            else:
                opt2(opt_prob, sens_step=0.1, store_hst=False)
        self.gather_solutions()

    if self.mpi_rank == 0:
        sol = opt_prob.solution(0)
        print(sol)
        #sol_vec = np.array([sol.getVar(x).value for x in range(0, len(sol.getVarSet()))])

        if len(self.last_best_sol) > 0:
            print("using last best constrained solution instead of given solver solution.")
            print("testing final solution")
            #self.iter_cnt = 0
            self.objectiveFunc(self.last_best_sol, test=True)
            print("\n")
            return self.last_best_sol
        else:
            print("No feasible solution found!")
            sys.exit(-1)
    else:
        # parallel sub-processes, close
        sys.exit(0)
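# A compact sketch of the global-then-local chaining that runOptimizer
# implements, with the config/MPI machinery stripped away. The toy objective
# (Himmelblau) and option values are illustrative, not from the original source.
import pyOpt

def objfunc(x):
    f = (x[0]**2 + x[1] - 11.0)**2 + (x[0] + x[1]**2 - 7.0)**2
    return f, [], 0

prob = pyOpt.Optimization('global+local sketch', objfunc)
prob.addVar('x0', 'c', lower=-6.0, upper=6.0, value=0.0)
prob.addVar('x1', 'c', lower=-6.0, upper=6.0, value=0.0)
prob.addObj('f')

glob = pyOpt.ALPSO()
glob.setOption('SwarmSize', 40)
glob.setOption('maxOuterIter', 20)
glob(prob, store_hst=False)

# seed the local solver with the global result, mirroring the
# getVar(i).value warm-start used above
sol = prob.solution(0)
for i in range(len(prob.getVarSet())):
    prob.getVar(i).value = sol.getVar(i).value

loc = pyOpt.SLSQP()
loc(prob, sens_type='FD', store_hst=False)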
def identifyFeasibleStdFromFeasibleBase(self, xBase):
    self.xBase_feas = xBase

    # formulate problem as objective function
    if self.idf.opt['optInFeasibleParamSpace']:
        opt = pyOpt.Optimization('Constrained OLS', self.minimizeSolToCADFeasible)
    else:
        opt = pyOpt.Optimization('Constrained OLS', self.minimizeSolToCADStd)
    opt.addObj('u')

    '''
    x_cons = self.mapStdToConsistent(self.idf.model.xStd[self.start_param:self.idf.model.num_model_params])
    test = self.mapConsistentToStd(x_cons)
    print(test - self.idf.model.xStd[self.start_param:self.idf.model.num_model_params])
    '''

    self.addVarsAndConstraints(opt)

    # set previous solution as starting point (the primal should already be within
    # constraints for most solvers to perform well)
    if self.idf.opt['optInFeasibleParamSpace']:
        x_cons = self.mapStdToConsistent(
            self.idf.model.xStd[self.start_param:self.idf.model.num_model_params])
        # atm, we have 16*no_link + n_dof vars
        for i in range(len(opt.getVarSet())):
            if i < len(x_cons):
                opt.getVar(i).value = x_cons[i]
            else:
                j = i - len(x_cons)
                opt.getVar(i).value = self.model.xStd[self.idf.model.num_model_params + j]
    else:
        for i in range(len(opt.getVarSet())):
            opt.getVar(i).value = self.model.xStd[i + self.start_link * self.per_link]

    if self.idf.opt['verbose']:
        print(opt)

    if self.idf.opt['nlOptSolver'] == 'IPOPT':
        # not necessarily deterministic
        if self.idf.opt['verbose']:
            print('Using IPOPT')
        solver = pyOpt.IPOPT()
        #solver.setOption('linear_solver', 'ma97')  # mumps or hsl: ma27, ma57, ma77, ma86, ma97 or mkl: pardiso
        # for details, see http://www.gams.com/latest/docs/solvers/ipopt/index.html#IPOPTlinear_solver
        solver.setOption('max_iter', self.idf.opt['nlOptMaxIterations'])
        solver.setOption('print_level', 3)  # 0 none ... 5 max
        # don't start too far away from initial values (boundaries push even if starting inside the feasible set)
        solver.setOption('bound_push', 0.0000001)
        solver.setOption('bound_frac', 0.0000001)
        # don't relax bounds
        solver.setOption('bound_relax_factor', 0.0)  #1e-16)
    elif self.idf.opt['nlOptSolver'] == 'SLSQP':
        # solve optimization problem
        if self.idf.opt['verbose']:
            print('Using SLSQP')
        solver = pyOpt.SLSQP(disp_opts=True)
        solver.setOption('MAXIT', self.idf.opt['nlOptMaxIterations'])
        if self.idf.opt['verbose']:
            solver.setOption('IPRINT', 0)
    elif self.idf.opt['nlOptSolver'] == 'PSQP':
        # solve optimization problem
        if self.idf.opt['verbose']:
            print('Using PSQP')
        solver = pyOpt.PSQP(disp_opts=True)
        solver.setOption('MIT', self.idf.opt['nlOptMaxIterations'])
        if self.idf.opt['verbose']:
            solver.setOption('IPRINT', 0)
    elif self.idf.opt['nlOptSolver'] == 'ALPSO':
        if self.idf.opt['verbose']:
            print('Using ALPSO')
        solver = pyOpt.ALPSO(disp_opts=True)
        solver.setOption('stopCriteria', 0)
        solver.setOption('dynInnerIter', 1)  # dynamic inner iter number
        solver.setOption('maxInnerIter', 5)
        solver.setOption('maxOuterIter', self.idf.opt['nlOptMaxIterations'])
        solver.setOption('printInnerIters', 0)
        solver.setOption('printOuterIters', 0)
        solver.setOption('SwarmSize', 100)
        solver.setOption('xinit', 1)
    elif self.idf.opt['nlOptSolver'] == 'NSGA2':
        if self.idf.opt['verbose']:
            print('Using NSGA2')
        solver = pyOpt.NSGA2(disp_opts=True)
        solver.setOption('PopSize', 100)  # population size (a multiple of 4)
        solver.setOption('maxGen', self.idf.opt['nlOptMaxIterations'])  # maximum number of generations
        solver.setOption('PrintOut', 0)  # output to files (0 - none, 1 - subset, 2 - all)
        solver.setOption('xinit', 1)     # initial solution flag (0 - random population, 1 - use given solution)
        #solver.setOption('seed', sr.random())  # random number seed 0..1 (0 - auto based on time clock)
        #pCross_real 0.6   # probability of crossover of real variables (0.6-1.0)
        solver.setOption('pMut_real', 0.5)  # probability of mutation of real variables (1/nreal)
        #eta_c 10.0        # distribution index for crossover (5-20), must be > 0
        #eta_m 20.0        # distribution index for mutation (5-50), must be > 0
        #pCross_bin 0.0    # probability of crossover of binary variables (0.6-1.0)
        #pMut_bin 0.0      # probability of mutation of binary variables (1/nbits)
    else:
        print('Solver unknown')

    self.opt_prob = opt
    solver(opt)  # run optimizer

    # set best solution again (it is often different from the final solver solution)
    if self.last_best_x is not None:
        for i in range(len(opt.getVarSet())):
            opt.getVar(i).value = self.last_best_x[i]
    else:
        self.last_best_x = self.model.xStd[self.start_param:]

    sol = opt.solution(0)
    if self.idf.opt['verbose']:
        print(sol)

    if self.idf.opt['optInFeasibleParamSpace'] and \
            len(self.last_best_x) > len(self.model.xStd[self.start_param:]):
        # the solution comes as consistency-parameterized params
        x_std = self.mapConsistentToStd(self.last_best_x)
        self.model.xStd[self.start_param:self.idf.model.num_model_params] = x_std
    else:
        # the solution comes as std vars
        self.model.xStd[self.start_param:] = self.last_best_x
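# A minimal sketch of what a helper like addVarsAndConstraints boils down to
# in pyOpt terms: declare variables with addVar and constraints with addCon,
# and return the constraint values in g from the objective callback (pyOpt
# treats inequality constraints as g <= 0). All names and values here are
# illustrative, not from the original source.
import pyOpt

def constrained_objfunc(x):
    f = x[0]**2 + x[1]**2
    g = [1.0 - x[0] - x[1]]  # g <= 0, i.e. x0 + x1 >= 1
    return f, g, 0

opt = pyOpt.Optimization('constrained sketch', constrained_objfunc)
opt.addObj('u')
opt.addVar('x0', 'c', lower=-2.0, upper=2.0, value=0.5)
opt.addVar('x1', 'c', lower=-2.0, upper=2.0, value=0.5)
opt.addCon('g0', 'i')  # one inequality constraint

solver = pyOpt.SLSQP()
solver.setOption('MAXIT', 100)
solver(opt, sens_type='FD')
print(opt.solution(0))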
def optimizePyopt(self, solver, sens, options):
    # pyOpt requires empty options to be specified as {}, not None
    if options is None:
        options = {}

    # pass equality constraints explicitly only for solvers where this wrapper
    # supports it; otherwise they become tight (lower == upper) inequalities
    eqConstr = ('SLSQP' not in solver and 'CONMIN' not in solver and
                'COBYLA' not in solver and 'FILTERSD' not in solver and
                'SDPEN' not in solver)

    # Organize constraints
    indLinEq = []
    indLinIneq = []
    indNonlinEq = []
    indNonlinIneq = []
    for k, bnd in enumerate(self.linBounds):
        if bnd[0] == bnd[1] and eqConstr:
            indLinEq.append(k)
        else:
            indLinIneq.append(k)
    indLinEq = np.array(indLinEq)
    indLinIneq = np.array(indLinIneq)
    for k, bnd in enumerate(self.nonlinBounds):
        if bnd[0] == bnd[1] and eqConstr:
            indNonlinEq.append(k)
        else:
            indNonlinIneq.append(k)
    indNonlinEq = np.array(indNonlinEq)
    indNonlinIneq = np.array(indNonlinIneq)

    # pyOpt objective
    def objectivePyopt(xIn, *args, **kwargs):
        x = xIn[:self.nvar]
        f = self.objective(x)
        g = np.zeros(0, dtype=float)
        if len(indLinEq) > 0:
            g = np.r_[g, np.dot(self.linMat[indLinEq, :], x)]
        if len(indNonlinEq) > 0:
            g = np.r_[g, self.evalNonlinConstraints(x, 'constr', indNonlinEq)]
        if len(indLinIneq) > 0:
            g = np.r_[g, np.dot(self.linMat[indLinIneq, :], x)]
        if len(indNonlinIneq) > 0:
            g = np.r_[g, self.evalNonlinConstraints(x, 'constr', indNonlinIneq)]
        fail = 0
        if f >= self.inf or np.any(g >= self.inf):
            fail = 1
        return f, g, fail

    # pyOpt gradient
    def gradientPyopt(xIn, f, g, *args, **kwargs):
        x = xIn[:self.nvar]
        df = self.gradient(x)
        dg = np.zeros((0, self.nvar), dtype=float)
        if len(indLinEq) > 0:
            dg = np.r_[dg, self.linMat[indLinEq, :]]
        if len(indNonlinEq) > 0:
            dg = np.r_[dg, self.evalNonlinConstraints(x, 'jac', indNonlinEq)]
        if len(indLinIneq) > 0:
            dg = np.r_[dg, self.linMat[indLinIneq, :]]
        if len(indNonlinIneq) > 0:
            dg = np.r_[dg, self.evalNonlinConstraints(x, 'jac', indNonlinIneq)]
        fail = 0
        if f >= self.inf or np.any(g >= self.inf):
            fail = 1
        return df.reshape((1, -1)), dg, fail

    # Instantiate optimization problem
    optProb = pyOpt.Optimization('pyopt', objectivePyopt)

    # Add objective
    optProb.addObj('objective')

    # Add variables
    optProb.addVarGroup('var', self.nvar, type='c', value=self.varInit,
                        lower=self.varBounds[:, 0], upper=self.varBounds[:, 1])

    # Add constraints
    if len(indLinEq) > 0:
        optProb.addConGroup('lin-equality', len(indLinEq), type='e',
                            equal=self.linBounds[indLinEq, 0])
    if len(indNonlinEq) > 0:
        optProb.addConGroup('nonlin-equality', len(indNonlinEq), type='e',
                            equal=self.nonlinBounds[indNonlinEq, 0])
    if len(indLinIneq) > 0:
        optProb.addConGroup('lin-inequality', len(indLinIneq), type='i',
                            lower=self.linBounds[indLinIneq, 0],
                            upper=self.linBounds[indLinIneq, 1])
    if len(indNonlinIneq) > 0:
        optProb.addConGroup('nonlin-inequality', len(indNonlinIneq), type='i',
                            lower=self.nonlinBounds[indNonlinIneq, 0],
                            upper=self.nonlinBounds[indNonlinIneq, 1])

    # Setup solver
    if 'SNOPT' in solver:
        optimizer = pyOpt.SNOPT(options=options)
    if 'SLSQP' in solver:
        optimizer = pyOpt.SLSQP(options=options)
    if 'CONMIN' in solver:
        optimizer = pyOpt.CONMIN(options=options)
    if 'ALGENCAN' in solver:
        optimizer = pyOpt.ALGENCAN(options=options)
    if 'ALPSO' in solver:
        optimizer = pyOpt.ALPSO(options=options)
    if 'ALHSO' in solver:
        optimizer = pyOpt.ALHSO(options=options)
    if 'COBYLA' in solver:
        optimizer = pyOpt.COBYLA(options=options)
    if 'FILTERSD' in solver:
        optimizer = pyOpt.FILTERSD(options=options)
    if 'KOPT' in solver:
        optimizer = pyOpt.KOPT(options=options)
    if 'MIDACO' in solver:
        optimizer = pyOpt.MIDACO(options=options)
    if 'KSQP' in solver:
        optimizer = pyOpt.KSQP(options=options)
    if 'SDPEN' in solver:
        optimizer = pyOpt.SDPEN(options=options)
    if 'SOLVOPT' in solver:
        optimizer = pyOpt.SOLVOPT(options=options)

    # Run optimization
    if sens == 'finite-difference':
        optimizer(optProb, sens_type='FD')
    else:
        optimizer(optProb, sens_type=gradientPyopt)

    # Extract solution
    j = len(optProb._solutions) - 1
    xStar = np.zeros(self.nvar)
    for k in range(self.nvar):
        xStar[k] = optProb._solutions[j].getVar(k).value

    return xStar, objectivePyopt(xStar)[0]
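# A small sketch of the callback contract optimizePyopt relies on: the
# objective returns (f, g, fail) and a user-supplied sensitivity function
# returns (df, dg, fail) with df shaped (1, nvar) and dg shaped (ncon, nvar).
# Toy problem; all names and values are illustrative.
import numpy as np
import pyOpt

def obj(x, *args, **kwargs):
    f = x[0]**2 + 2.0 * x[1]**2
    g = np.array([1.0 - x[0] - x[1]])  # one inequality constraint, g <= 0
    return f, g, 0

def grad(x, f, g, *args, **kwargs):
    df = np.array([[2.0 * x[0], 4.0 * x[1]]])  # objective gradient, (1, nvar)
    dg = np.array([[-1.0, -1.0]])              # constraint Jacobian, (ncon, nvar)
    return df, dg, 0

prob = pyOpt.Optimization('callback sketch', obj)
prob.addObj('f')
prob.addVarGroup('x', 2, type='c', value=[0.5, 0.5],
                 lower=[-5.0, -5.0], upper=[5.0, 5.0])
prob.addConGroup('g', 1, type='i')

slsqp = pyOpt.SLSQP()
slsqp(prob, sens_type=grad)  # or sens_type='FD' for finite differences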
#                lower=0.0, upper=1.5 * max(Data), value=1.0 * max(Data))
# (the opening of the addVar call above is truncated in the source)
opt_prob.addVar('LifeTime', 'c', lower=0.000000001, upper=30.0, value=15.0)
opt_prob.addVar('Offset', 'c', lower=0.0, upper=1.0 * Data[0], value=0.0)
opt_prob.addVar('ScatterAmp', 'c',
                lower=0.0, upper=0.5 * max(Data) / max(AvgIRF),
                value=0.1 * max(Data) / max(AvgIRF))
opt_prob.addObj('Minimize Chi**2', value=0.0, optimum=0.0)
print opt_prob

#optimizer = pyOpt.SDPEN()
min_func = pyOpt.ALPSO()
min_func.setOption('printOuterIters', 1000)
min_func.setOption('printInnerIters', 1000)
min_func.setOption('SwarmSize', 1000)
[fstr, xstr, inform] = min_func(opt_prob, disp_opts=True)

print "!------------------------------!"
print opt_prob.solution(0)
print "!------------------------------!"
print fstr
print "!------------------------------!"
print xstr
print "!------------------------------!"
print inform
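# For reference, the triple unpacked above follows pyOpt's optimizer call
# convention: fstr is the best objective value, xstr the corresponding design
# vector, and inform the solver's exit status. A hedged sketch of the kind of
# chi**2 objective such a setup might minimize; 'decay_model' is hypothetical
# and not from the original source.
def chi2_objective(x):
    Amp, LifeTime, Offset, ScatterAmp = x[0], x[1], x[2], x[3]
    model = decay_model(Amp, LifeTime, Offset, ScatterAmp)  # hypothetical model function
    chi2 = sum((m - d)**2 for m, d in zip(model, Data))
    return chi2, [], 0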