def init_problem(self, n):
    """Set up the n-point discretized Brachistochrone problem for SNOPT.

    Parameters
    ----------
    n : int
        Number of discretization points along x in [0, 1]; the n-2 interior
        y values are the design variables (endpoints are fixed).

    Returns
    -------
    (opt_prob, optimizer) : (pyop.Optimization, pyop.SNOPT)
    """
    print(f"Solving Brachistochrone problem with n = {n}...")
    self.n = n
    self.x_arr = np.linspace(0, 1, n)  # fixed x grid
    # Initial guess for the interior y values: straight line from ~1 down to ~0.
    y_inner = np.linspace(1 - 1 / n, 1 / n, n - 2)

    # Storage for the optimal/final y points; start endpoint y[0] = 1 is fixed.
    self.y_arr = np.zeros(n)
    self.y_arr[0] = 1.0

    # Optimization problem
    opt_prob: pyop.Optimization = pyop.Optimization(
        'brachistochrone', self.objfunc)
    # Design variables: only the interior points are free, unbounded.
    opt_prob.addVarGroup('y', nVars=n - 2, type='c', value=y_inner,
                         lower=None, upper=None)
    # Key under which self.objfunc reports the objective value.
    opt_prob.addObj('obj')

    # Optimizer (removed dead machine-specific `path` local; re-enable SNOPT
    # output files like so if needed):
    optimizer = pyop.SNOPT()
    # optimizer.setOption('iPrint', 0)
    # optimizer.setOption('Print file', f'<output dir>/SNOPT_print-{n}.out')
    # optimizer.setOption('Summary file', f'<output dir>/SNOPT_summary-{n}.out')
    return opt_prob, optimizer
def init_problem(self):
    """Set up the ten-bar-truss mass-minimization problem and its optimizer.

    Stores the configured ``pyop.Optimization`` in ``self.opt_prob`` and the
    SNOPT instance in ``self.optimizer`` (nothing is returned).
    """
    # Optimization problem
    self.opt_prob: pyop.Optimization = pyop.Optimization(
        'trusty', self.objfunc)

    # Design variables: one cross-sectional area per member, with a minimum
    # area of 0.1 and no upper bound.
    self.opt_prob.addVarGroup('areas', nVars=10, type='c', value=self.areas,
                              lower=0.1, upper=None)

    # Constraints: yield in compression and tension for each member.
    # Member index 8 has a higher allowable stress (75e3 vs 25e3).
    stress_yield = np.array([25e3] * 10)
    stress_yield[8] = 75e3
    self.opt_prob.addConGroup('stress_arr', nCon=10,
                              lower=-stress_yield, upper=stress_yield)

    # Key under which self.objfunc reports the objective value.
    self.opt_prob.addObj('mass')

    self.optimizer = pyop.SNOPT()
    self.optimizer.setOption('iPrint', 0)
    # TODO(review): machine-specific output path; parameterize if reused.
    path = '/home/seth/school/optimization/output/'
    # (dropped redundant f-prefixes: these strings have no placeholders)
    self.optimizer.setOption('Print file', path + 'SNOPT_print.out')
    self.optimizer.setOption('Summary file', path + 'SNOPT_summary.out')
def optimize_farmcost(*args):
    """Minimize wind-farm cost of energy (COE) over the two design variables."""
    y0 = np.array([9, 10])  # starting point
    funcs, _ = obj_farmcost({"y": y0})
    print(f'initial COE: {funcs["coe"]}')

    # Build the optimization problem around the COE objective.
    problem: pyop.Optimization = pyop.Optimization('coe', obj_farmcost)

    # Design variables with simple bounds.
    problem.addVarGroup('y', nVars=2, value=y0, lower=0, upper=10)
    # The requirement y3 > y2 is expressed as a one-sided linear constraint.
    problem.addConGroup('y3y2', nCon=1, lower=1e-5)
    problem.addObj('coe')

    # Deterministic optimization with finite-difference sensitivities.
    snopt = pyop.SNOPT()
    sol: pyop.pyOpt_solution.Solution = snopt(problem, sens='FD')

    print(f'{sol.fStar.item() = }')
    print(f'{sol.xStar["y"] = }')
    obj_farmcost(sol.xStar, plotit=False)
# NOTE(review): fragment — the enclosing scope (where `lower`, `optProb`, and
# `pyopt` are defined) begins before this chunk; formatting restored only.
upper = [25e3] * 10
upper[8] = 75e3  # member index 8 gets a higher allowable (75e3 vs 25e3)
# set_trace()

# Constraints
optProb.addConGroup('con', 10, lower=lower, upper=upper)

# Objective
optProb.addObj('obj')

# Check optimization problem:
print(optProb)

# Optimizer
optimizer = pyopt.SNOPT()
sol = optimizer(optProb, storeHistory=f"output/opt_histTruss.hst")

# Check Solution
print(sol)

# Earlier scipy-based attempt kept for reference:
# start = np.ones(10) * 0.1
#
# fit = minimize(solve,
#                start,
#                method="SLSQP",
#                constraints=all_constraints,
#                options={'disp':True},
#                callback=callback)
#
#
#### Output values for debugging
# NOTE(review): fragment — the trailing `data = {...}` literal is cut off
# mid-dict and the *_list variables are defined outside this chunk;
# formatting restored only, no tokens changed.
p = np.ones(12) * 1e-4  # initial value handed to the 12 design variables
optProb = pyoptsparse.Optimization('Differential_Flatness', objective)
optProb.addVarGroup('xvars', 12, 'c', lower=None, upper=None, value=p)
# Boundary-condition equality constraints (lower == upper pins the value).
eq_bnd = [0.0, 0.0, 0.0, 2.0, 10.0, 0.0, 0.0, 1.0]
optProb.addConGroup('eq_con', 8, lower=eq_bnd, upper=eq_bnd)
if use_gamma:
    optProb.addConGroup(
        'ineq_con', 400, lower=0.0, upper=None
    )  # I can get an answer but the constraints are violated and it runs into numerical difficulties
else:
    optProb.addConGroup('ineq_con', 200, lower=0.0, upper=None)
optProb.addObj('obj')
opt = pyoptsparse.SNOPT()
# Complex-step sensitivities; history written for post-processing.
sol = opt(optProb, sens='CS', storeHistory='constrained.txt')
data = {
    'px': px_list,
    'py': py_list,
    'x': x_list,
    'y': y_list,
    'vx': vx_list,
    'vy': vy_list,
    'ax': ax_list,
    'ay': ay_list,
    'Jx': Jx_list,
    'Jy': Jy_list,
    'gam': gam_list,
    'gam2': gam2_list,
def Pyoptsparse_Solve(problem, solver='SNOPT', FD='single', sense_step=1.0E-6,
                      nonderivative_line_search=False):
    """Convert a SUAVE Nexus problem into a pyOptSparse problem and solve it.

    Pyoptsparse has many algorithms; they can be switched out via `solver`.

    Inputs:
       problem                     [nexus()]
       solver                      [str]
       FD ('parallel' or 'single') [str]
       sense_step                  [float]
       nonderivative_line_search   [bool]

    Outputs:
       outputs                     [list]
    """
    # Have the optimizer call the wrapper
    mywrap = lambda x: PyOpt_Problem(problem, x)

    inp = problem.optimization_problem.inputs
    obj = problem.optimization_problem.objective
    con = problem.optimization_problem.constraints

    if FD == 'parallel':
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        myrank = comm.Get_rank()  # rank lookup kept from original (unused here)

    # Instantiate the problem and set objective
    try:
        import pyoptsparse as pyOpt
    except ImportError:  # was a bare `except:`; only an import failure is expected
        raise ImportError('No version of pyOptsparse found')

    opt_prob = pyOpt.Optimization('SUAVE', mywrap)
    for ii in range(len(obj)):
        opt_prob.addObj(obj[ii, 0])

    # Set inputs
    nam = inp[:, 0]  # Names
    ini = inp[:, 1]  # Initials
    bnd = inp[:, 2]  # Bounds
    scl = inp[:, 3]  # Scale
    typ = inp[:, 4]  # Type

    # Pull out the constraints and scale them
    bnd_constraints = help_fun.scale_const_bnds(con)
    scaled_constraints = help_fun.scale_const_values(con, bnd_constraints)

    x = ini / scl
    for ii in range(0, len(inp)):
        lbd = (bnd[ii][0] / scl[ii])
        ubd = (bnd[ii][1] / scl[ii])
        #if typ[ii] == 'continuous':
        vartype = 'c'
        #if typ[ii] == 'integer':
        #vartype = 'i'
        opt_prob.addVar(nam[ii], vartype, lower=lbd, upper=ubd, value=x[ii])

    # Setup constraints: '<' upper bound, '>' lower bound, '=' equality.
    for ii in range(0, len(con)):
        name = con[ii][0]
        edge = scaled_constraints[ii]
        if con[ii][1] == '<':
            opt_prob.addCon(name, upper=edge)
        elif con[ii][1] == '>':
            opt_prob.addCon(name, lower=edge)
        elif con[ii][1] == '=':
            opt_prob.addCon(name, lower=edge, upper=edge)

    # Finalize problem statement and run
    print(opt_prob)

    if solver == 'SNOPT':
        opt = pyOpt.SNOPT()
        CD_step = (sense_step**2.)**(1. / 3.)  # based on SNOPT Manual Recommendations
        opt.setOption('Function precision', sense_step**2.)
        opt.setOption('Difference interval', sense_step)
        opt.setOption('Central difference interval', CD_step)
    elif solver == 'SLSQP':
        opt = pyOpt.SLSQP()
    elif solver == 'FSQP':
        opt = pyOpt.FSQP()
    elif solver == 'PSQP':
        opt = pyOpt.PSQP()
    elif solver == 'NSGA2':
        opt = pyOpt.NSGA2(pll_type='POA')
    elif solver == 'ALPSO':
        #opt = pyOpt.pyALPSO.ALPSO(pll_type='DPM') #this requires DPM, which is a parallel implementation
        opt = pyOpt.ALPSO()
    elif solver == 'CONMIN':
        opt = pyOpt.CONMIN()
    elif solver == 'IPOPT':
        opt = pyOpt.IPOPT()
    elif solver == 'NLPQLP':
        opt = pyOpt.NLPQLP()  # BUGFIX: was pyOpt.NLQPQLP(), which does not exist
    elif solver == 'NLPY_AUGLAG':
        opt = pyOpt.NLPY_AUGLAG()

    if nonderivative_line_search:
        opt.setOption('Nonderivative linesearch')

    if FD == 'parallel':
        outputs = opt(opt_prob, sens='FD', sensMode='pgc')
    elif solver == 'SNOPT' or solver == 'SLSQP':
        outputs = opt(opt_prob, sens='FD', sensStep=sense_step)
    else:
        outputs = opt(opt_prob)

    return outputs
subject to mu_CL = 0.5 """ uq_systemsize = 2 ndv = 5 UQObj = UQOASExample1Opt(uq_systemsize) collocation_obj = StochasticCollocation(5, "Normal") collocation_con = collocation_obj collocation_grad_obj = StochasticCollocation(5, "Normal", QoI_dimensions=ndv) collocation_grad_con = collocation_grad_obj optProb = pyoptsparse.Optimization('UQ_OASExample1', objfunc) optProb.addVarGroup('xvars', ndv, 'c', lower=-10., upper=15.) optProb.addConGroup('con', 1, lower=0.5, upper=0.5) optProb.addObj('obj') opt = pyoptsparse.SNOPT(optOptions={'Major feasibility tolerance': 1e-10}) sol = opt(optProb, sens=sens) # sol = opt(optProb, sens='FD') # Error Calculation full_integration_val = 0.03428059998452251 reduced_fval = sol.fStar err = abs(full_integration_val - reduced_fval) rel_err = abs((full_integration_val - reduced_fval) / full_integration_val) print sol print "integration val = ", sol.fStar print "error val = ", err print "rel_err val = ", rel_err
def init_problem(self):
    """Build the minimum-jerk differential-flatness car trajectory problem.

    Precomputes the time grid and the polynomial basis/derivative tables used
    by ``self.objfunc``, then assembles the pyOptSparse problem and a SNOPT
    optimizer.

    Returns
    -------
    (opt_prob, optimizer) : (pyop.Optimization, pyop.SNOPT)
    """
    # Small nonzero initial guess for the 6 polynomial coefficients per axis.
    px = np.zeros(6) + 1e-4
    py = np.zeros(6) + 1e-4

    # Time discretization of the trajectory.
    self.n = 100
    self.t = np.linspace(0, 15, self.n)

    # Tables of t raised to the exponent of each polynomial term.
    self.tpos_exp = self.t**np.arange(6).reshape(6, 1)                     # position
    self.tvel_exp = self.t**np.array([0, 0, 1, 2, 3, 4]).reshape(6, 1)    # velocity
    self.tacc_exp = self.t**np.array([0, 0, 0, 1, 2, 3]).reshape(6, 1)    # acceleration
    self.tjerk_exp = self.t**np.array([0, 0, 0, 0, 1, 2]).reshape(6, 1)   # jerk

    # Constant multipliers produced by differentiating each polynomial term.
    self.cvel = np.arange(6, dtype=np.float64)
    self.cacc = np.array([0, 0, 2, 6, 12, 20], dtype=np.float64)
    self.cjerk = np.array([0, 0, 0, 6, 24, 60], dtype=np.float64)

    # Optimization problem
    opt_prob: pyop.Optimization = pyop.Optimization(
        'differential_flat', self.objfunc)

    # Design variables: one unbounded 6-coefficient polynomial per axis.
    for group, guess in (('px', px), ('py', py)):
        opt_prob.addVarGroup(group, nVars=6, type='c', value=guess,
                             lower=None, upper=None)

    # Boundary conditions.
    x0, y0, xf, yf = [0., 0., 10., 0.]
    vx0, vy0, vxf, vyf = [0., 2., 0., 1.]

    self.L = 1.5  # length of car
    gam_max = np.pi / 4
    self.vmax_square = 10**2  # vmax = 10 m/s
    self.amax_square = 2**2  # amax = 2 m/s**2

    #### CONSTRAINTS ####
    # Start/finish equalities: lower == upper pins each value.
    opt_prob.addConGroup('initial pos', nCon=2, lower=[x0, y0], upper=[x0, y0])
    opt_prob.addConGroup('initial vel', nCon=2, lower=[vx0, vy0], upper=[vx0, vy0])
    opt_prob.addConGroup('final pos', nCon=2, lower=[xf, yf], upper=[xf, yf])
    opt_prob.addConGroup('final vel', nCon=2, lower=[vxf, vyf], upper=[vxf, vyf])

    # Path constraints applied at every one of the n time samples.
    opt_prob.addConGroup('v', nCon=self.n, lower=0, upper=self.vmax_square)
    opt_prob.addConGroup('a', nCon=self.n, lower=0, upper=self.amax_square)
    # opt_prob.addConGroup('gam_max', nCon=self.n, lower=-gam_max, upper=gam_max)
    opt_prob.addConGroup('gam_plus', nCon=self.n, lower=0, upper=None)
    opt_prob.addConGroup('gam_minus', nCon=self.n, lower=0, upper=None)

    # Key under which self.objfunc reports the objective value.
    opt_prob.addObj('obj-min-jerk')

    # Optimizer
    optimizer = pyop.SNOPT()
    # optimizer.setOption('iPrint', 0)
    # optimizer.setOption('iSumm', 0)
    # optimizer.setOption('Print file', '<output dir>/SNOPT_print.out')
    # optimizer.setOption('Summary file', '<output dir>/SNOPT_summary.out')
    return opt_prob, optimizer
def __init__(self):
    """Set up the minimum-jerk quadrotor trajectory problem over (x, y, z, psi).

    Precomputes the time grid and polynomial derivative tables used by
    ``self.objfunc``, builds the pyOptSparse problem with one 6-coefficient
    polynomial per flat output, and stores the problem/optimizer on ``self``.
    """
    # Small nonzero initial guess shared by all four coefficient groups.
    p0 = np.zeros(6) + 1e-4

    # Time discretization of the trajectory.
    self.n = 100
    self.t = np.linspace(0, 15, self.n)

    # Tables of t raised to the exponent of each polynomial term.
    self.tpos_exp = self.t**np.arange(6).reshape(6, 1)                     # position
    self.tvel_exp = self.t**np.array([0, 0, 1, 2, 3, 4]).reshape(6, 1)    # velocity
    self.tacc_exp = self.t**np.array([0, 0, 0, 1, 2, 3]).reshape(6, 1)    # acceleration
    self.tjerk_exp = self.t**np.array([0, 0, 0, 0, 1, 2]).reshape(6, 1)   # jerk

    # Constant multipliers produced by differentiating each polynomial term.
    self.cvel = np.arange(6, dtype=np.float64)
    self.cacc = np.array([0, 0, 2, 6, 12, 20], dtype=np.float64)
    self.cjerk = np.array([0, 0, 0, 6, 24, 60], dtype=np.float64)

    # Optimization problem
    opt_prob: pyop.Optimization = pyop.Optimization(
        'differential_flat_quad', self.objfunc)

    # Design variables: unbounded polynomial coefficients for x, y, z, psi.
    opt_prob.addVarGroup('px', nVars=6, type='c', value=p0, lower=None, upper=None)
    opt_prob.addVarGroup('py', nVars=6, type='c', value=p0, lower=None, upper=None)
    opt_prob.addVarGroup('pz', nVars=6, type='c', value=p0, lower=None, upper=None)
    opt_prob.addVarGroup('ps', nVars=6, type='c', value=p0, lower=None, upper=None)

    # Boundary conditions, ENU frame: [x, y, z, psi].
    pos0 = np.array([0., 0., 0., 0.])
    posf = np.array([0., 10., 10., 0.])
    vel0 = np.array([0., 2., 2., 0.])
    velf = np.array([0., 2., 0., 0.])

    self.L = 1.5  # length of car
    gam_max = np.pi / 4  # referenced only by the disabled constraint below
    self.vmax_square = 10**2  # vmax = 10 m/s
    self.amax_square = 2**2  # amax = 2 m/s**2

    #### CONSTRAINTS ####
    # Start/finish equalities: lower == upper pins each boundary state.
    opt_prob.addConGroup('initial pos', nCon=4, lower=pos0, upper=pos0)
    opt_prob.addConGroup('initial vel', nCon=4, lower=vel0, upper=vel0)
    opt_prob.addConGroup('final pos', nCon=4, lower=posf, upper=posf)
    opt_prob.addConGroup('final vel', nCon=4, lower=velf, upper=velf)

    # Path constraints from the car problem, currently disabled:
    # opt_prob.addConGroup('v', nCon=self.n, lower=0, upper=self.vmax_square)
    # opt_prob.addConGroup('a', nCon=self.n, lower=0, upper=self.amax_square)
    # opt_prob.addConGroup('gam_max', nCon=self.n, lower=-gam_max, upper=gam_max)
    # opt_prob.addConGroup('gam_plus', nCon=self.n, lower=0, upper=None)
    # opt_prob.addConGroup('gam_minus', nCon=self.n, lower=0, upper=None)

    # Key under which self.objfunc reports the objective value.
    opt_prob.addObj('obj-min-jerk')

    # Optimizer
    optimizer = pyop.SNOPT()
    # optimizer.setOption('iPrint', 0)
    # optimizer.setOption('iSumm', 0)
    # TODO(review): machine-specific output path; parameterize if reused.
    path = '/home/seth/school/optimization/output/'
    # (dropped redundant f-prefixes: these strings have no placeholders)
    optimizer.setOption('Print file', path + 'SNOPT-hw5.out')
    optimizer.setOption('Summary file', path + 'SNOPT-hw5-summary.out')

    self.opt_prob: pyop.Optimization = opt_prob
    self.optimizer: pyop.SNOPT = optimizer
# NOTE(review): fragment — the first three lines are the tail of an
# `optProb.addVar(...)` call that opens before this chunk; formatting
# restored only, no tokens changed.
              upper=0.01,
              scale=1.e3,
              value=sc_sol_dict[dict_val]['thickness_cp'])
optProb.addVar('sweep', lower=10., upper=30.,
               value=sc_sol_dict[dict_val]['sweep'])
optProb.addVar('alpha', lower=-10., upper=10.,
               value=sc_sol_dict[dict_val]['alpha'])

# Constraints
optProb.addConGroup('con_failure', 1, upper=0.)
optProb.addConGroup('con_L_equals_W', 1, lower=0., upper=0.)
optProb.addConGroup('con_thickness_intersects', n_thickness_intersects,
                    upper=0., wrt=['thickness_cp'])
optProb.addConGroup('con_CM', n_CM, lower=-0.001, upper=0.001)
# Only the third twist control point is pinned (to 5.); the others are free.
optProb.addConGroup('con_twist_cp', 3,
                    lower=np.array([-1e20, -1e20, 5.]),
                    upper=np.array([1e20, 1e20, 5.]),
                    wrt=['twist_cp'])

# Objective
optProb.addObj('obj', scale=1.e-1)

opt = pyoptsparse.SNOPT(options={'Major feasibility tolerance': 1e-9,
                                 'Verify level': -1})
sol = opt(optProb, sens=sens_uq)
end_time = time.time()
elapsed_time = end_time - start_time

print(sol)
print(sol.fStar)
print()
print("twist = ", UQObj.QoI.p['oas_scaneagle.wing.geometry.twist'])
print("thickness =", UQObj.QoI.p['oas_scaneagle.wing.thickness'])
print('\nthickness_cp = ', UQObj.QoI.p['oas_scaneagle.wing.thickness_cp'])
print('twist_cp = ', UQObj.QoI.p['oas_scaneagle.wing.twist_cp'])
print("sweep = ", UQObj.QoI.p['oas_scaneagle.wing.sweep'])
print("aoa = ", UQObj.QoI.p['oas_scaneagle.alpha'])
print()
# NOTE(review): the first lines below are the tail of an `objfunc` definition
# that opens before this chunk (its `def` line and the `funcs`/`x` setup are
# not visible here); the remainder is the top-level TP037 driver script.
# Formatting restored only, no tokens changed.
    conval = [0] * 2
    conval[0] = x[0] + 2. * x[1] + 2. * x[2] - 72.0
    conval[1] = -x[0] - 2. * x[1] - 2. * x[2]
    funcs['con'] = conval
    fail = False
    return funcs, fail


optProb: pyop.Optimization = pyop.Optimization('TP037', objfunc)

# Design variables - can be created individually or as a group
optProb.addVarGroup('xvars', nVars=3, type='c',
                    lower=[0, 0, 0], upper=[42, 42, 42], value=10)

# Constraints - also individually or group
optProb.addConGroup('con', nCon=2, lower=None, upper=0.0)
optProb.addObj('obj')
print(optProb)

opt = pyop.SNOPT()
opt.setOption('iPrint', 0)
opt.setOption('iSumm', 0)
sol = opt(optProb, sens='FD')
print(sol)