def __call__(self, optimizer, options=None): """ Run optimization """ system = self._system variables = self._variables opt_prob = OptProblem('Optimization', self.obj_func) for dv_name in variables['dv'].keys(): dv_id = variables['dv'][dv_name]['ID'] value = variables['dv'][dv_name]['value'] lower = variables['dv'][dv_name]['lower'] upper = variables['dv'][dv_name]['upper'] size = system(dv_id).size opt_prob.addVarGroup(dv_name, size, value=value, lower=lower, upper=upper) opt_prob.finalizeDesignVariables() for func_name in variables['func'].keys(): lower = variables['func'][func_name]['lower'] upper = variables['func'][func_name]['upper'] if lower is None and upper is None: opt_prob.addObj(func_name) else: opt_prob.addCon(func_name, lower=lower, upper=upper) if options is None: options = {} opt = Optimizer(optimizer, options=options) sol = opt(opt_prob, sens=self.sens_func) print sol
def __call__(self, optimizer, options=None): """ Run optimization """ system = self._system variables = self._variables opt_prob = OptProblem('Optimization', self.obj_func) for dv_name in variables['dv'].keys(): dv = variables['dv'][dv_name] dv_id = dv['ID'] value = dv['value'] lower = dv['lower'] upper = dv['upper'] size = system.vec['u'](dv_id).shape[0] opt_prob.addVarGroup(dv_name, size, value=value, lower=lower, upper=upper) opt_prob.finalizeDesignVariables() for func_name in variables['func'].keys(): func = variables['func'][func_name] func_id = func['ID'] lower = func['lower'] upper = func['upper'] linear = func['linear'] get_jacs = func['get_jacs'] size = system.vec['u'](func_id).shape[0] if lower is None and upper is None: opt_prob.addObj(func_name) else: if func['get_jacs'] is None: opt_prob.addConGroup(func_name, size, lower=lower, upper=upper) else: jacs_var = get_jacs() dv_names = [] jacs = {} for dv_var in jacs_var: dv_id = self._system.get_id(dv_var) dv_name = self._get_name(dv_id) dv_names.append(dv_name) jacs[dv_name] = jacs_var[dv_var] opt_prob.addConGroup(func_name, size, wrt=dv_names, jac=jacs, linear=linear, lower=lower, upper=upper) if options is None: options = {} opt = Optimizer(optimizer, options=options) opt.setOption('Iterations limit', int(1e6)) #opt.setOption('Verify level', 3) sol = opt(opt_prob, sens=self.sens_func, storeHistory='hist.hst') print sol
dominant_space = DimensionReduction(threshold_factor, exact_Hessian=True) dominant_space.getDominantDirections(QoI, jdist) QoI_func = QoI.eval_QoIGradient funcsSens = {} # funcsSens['obj', 'xvars'] = collocation.normal.mean(mu, sigma, QoI_func) funcsSens['obj', 'xvars'] = collocation.normal.reduced_mean( QoI_func, jdist, dominant_space) fail = False return funcsSens, fail # Optimization Object optProb = Optimization('Paraboloid', objfunc) lower_bound = -20 * np.ones(3) upper_bound = 20 * np.ones(3) optProb.addVarGroup('xvars', 3, 'c', lower=lower_bound, upper=upper_bound, value=10 * np.ones(3)) optProb.addObj('obj') # Optimizer opt = SNOPT(optOptions={'Major feasibility tolerance': 1e-1}) sol = opt(optProb, sens=sens) # Check Solution print(sol)
def optimize(self, sparse=True, tol=None, optOptions={}, storeHistory=False): # set N if sparse: self.N = 50000 else: self.N = 500 # Optimization Object optProb = Optimization("large and sparse", self.objfunc) # Design Variables optProb.addVar("x", lower=-100, upper=150, value=0) optProb.addVarGroup("y", self.N, lower=-10 - np.arange(self.N), upper=np.arange(self.N), value=0) optProb.addVarGroup("z", 2 * self.N, upper=np.arange(2 * self.N), lower=-100 - np.arange(2 * self.N), value=0) # Constraints optProb.addCon("con1", upper=100, wrt=["x"]) optProb.addCon("con2", upper=100) optProb.addCon("con3", lower=4, wrt=["x", "z"]) xJac = np.ones((self.N, 1)) if sparse: yJac = scipy.sparse.spdiags(np.ones(self.N), 0, self.N, self.N) else: yJac = np.eye(self.N) optProb.addConGroup( "lincon", self.N, lower=2 - 3 * np.arange(self.N), linear=True, wrt=["x", "y"], jac={ "x": xJac, "y": yJac }, ) optProb.addObj("obj") # Optimizer try: opt = SNOPT(options=optOptions) except Error: raise unittest.SkipTest("Optimizer not available: SNOPT") sol = opt(optProb, sens=self.sens) # Check Solution if tol is not None: if opt.version != "7.7.7": assert_allclose(sol.objectives["obj"].value, 10.0, atol=tol, rtol=tol) else: assert_allclose(sol.fStar, 10.0, atol=tol, rtol=tol) assert_allclose(sol.variables["x"][0].value, 2.0, atol=tol, rtol=tol) return sol
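# pyOptSparse also accepts constraint Jacobians given as a plain dictionary in CSR
# form, {'csr': [rowp, cols, data], 'shape': [nrows, ncols]}, the format the
# create_pyopt wrapper later in this section builds by hand. A minimal sketch of the
# same diagonal "y" Jacobian from the test above in that form (the helper name is
# illustrative, not part of the test):
def diag_jac_csr(n):
    rowp = list(range(n + 1))   # row pointer: one nonzero per row
    cols = list(range(n))       # column index of each nonzero (the diagonal)
    data = [1.0] * n            # diagonal entries
    return {"csr": [rowp, cols, data], "shape": [n, n]}

# e.g. yJac = diag_jac_csr(self.N) could be passed as jac={"x": xJac, "y": yJac}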
x = xx['xvars'] funcs = {} funcs['obj'] = -x[0]*x[1]*x[2] conval = [0]*2 conval[0] = x[0] + 2.*x[1] + 2.*x[2] - 72.0 conval[1] = -x[0] - 2.*x[1] - 2.*x[2] funcs['con'] = conval fail = False return funcs, fail # Optimization Object optProb = Optimization('TP037 Constraint Problem', objfunc) # Design Variables optProb.addVarGroup('xvars', 3, 'c',lower=[0,0,0], upper=[42,42,42], value=10) # Constraints optProb.addConGroup('con', 2, lower=None, upper=0.0) # Objective optProb.addObj('obj') # Check optimization problem: print(optProb) # Optimizer opt = OPT(args.opt, options=optOptions) # Solution sol = opt(optProb, sens='CS')
    funcs = {}
    funcs["obj"] = -x[0] * x[1] * x[2]
    convec = [0] * 2
    convec[0] = x[0] + 2.0 * x[1] + 2.0 * x[2] - 72.0
    convec[1] = -x[0] - 2.0 * x[1] - 2.0 * x[2]
    funcs["con"] = convec
    fail = False
    return funcs, fail

optProb = Optimization("Quickstart - TP037 Constraint Problem", objconfunc)
optProb.addVarGroup("xvars", 3, "c", lower=[0, 0, 0], upper=[42, 42, 42], value=10)
optProb.addConGroup("con", 2, lower=None, upper=0.0)
optProb.addObj("obj")
print(optProb)

optOptions = {"iPrint": -1}
opt = OPT("SNOPT", options=optOptions)
sol = opt(optProb, sens="FD")
print(sol)
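# Once the call returns, the Solution object can be queried directly for the optimum.
# A minimal sketch continuing the quickstart above (these attributes are the ones
# used elsewhere in this section: getDVs, fStar, optTime):
xstar = sol.getDVs()      # optimal design variables, keyed by variable-group name
print(xstar["xvars"])     # length-3 array for this problem
print(sol.fStar)          # objective value at the optimum
print(sol.optTime)        # wall time reported by the optimizer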
mens0[-1] = 0. sdv1s0[-1] = 0.1 sdv2s0[-1] = 0.5 sdv3s0[-1] = 10. sdv4s0[-1] = 0.5 rats0[-1] = 10. tnss0[-1] = 10. spr1s0[-1] = 0.1 spr2s0[-1] = 0.5 spr3s0[-1] = 10. spr4s0[-1] = 0.5 scl1s0[-1] = 0.1 scl2s0[-1] = 0.5 scl3s0[-1] = 10. optProb.addVarGroup('ment', 3, 'c', lower=None, upper=None, value=ment0) optProb.addVarGroup('sdv1t', ordt, 'c', lower=None, upper=None, value=sdv1t0) optProb.addVarGroup('sdv2t', ordt, 'c', lower=None, upper=None, value=sdv2t0) optProb.addVarGroup('sdv3t', ordt, 'c', lower=None, upper=None, value=sdv3t0) optProb.addVarGroup('sdv4t', ordt, 'c', lower=None, upper=None, value=sdv4t0) optProb.addVarGroup('ratt', ordt, 'c', lower=None, upper=None, value=ratt0) optProb.addVarGroup('tnst', ordt, 'c', lower=None, upper=None, value=tnst0) optProb.addVarGroup('spr1t', ordt, 'c', lower=None, upper=None, value=spr1t0) optProb.addVarGroup('spr2t', ordt, 'c', lower=None, upper=None, value=spr2t0) optProb.addVarGroup('spr3t', ordt, 'c', lower=None, upper=None, value=spr3t0) optProb.addVarGroup('spr4t', ordt, 'c', lower=None, upper=None, value=spr4t0) optProb.addVarGroup('scl1t', ordt, 'c', lower=None, upper=None, value=scl1t0) optProb.addVarGroup('scl2t', ordt, 'c', lower=None, upper=None, value=scl2t0) optProb.addVarGroup('scl3t', ordt, 'c', lower=None, upper=None, value=scl3t0) optProb.addVarGroup('mens', ords, 'c', lower=None, upper=None, value=mens0)
def optimize(self, optName, tol, optOptions={}, storeHistory=False, hotStart=None, x0=[-2, 1.0]): self.nf = 0 # number of function evaluations self.ng = 0 # number of gradient evaluations # Optimization Object optProb = Optimization("HS15 Constraint Problem", self.objfunc) # Design Variables lower = [-5.0, -5.0] upper = [0.5, 5.0] optProb.addVarGroup("xvars", 2, lower=lower, upper=upper, value=x0) # Constraints lower = [1.0, 0.0] upper = [None, None] optProb.addConGroup("con", 2, lower=lower, upper=upper) # Objective optProb.addObj("obj") # Check optimization problem: print(optProb) # Optimizer try: opt = OPT(optName, options=optOptions) except Error: raise unittest.SkipTest("Optimizer not available:", optName) # Solution if storeHistory is not None: if storeHistory is True: self.histFileName = "%s_hs015_Hist.hst" % (optName.lower()) elif isinstance(storeHistory, str): self.histFileName = storeHistory else: self.histFileName = None sol = opt(optProb, sens=self.sens, storeHistory=self.histFileName, hotStart=hotStart) # Test printing solution to screen print(sol) # Check Solution self.fStar1 = 306.5 self.fStar2 = 360.379767 self.xStar1 = (0.5, 2.0) self.xStar2 = (-0.79212322, -1.26242985) dv = sol.getDVs() sol_xvars = [sol.variables["xvars"][i].value for i in range(2)] assert_allclose(sol_xvars, dv["xvars"], atol=tol, rtol=tol) # we check either optimum via try/except try: if optName == "SNOPT" and opt.version != "7.7.7": assert_allclose(sol.objectives["obj"].value, self.fStar1, atol=tol, rtol=tol) else: assert_allclose(sol.fStar, self.fStar1, atol=tol, rtol=tol) assert_allclose(dv["xvars"], self.xStar1, atol=tol, rtol=tol) except AssertionError: if optName == "SNOPT" and opt.version != "7.7.7": assert_allclose(sol.objectives["obj"].value, self.fStar2, atol=tol, rtol=tol) else: assert_allclose(sol.fStar, self.fStar2, atol=tol, rtol=tol) assert_allclose(dv["xvars"], self.xStar2, atol=tol, rtol=tol)
class TestHS15(OptTest): ## Solve test problem HS15 from the Hock & Schittkowski collection. # # min 100 (x2 - x1^2)^2 + (1 - x1)^2 # s.t. x1 x2 >= 1 # x1 + x2^2 >= 0 # x1 <= 0.5 # # The standard start point (-2, 1) usually converges to the standard # minimum at (0.5, 2.0), with final objective = 306.5. # Sometimes the solver converges to another local minimum # at (-0.79212, -1.26243), with final objective = 360.4. ## name = "HS015" DVs = {"xvars"} cons = {"con"} objs = {"obj"} extras = {"extra1", "extra2"} fStar = [ 306.5, 360.379767, ] xStar = [ { "xvars": (0.5, 2.0) }, { "xvars": (-0.79212322, -1.26242985) }, ] tol = { "SLSQP": 1e-8, "NLPQLP": 1e-12, "IPOPT": 1e-4, "ParOpt": 1e-6, "CONMIN": 1e-10, "PSQP": 5e-12, } optOptions = {} def objfunc(self, xdict): self.nf += 1 x = xdict["xvars"] funcs = {} funcs["obj"] = [100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2] conval = np.zeros(2, "D") conval[0] = x[0] * x[1] conval[1] = x[0] + x[1]**2 funcs["con"] = conval # extra keys funcs["extra1"] = 0.0 funcs["extra2"] = 1.0 fail = False return funcs, fail def sens(self, xdict, funcs): self.ng += 1 x = xdict["xvars"] funcsSens = {} funcsSens["obj"] = { "xvars": [ 2 * 100 * (x[1] - x[0]**2) * (-2 * x[0]) - 2 * (1 - x[0]), 2 * 100 * (x[1] - x[0]**2) ] } funcsSens["con"] = {"xvars": [[x[1], x[0]], [1, 2 * x[1]]]} fail = False return funcsSens, fail def setup_optProb(self): # Optimization Object self.optProb = Optimization("HS15 Constraint Problem", self.objfunc) # Design Variables lower = [-5.0, -5.0] upper = [0.5, 5.0] value = [-2, 1.0] self.optProb.addVarGroup("xvars", 2, lower=lower, upper=upper, value=value) # Constraints lower = [1.0, 0.0] upper = [None, None] self.optProb.addConGroup("con", 2, lower=lower, upper=upper) # Objective self.optProb.addObj("obj") def test_snopt(self): self.optName = "SNOPT" self.setup_optProb() store_vars = [ "step", "merit", "feasibility", "optimality", "penalty", "Hessian", "condZHZ", "slack", "lambda" ] optOptions = {"Save major iteration variables": store_vars} self.optimize_with_hotstart(1e-12, optOptions=optOptions) hist = History(self.histFileName, flag="r") data = hist.getValues(callCounters=["last"]) keys = hist.getIterKeys() self.assertIn("isMajor", keys) self.assertEqual(7, data["nMajor"]) for var in store_vars: self.assertIn(var, data.keys()) self.assertEqual(data["Hessian"].shape, (1, 2, 2)) self.assertEqual(data["feasibility"].shape, (1, 1)) self.assertEqual(data["slack"].shape, (1, 2)) self.assertEqual(data["lambda"].shape, (1, 2)) # dv = sol.getDVs() # sol_xvars = [sol.variables["xvars"][i].value for i in range(2)] # assert_allclose(sol_xvars, dv["xvars"], atol=tol, rtol=tol) @parameterized.expand(["SLSQP", "PSQP", "CONMIN", "NLPQLP", "ParOpt"]) def test_optimization(self, optName): self.optName = optName self.setup_optProb() optOptions = self.optOptions.pop(optName, None) sol = self.optimize(optOptions=optOptions) # Check Solution self.assert_solution_allclose(sol, self.tol[optName]) # Check informs self.assert_inform_equal(sol) def test_ipopt(self): self.optName = "IPOPT" self.setup_optProb() optOptions = self.optOptions.pop(self.optName, None) sol = self.optimize(optOptions=optOptions, storeHistory=True) # Check Solution self.assert_solution_allclose(sol, self.tol[self.optName]) # Check informs self.assert_inform_equal(sol) # Check iteration counters hist = History(self.histFileName, flag="r") data_init = hist.read(0) self.assertEqual(0, data_init["iter"]) data_last = hist.read(hist.read("last")) self.assertEqual(11, data_last["iter"] ) # took 12 
function evaluations (see test_ipopt.out) # Make sure there is no duplication in objective history data = hist.getValues(names=["obj"]) objhis_len = data["obj"].shape[0] self.assertEqual(12, objhis_len) for i in range(objhis_len - 1): self.assertNotEqual(data["obj"][i], data["obj"][i + 1])
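# The History API used in these checks also works as a standalone post-processing
# tool. A minimal sketch, assuming a history file named "opt.hst" was written via
# storeHistory (as in several runs in this section):
from pyoptsparse import History

hist = History("opt.hst", flag="r")      # open an existing history read-only
print(hist.getIterKeys())                # quantities stored at each iteration
data = hist.getValues(names=["obj"])     # objective history
first = hist.read(0)                     # data for the first call counter
last = hist.read(hist.read("last"))      # data for the final call counter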
def fit(s,t,length,plot,comp,read_data,opt_print): global xd global pos1tr global pos2tr global pos3tr global pos4tr global pos5tr global pos6tr global pos7tr global pos8tr global pos9tr global pos10tr global pos11tr global pos12tr global pos13tr global pos14tr global pos15tr global pos16tr global pos17tr global pos18tr global pos19tr global pos20tr global pos21tr global pos22tr global pos23tr global pos24tr global pos25tr global pos26tr global pos27tr global pos28tr global pos29tr global pos30tr global velo1tr global velo2tr global velo3tr global velo4tr global velo5tr global velo6tr global velo7tr global velo8tr global velo9tr global velo10tr global velo11tr global velo12tr global velo13tr global velo14tr global velo15tr global velo16tr global velo17tr global velo18tr global velo19tr global velo20tr global velo21tr global velo22tr global velo23tr global velo24tr global velo25tr global velo26tr global velo27tr global velo28tr global velo29tr global velo30tr t2 = t+'.0' wfit = s+'_'+t2 wfit2 = s+'_'+t2 wfit3 = s+'_'+t2 wfit4 = s+'_'+t2 wfit5 = s+'_'+t2 wfit6 = s+'_'+t2 length2 = length length3 = length length4 = length length5 = length length6 = length wind = 15. wind2 = 14. wind3 = 12. wind4 = 16. rad = 3. dia = rad*2. tsr = float(wfit[3]+'.'+wfit[4]+wfit[5]) rot = tsr*wind/rad rot2 = tsr*wind2/rad rot3 = tsr*wind3/rad rot4 = tsr*wind4/rad rot5 = 17. rot6 = 18. wind5 = rot5*rad/tsr wind6 = rot6*rad/tsr if comp == 'mac': # fdata = '/Users/ning1/Documents/Flow Lab/STAR-CCM+/NACA0021/MoveForward/test.csv' fdata = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/Velocity Sections/'+wfit+'.csv' fdata2 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot18/Velocity/'+wfit6+'.csv' elif comp == 'fsl': fdata = '/fslhome/ebtingey/compute/moveForward/Velocity/'+wfit+'.csv' fdata2 = '/fslhome/ebtingey/compute/moveForward/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/fslhome/ebtingey/compute/moveForward/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/fslhome/ebtingey/compute/moveForward/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/fslhome/ebtingey/compute/moveForward/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/fslhome/ebtingey/compute/moveForward/rot18/Velocity/'+wfit6+'.csv' if read_data ==1: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata]),dia,np.array([wind]),opt_print) if read_data ==2: 
pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2]),dia,np.array([wind,wind2]),opt_print) if read_data ==3: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2,fdata3]),dia,np.array([wind,wind2,wind3]),opt_print) if read_data ==4: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2,fdata3,fdata4]),dia,np.array([wind,wind2,wind3,wind4]),opt_print) if read_data ==5: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5]),dia,np.array([wind,wind2,wind3,wind4,wind5]),opt_print) if read_data ==6: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5,fdata6]),dia,np.array([wind,wind2,wind3,wind4,wind5,wind6]),opt_print) start = length/30. 
xd = np.linspace(start,length,30)/dia cvtest = 0.3 pos1tr,pos1ts,velo1tr,velo1ts = train_test_split(pos1d,velo1d,test_size=cvtest) pos2tr,pos2ts,velo2tr,velo2ts = train_test_split(pos2d,velo2d,test_size=cvtest) pos3tr,pos3ts,velo3tr,velo3ts = train_test_split(pos3d,velo3d,test_size=cvtest) pos4tr,pos4ts,velo4tr,velo4ts = train_test_split(pos4d,velo4d,test_size=cvtest) pos5tr,pos5ts,velo5tr,velo5ts = train_test_split(pos5d,velo5d,test_size=cvtest) pos6tr,pos6ts,velo6tr,velo6ts = train_test_split(pos6d,velo6d,test_size=cvtest) pos7tr,pos7ts,velo7tr,velo7ts = train_test_split(pos7d,velo7d,test_size=cvtest) pos8tr,pos8ts,velo8tr,velo8ts = train_test_split(pos8d,velo8d,test_size=cvtest) pos9tr,pos9ts,velo9tr,velo9ts = train_test_split(pos9d,velo9d,test_size=cvtest) pos10tr,pos10ts,velo10tr,velo10ts = train_test_split(pos10d,velo10d,test_size=cvtest) pos11tr,pos11ts,velo11tr,velo11ts = train_test_split(pos11d,velo11d,test_size=cvtest) pos12tr,pos12ts,velo12tr,velo12ts = train_test_split(pos12d,velo12d,test_size=cvtest) pos13tr,pos13ts,velo13tr,velo13ts = train_test_split(pos13d,velo13d,test_size=cvtest) pos14tr,pos14ts,velo14tr,velo14ts = train_test_split(pos14d,velo14d,test_size=cvtest) pos15tr,pos15ts,velo15tr,velo15ts = train_test_split(pos15d,velo15d,test_size=cvtest) pos16tr,pos16ts,velo16tr,velo16ts = train_test_split(pos16d,velo16d,test_size=cvtest) pos17tr,pos17ts,velo17tr,velo17ts = train_test_split(pos17d,velo17d,test_size=cvtest) pos18tr,pos18ts,velo18tr,velo18ts = train_test_split(pos18d,velo18d,test_size=cvtest) pos19tr,pos19ts,velo19tr,velo19ts = train_test_split(pos19d,velo19d,test_size=cvtest) pos20tr,pos20ts,velo20tr,velo20ts = train_test_split(pos20d,velo20d,test_size=cvtest) pos21tr,pos21ts,velo21tr,velo21ts = train_test_split(pos21d,velo21d,test_size=cvtest) pos22tr,pos22ts,velo22tr,velo22ts = train_test_split(pos22d,velo22d,test_size=cvtest) pos23tr,pos23ts,velo23tr,velo23ts = train_test_split(pos23d,velo23d,test_size=cvtest) pos24tr,pos24ts,velo24tr,velo24ts = train_test_split(pos24d,velo24d,test_size=cvtest) pos25tr,pos25ts,velo25tr,velo25ts = train_test_split(pos25d,velo25d,test_size=cvtest) pos26tr,pos26ts,velo26tr,velo26ts = train_test_split(pos26d,velo26d,test_size=cvtest) pos27tr,pos27ts,velo27tr,velo27ts = train_test_split(pos27d,velo27d,test_size=cvtest) pos28tr,pos28ts,velo28tr,velo28ts = train_test_split(pos28d,velo28d,test_size=cvtest) pos29tr,pos29ts,velo29tr,velo29ts = train_test_split(pos29d,velo29d,test_size=cvtest) pos30tr,pos30ts,velo30tr,velo30ts = train_test_split(pos30d,velo30d,test_size=cvtest) ## Optimization optProb = Optimization('VAWTWake_Velo', obj_func) optProb.addObj('obj') param0 = np.array([2.91638655e-04, -1.70286993e-03 , 2.38051673e-02 , -7.65610623e-01,6.40509205e-02 , 6.99046413e-01, 7.83484187e-01 , 4.55408268e-01, 1.18716383e-01 , 2.05484572e+01 , -2.67741935e+00 , 4.43022575e+01,-2.10925147e+00 , 3.30400554e+01]) param_l = np.array([-1.,-1,-1.,-1.,-1,-1.,0.,0.,0.,None,0.,None,0.]) param_u = np.array([1.,1.,1.,1.,1.,1.,None,None,None,0.,None,0.,None]) nparam = np.size(param0) optProb.addVarGroup('param', nparam, 'c', lower=None, upper=None, value=param0) opt = SNOPT() opt.setOption('Scale option',2) if comp == 'mac': opt.setOption('Print file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_summary'+s+'_'+t+'.out') elif comp == 'fsl': opt.setOption('Print 
file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_summary'+s+'_'+t+'.out') res = opt(optProb, sens=None) if opt_print == True: print res pow = res.fStar paramf = res.xStar['param'] if opt_print == True: print paramf men = np.array([paramf[0],paramf[1],paramf[2]]) spr = np.array([paramf[3],paramf[4],paramf[5],paramf[6]]) scl = np.array([paramf[7],paramf[8],paramf[9]]) rat = np.array([paramf[10],paramf[11]]) tns = np.array([paramf[12],paramf[13]]) cv_error = 0. for i in range(30): name = str(i+1) ind = str(i) exec('if xd['+ind+'] > 0.58:\n\tmen_v,spr_v,scl_v,rat_v,tns_v = paramfit(xd['+ind+'],men,spr,scl,rat,tns)\n\tfor j in range(np.size(pos'+name+'ts)):\n\t\tvel = (-scl_v/(spr_v*sqrt(2.*pi))*exp(-(pos'+name+'ts[j]+men_v)**2/(2.*spr_v**2)))*(1./(1. + exp(rat_v*fabs(pos'+name+'ts[j])-tns_v))) + 1.\n\t\tcv_error = cv_error + (vel-velo'+name+'ts[j])**2') if plot == True: for i in range(30): name = str(i+1) plt.figure(1) plt.subplot(5,6,i+1) color = 'bo' exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') men_v,spr_v,scl_v,rat_v,tns_v = paramfit(xd[i],men,spr,scl,rat,tns) plt.plot(veldist(xfit,men_v,spr_v,scl_v,rat_v,tns_v),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) # plt.legend(loc=1) plt.xlabel('Normalized Velocity') plt.ylabel('$y/D$') return men,spr,scl,rat,tns,cv_error
def create_pyopt(analysis, optimizer='snopt', options={}, tol=1e-5): ''' Take the given problem and optimize it with the given optimizer from the pyOptSparse library of optimizers. ''' # Import the optimization problem from pyoptsparse import Optimization, OPT from scipy import sparse class pyOptWrapper: optimizer = None options = {} opt = None prob = None def __init__(self, analysis): self.analysis = analysis def objcon(self, x): self.xcurr = np.array(x['x']) fail, obj, con = self.analysis.evalObjCon(x['x']) funcs = {'objective': obj, 'con': con} return funcs, fail def gobjcon(self, x, funcs): g = np.zeros(x['x'].shape) A = np.zeros((self.analysis.ncon, x['x'].shape[0])) fail = self.analysis.evalObjConGradient(x['x'], g, A) sens = {'objective': {'x': g}, 'con': {'x': A}} return sens, fail # Thin wrapper methods to make this look somewhat like ParOpt def optimize(self): self.opt = OPT(self.optimizer, options=self.options) self.sol = self.opt(self.prob, sens=self.gobjcon) return def setOutputFile(self, fname): if self.optimizer == 'snopt': self.options['Print file'] = fname self.options['Summary file'] = fname + '_summary' self.options['Major optimality tolerance'] = tol elif self.optimizer == 'ipopt': self.options['print_user_options'] = 'yes' self.options['tol'] = tol self.options['nlp_scaling_method'] = 'none' self.options['limited_memory_max_history'] = 25 self.options['bound_relax_factor'] = 0.0 self.options['linear_solver'] = 'ma27' self.options['output_file'] = fname self.options['max_iter'] = 10000 return def setInitBarrierParameter(self, *args): return def getOptimizedPoint(self): return self.xcurr # Set the design variables wrap = pyOptWrapper(analysis) prob = Optimization('topo', wrap.objcon) # Add the linear constraint n = analysis.num_design_vars # Create the sparse matrix for the design variable weights rowp = [0] cols = [] data = [] nrows = analysis.num_elements ncols = analysis.num_design_vars nblock = analysis.num_materials + 1 for i in range(analysis.num_elements): data.append(1.0) cols.append(i * nblock) for j in range(i * nblock + 1, (i + 1) * nblock): data.append(-1.0) cols.append(j) rowp.append(len(cols)) Asparse = {'csr': [rowp, cols, data], 'shape': [nrows, ncols]} lower = np.zeros(analysis.num_elements) upper = np.zeros(analysis.num_elements) prob.addConGroup('lincon', analysis.num_elements, lower=lower, upper=upper, linear=True, wrt=['x'], jac={'x': Asparse}) # Determine the initial variable values and their lower/upper # bounds in the design problem x0 = np.zeros(n) lb = np.zeros(n) ub = np.zeros(n) analysis.getVarsAndBounds(x0, lb, ub) # Set the variable bounds and initial values prob.addVarGroup('x', n, value=x0, lower=lb, upper=ub) # Set the constraints prob.addConGroup('con', analysis.ncon, lower=0.0, upper=None) # Add the objective prob.addObj('objective') # Set the values into the wrapper wrap.optimizer = optimizer wrap.options = options wrap.prob = prob return wrap
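# Hypothetical usage of the wrapper returned by create_pyopt, assuming `analysis`
# provides the interface relied on above (evalObjCon, evalObjConGradient,
# getVarsAndBounds, and the num_design_vars / num_elements / num_materials / ncon
# attributes). A sketch only, not part of the original module:
wrap = create_pyopt(analysis, optimizer='snopt',
                    options={'Major iterations limit': 500}, tol=1e-5)
wrap.setOutputFile('topo_opt.out')   # routes the SNOPT print/summary files
wrap.optimize()                      # instantiates OPT and runs the problem
x_opt = wrap.getOptimizedPoint()     # last design point evaluated by objcon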
"x0": np.array([[2.0 * x0]]), "x1": np.array([[2.0 * x1]]), "x2": np.array([[2.0 * x2, 0]]), "x3": np.array([[2.0 * x3]]), } fail = False return funcsSens, fail # Optimization Object optProb = Optimization("HS071 Constraint Problem", objfunc) # Design Variables x0 = [1.0, 5.0, 5.0, 1.0] optProb.addVarGroup("x0", 1, lower=1, upper=5, value=x0[0]) optProb.addVarGroup("x1", 1, lower=1, upper=5, value=x0[1]) optProb.addVarGroup("x2", 2, lower=1, upper=5, value=x0[2]) optProb.addVarGroup("x3", 1, lower=1, upper=5, value=x0[3]) # Constraints optProb.addConGroup("con1", 1, lower=[25], upper=[1e19]) optProb.addConGroup("con2", 1, lower=[40], upper=[40]) # Objective optProb.addObj("obj") # Check optimization problem: print(optProb) # Optimizer
def fit(s,t,length,plot,comp,read_data,opt_print): global posdntr global poslttr global velodtr t2 = t+'.0' wfit = s+'_'+t2 wfit2 = s+'_'+t2 wfit3 = s+'_'+t2 wfit4 = s+'_'+t2 wfit5 = s+'_'+t2 wfit6 = s+'_'+t2 length2 = length length3 = length length4 = length length5 = length length6 = length wind = 15. wind2 = 14. wind3 = 12. wind4 = 16. rad = 3. dia = rad*2. tsr = float(wfit[3]+'.'+wfit[4]+wfit[5]) rot = tsr*wind/rad rot2 = tsr*wind2/rad rot3 = tsr*wind3/rad rot4 = tsr*wind4/rad rot5 = 17. rot6 = 18. wind5 = rot5*rad/tsr wind6 = rot6*rad/tsr if comp == 'mac': # fdata = '/Users/ning1/Documents/Flow Lab/STAR-CCM+/NACA0021/MoveForward/test.csv' fdata = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/Velocity Sections/'+wfit+'.csv' fdata2 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot18/Velocity/'+wfit6+'.csv' elif comp == 'fsl': fdata = '/fslhome/ebtingey/compute/moveForward/Velocity/'+wfit+'.csv' fdata2 = '/fslhome/ebtingey/compute/moveForward/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/fslhome/ebtingey/compute/moveForward/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/fslhome/ebtingey/compute/moveForward/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/fslhome/ebtingey/compute/moveForward/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/fslhome/ebtingey/compute/moveForward/rot18/Velocity/'+wfit6+'.csv' if read_data ==1: posdn,poslt,velod = starccm_read(np.array([fdata]),dia,np.array([wind]),length,opt_print) if read_data ==2: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2]),dia,np.array([wind,wind2]),length,opt_print) if read_data ==3: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3]),dia,np.array([wind,wind2,wind3]),length,opt_print) if read_data ==4: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4]),dia,np.array([wind,wind2,wind3,wind4]),length,opt_print) if read_data ==5: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5]),dia,np.array([wind,wind2,wind3,wind4,wind5]),length,opt_print) if read_data ==6: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5,fdata6]),dia,np.array([wind,wind2,wind3,wind4,wind5,wind6]),length,opt_print) if plot == True: if read_data ==1: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata]),dia,np.array([wind]),opt_print) if read_data ==2: 
pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2]),dia,np.array([wind,wind2]),opt_print) if read_data ==3: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3]),dia,np.array([wind,wind2,wind3]),opt_print) if read_data ==4: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4]),dia,np.array([wind,wind2,wind3,wind4]),opt_print) if read_data ==5: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4,fdata5]),dia,np.array([wind,wind2,wind3,wind4,wind5]),opt_print) if read_data ==6: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4,fdata5,fdata6]),dia,np.array([wind,wind2,wind3,wind4,wind5,wind6]),opt_print) start = length/30. xd = np.linspace(start,length,30)/dia cvtest = 0.3 posdntr,posdnts,poslttr,posltts,velodtr,velodts = train_test_split(posdn,poslt,velod,test_size=cvtest) ## Optimization optProb = Optimization('VAWTWake_Velo', obj_func) optProb.addObj('obj') men0 = 0. sdv10 = 0.5 sdv20 = 0.1 sdv30 = 10. sdv40 = 0.5 rat0 = 10. spr0 = 10. bow1 = 0.5 bow2 = 0.1 bow3 = 20. bow4 = 1. # bow1 = -1. # bow2 = 1. # bow3 = 1. # bow4 = 1. 
param0 = np.array([men0,sdv10,sdv20,sdv30,sdv40,rat0,bow4,spr0,bow1,bow2,bow3,0.5,0.1,20.]) param_l = np.array([None,0.,0.,0.,0.,None,0.,None,0.,0.,0.,0.]) param_u = np.array([None,None,None,None,None,0.,None,0.,None,1.,1.,50.]) param_l = np.array([None,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) param_u = np.array([None,10.,1.,50.,None,None,None,None,1.,1.,50.,1.,1.,None]) # param_l = np.array([None,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) # param_u = np.array([None,None,None,None,None,None,None,None,None,None,None,1.,1.,None]) # param_l = np.array([None,0.,0.,0.,0.,0.,None,0.,None,None,None,0.,0.,0.]) # param_u = np.array([None,1.,1.,50.,None,None,None,None,None,None,None,1.,1.,50.]) nparam = np.size(param0) optProb.addVarGroup('param', nparam, 'c', lower=param_l, upper=param_u, value=param0) opt = SNOPT() opt.setOption('Scale option',2) if comp == 'mac': opt.setOption('Print file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_summary'+s+'_'+t+'.out') elif comp == 'fsl': opt.setOption('Print file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_summary'+s+'_'+t+'.out') res = opt(optProb, sens=None) if opt_print == True: print res pow = res.fStar paramf = res.xStar['param'] if opt_print == True: print paramf[0] print paramf[1] print paramf[2] print paramf[3] print paramf[4] print paramf[5] print paramf[6] print paramf[7] print paramf[8] print paramf[9] print paramf[10] print paramf[11] print paramf[12] print paramf[13] men = paramf[0] sdv1 = paramf[1] sdv2 = paramf[2] sdv3 = paramf[3] sdv4 = paramf[4] rat1 = paramf[5] rat2 = paramf[6] spr1 = paramf[7] spr2 = paramf[8] spr3 = paramf[9] spr4 = paramf[10] scl1 = paramf[11] scl2 = paramf[12] scl3 = paramf[13] cv_error = 0. for i in range(np.size(posdnts)): if posdnts[i] > 0.58: vel = veldist(posdnts[i],posltts[i],men,sdv1,sdv2,sdv3,sdv4,rat1,rat2,spr1,spr2,spr3,spr4,scl1,scl2,scl3) cv_error = cv_error + (vel-velodts[i])**2 # men = np.array([paramf[0],paramf[1],paramf[2]]) # spr = np.array([paramf[3],paramf[4],paramf[5],paramf[6]]) # scl = np.array([paramf[7],paramf[8],paramf[9]]) # rat = np.array([paramf[10],paramf[11]]) # spr = np.array([paramf[12],paramf[13]]) # paper = False if plot == True: if paper == True: for i in range(30): name = str(i+1) ind = str(i) plt.figure(1) ax1 = plt.subplot(5,6,i+1) color = 'bo' color2 = 'r-' fs = 15 lab = 'CFD' lab2 = 'Trend' tex = '$x/D$ = '+str("{0:.2f}".format(x[i]/dia)) exec('xfit = np.linspace(min(pos'+name+'/dia)-1.,max(pos'+name+'/dia)+1.,500)') if i == 5: exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color,label=lab)') men_v,spr_v,scl_v,rat_v,spr_v = paramfit(xd[i],men,spr,scl,rat,spr) plt.plot(veldist(xfit,men_v,spr_v,scl_v,rat_v,spr_v),xfit,'r-',linewidth=2,label=lab2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=fs) else: exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') men_v,spr_v,scl_v,rat_v,spr_v = paramfit(xd[i],men,spr,scl,rat,spr) plt.plot(veldist(xfit,men_v,spr_v,scl_v,rat_v,spr_v),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) 
plt.text(0.3,0.8,tex,fontsize=fs) if i <= 23: plt.setp(ax1.get_xticklabels(), visible=False) else: plt.xlabel('$y/D$',fontsize=fs) plt.xticks(fontsize=fs) if i == 0 or i == 6 or i == 12 or i == 18 or i ==24: plt.ylabel(r'$u/U_\infty$',fontsize=fs) plt.yticks(fontsize=fs) else: plt.setp(ax1.get_yticklabels(), visible=False) elif paper == False: for i in range(30): name = str(i+1) plt.figure(1) plt.subplot(5,6,i+1) color = 'bo' exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') plt.plot(veldist(xd[i],xfit,men,sdv1,sdv2,sdv3,sdv4,rat1,rat2,spr1,spr2,spr3,spr4,scl1,scl2,scl3),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) # plt.legend(loc=1) plt.xlabel('Normalized Velocity') plt.ylabel('$y/D$') return men,sdv1,sdv2,sdv3,sdv4,rat1,rat2,spr1,spr2,spr3,spr4,scl1,scl2,scl3,cv_error
'x0': numpy.array([[2.0 * x0]]), 'x1': numpy.array([[2.0 * x1]]), 'x2': numpy.array([[2.0 * x2, 0]]), 'x3': numpy.array([[2.0 * x3]]) } fail = False return funcsSens, fail # Optimization Object optProb = Optimization('HS071 Constraint Problem', objfunc) # Design Variables x0 = [1.0, 5.0, 5.0, 1.0] optProb.addVarGroup('x0', 1, lower=1, upper=5, value=x0[0]) optProb.addVarGroup('x1', 1, lower=1, upper=5, value=x0[1]) optProb.addVarGroup('x2', 2, lower=1, upper=5, value=x0[2]) optProb.addVarGroup('x3', 1, lower=1, upper=5, value=x0[3]) # Constraints optProb.addConGroup('con1', 1, lower=[25], upper=[1e19]) optProb.addConGroup('con2', 1, lower=[40], upper=[40]) # Objective optProb.addObj('obj') # Check optimization problem: print(optProb) # Optimizer
def __call__(self, optimizer, options=None): """ Run optimization """ system = self._system variables = self._variables opt_prob = OptProblem('Optimization', self.obj_func) for dv_name in variables['dv'].keys(): dv = variables['dv'][dv_name] dv_id = dv['ID'] if dv['value'] is not None: value = dv['value'] else: value = system.vec['u'](dv_id) scale = dv['scale'] lower = dv['lower'] upper = dv['upper'] size = system.vec['u'](dv_id).shape[0] opt_prob.addVarGroup(dv_name, size, value=value, scale=scale, lower=lower, upper=upper) opt_prob.finalizeDesignVariables() for func_name in variables['func'].keys(): func = variables['func'][func_name] func_id = func['ID'] lower = func['lower'] upper = func['upper'] linear = func['linear'] get_jacs = func['get_jacs'] sys = func['sys'] size = system.vec['u'](func_id).shape[0] if lower is None and upper is None: opt_prob.addObj(func_name) else: if get_jacs is not None: jacs_var = get_jacs() dv_names = [] jacs = {} for dv_var in jacs_var: dv_id = self._system.get_id(dv_var) dv_name = self._get_name(dv_id) dv_names.append(dv_name) jacs[dv_name] = jacs_var[dv_var] opt_prob.addConGroup(func_name, size, wrt=dv_names, jac=jacs, linear=linear, lower=lower, upper=upper) elif sys is not None: dv_names = [] for dv_name in variables['dv'].keys(): dv_id = variables['dv'][dv_name]['ID'] if dv_id in sys.vec['u']: dv_names.append(dv_name) opt_prob.addConGroup(func_name, size, wrt=dv_names, lower=lower, upper=upper) else: opt_prob.addConGroup(func_name, size, lower=lower, upper=upper) if options is None: options = {} opt = Optimizer(optimizer, options=options) opt.setOption('Iterations limit', int(1e6)) #opt.setOption('Verify level', 3) sol = opt(opt_prob, sens=self.sens_func, storeHistory='hist.hst') print sol try: exit_status = sol.optInform['value'] self.exit_flag = 1 if exit_status > 2: # bad self.exit_flag = 0 except KeyError: #nothing is here, so something bad happened! self.exit_flag = 0
def execute(self): """pyOpt execution. Note that pyOpt controls the execution, and the individual optimizers control the iteration.""" self.pyOpt_solution = None self.run_iteration() opt_prob = Optimization(self.title, self.objfunc) # Add all parameters self.param_type = {} self.nparam = self.total_parameters() param_list = [] #need a counter for lb and ub arrays i_param = 0 for name, param in self.get_parameters().iteritems(): if isinstance(name, tuple): name = name[0] # We need to identify Enums, Lists, Dicts metadata = param.get_metadata()[1] values = param.evaluate() # Assuming uniform enumerated, discrete, or continuous for now. val = values[0] n_vals = len(values) choices = [] if 'values' in metadata and \ isinstance(metadata['values'], (list, tuple, array, set)): vartype = 'd' choices = metadata['values'] elif isinstance(val, bool): vartype = 'd' choices = [True, False] elif isinstance(val, (int, int32, int64)): vartype = 'i' elif isinstance(val, (float, float32, float64)): vartype = 'c' else: msg = 'Only continuous, discrete, or enumerated variables' \ ' are supported. %s is %s.' % (name, type(val)) self.raise_exception(msg, ValueError) self.param_type[name] = vartype if self.n_x is None: lower_bounds = param.get_low() upper_bounds = param.get_high() else: lower_bounds = self.lb[i_param:i_param+n_vals] upper_bounds = self.ub[i_param:i_param+n_vals] i_param += n_vals opt_prob.addVarGroup(name, n_vals, type=vartype, lower=lower_bounds, upper=upper_bounds, value=values, choices=choices) param_list.append(name) # Add all objectives for name, obj in self.get_objectives().iteritems(): name = '%s.out0' % obj.pcomp_name opt_prob.addObj(name) # Calculate and save gradient for any linear constraints. lcons = self.get_constraints(linear=True).values() + \ self.get_2sided_constraints(linear=True).values() if len(lcons) > 0: lcon_names = ['%s.out0' % obj.pcomp_name for obj in lcons] self.lin_jacs = self.workflow.calc_gradient(param_list, lcon_names, return_format='dict') #print "Linear Gradient" #print self.lin_jacs # Add all equality constraints nlcons = [] for name, con in self.get_eq_constraints().iteritems(): size = con.size lower = zeros((size)) upper = zeros((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, lower=lower, upper=upper) nlcons.append(name) # Add all inequality constraints for name, con in self.get_ineq_constraints().iteritems(): size = con.size upper = zeros((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, upper=upper, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, upper=upper) nlcons.append(name) # Add all double_sided constraints for name, con in self.get_2sided_constraints().iteritems(): size = con.size upper = con.high * ones((size)) lower = con.low * ones((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, upper=upper, lower=lower) nlcons.append(name) self.objs = self.list_objective_targets() self.nlcons = nlcons # Instantiate the requested optimizer optimizer = self.optimizer try: exec('from pyoptsparse import %s' % optimizer) except ImportError: msg = "Optimizer %s is not available in this installation." 
% \ optimizer self.raise_exception(msg, ImportError) optname = vars()[optimizer] opt = optname() # Set optimization options for option, value in self.options.iteritems(): opt.setOption(option, value) # Execute the optimization problem if self.pyopt_diff: # Use pyOpt's internal finite difference sol = opt(opt_prob, sens='FD', sensStep=self.gradient_options.fd_step) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self.gradfunc) # Print results if self.print_results: print sol # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() param_types = self.param_type for name, param in self.get_parameters().iteritems(): full_name = name if isinstance(name, tuple): name = name[0] val = dv_dict[name] if param_types[name] == 'i': val = int(round(val)) self.set_parameter_by_name(full_name, val) self.run_iteration() # Save the most recent solution. self.pyOpt_solution = sol try: exit_status = sol.optInform['value'] self.exit_flag = 1 if exit_status > 2: # bad self.exit_flag = 0 except KeyError: #nothing is here, so something bad happened! self.exit_flag = 0
import numpy
import argparse
from numpy import sin, cos
from pyoptsparse import Optimization, SNOPT, ALPSO

USE_LINEAR = True

from tp109 import objfunc  # Import objective from the other file

# Optimization Object
optProb = Optimization('TP109 Constraint Problem', objfunc)

# Design Variables (Removed infinite bounds for ALPSO)
lower = [0.0, 0.0, -0.55, -0.55, 196, 196, 196, -400, -400]
upper = [2000, 2000, 0.55, 0.55, 252, 252, 252, 800, 800]
value = [0, 0, 0, 0, 0, 0, 0, 0, 0]
optProb.addVarGroup('xvars', 9, lower=lower, upper=upper, value=value)

# Constraints
lower = [0, 0, 0, 0, 0, 0, 0, 0]
upper = [None, None, 0, 0, 0, 0, 0, 0]
if not USE_LINEAR:
    lower.extend([0, 0])
    upper.extend([None, None])
optProb.addConGroup('con', len(lower), lower=lower, upper=upper)

# And the 2 linear constraints
if USE_LINEAR:
    jac = numpy.zeros((1, 9))
    jac[0, 3] = 1.0
    jac[0, 2] = -1.0
# rst Sensitivity function
def userfuncsens(xdict, funcs):
    x = xdict["xvars"]  # Extract array
    funcsSens = {}
    funcsSens["obj"] = {
        "xvars": [2 * 100 * (x[1] - x[0] ** 2) * (-2 * x[0]) - 2 * (1 - x[0]), 2 * 100 * (x[1] - x[0] ** 2)]
    }
    funcsSens["con"] = {"xvars": [-3 * (x[0] - 1) ** 2, -1]}
    fail = False
    return funcsSens, fail


# rst Optimization problem
optProb = Optimization("Rosenbrock function", userfunc)

# rst Add objective
optProb.addObj("obj")

# rst Add design variables
optProb.addVarGroup(name="xvars", nVars=2, type="c", value=[3, -3], lower=-5.12, upper=5.12, scale=1.0)

# rst Add constraints
optProb.addCon("con", upper=0, scale=1.0)

# rst Instantiate optimizer
optOptions = {}
opt = OPT(args.opt, options=optOptions)

# rst Solve
sol = opt(optProb, sens=userfuncsens, storeHistory="opt.hst")
print(sol)
def run(self, problem): """pyOpt execution. Note that pyOpt controls the execution, and the individual optimizers (i.e., SNOPT) control the iteration. Args ---- problem : `Problem` Our parent `Problem`. """ self.pyopt_solution = None rel = problem.root._probdata.relevance # Metadata Setup self.metadata = create_local_meta(None, self.options['optimizer']) self.iter_count = 0 update_local_meta(self.metadata, (self.iter_count, )) # Initial Run problem.root.solve_nonlinear(metadata=self.metadata) opt_prob = Optimization(self.options['title'], self._objfunc) # Add all parameters param_meta = self.get_desvar_metadata() self.indep_list = indep_list = list(param_meta) param_vals = self.get_desvars() for name, meta in iteritems(param_meta): opt_prob.addVarGroup(name, meta['size'], type='c', value=param_vals[name], lower=meta['lower'], upper=meta['upper']) opt_prob.finalizeDesignVariables() # Figure out parameter subsparsity for paramcomp index connections. # sub_param_conns is empty unless there are some index conns. # full_param_conns gets filled with the connections to the entire # parameter so that those params can be filtered out of the sparse # set if the full path is also relevant sub_param_conns = {} full_param_conns = {} for name in indep_list: pathname = problem.root.unknowns.metadata(name)['pathname'] sub_param_conns[name] = {} full_param_conns[name] = set() for target, info in iteritems(problem.root.connections): src, indices = info if src == pathname: if indices is not None: # Need to map the connection indices onto the desvar # indices if both are declared. dv_idx = param_meta[name].get('indices') indices = set(indices) if dv_idx is not None: indices.intersection_update(dv_idx) ldv_idx = list(dv_idx) mapped_idx = [ ldv_idx.index(item) for item in indices ] sub_param_conns[name][target] = mapped_idx else: sub_param_conns[name][target] = indices else: full_param_conns[name].add(target) # Add all objectives objs = self.get_objectives() self.quantities = list(objs) self.sparsity = OrderedDict() self.sub_sparsity = OrderedDict() for name in objs: opt_prob.addObj(name) self.sparsity[name] = self.indep_list # Calculate and save gradient for any linear constraints. 
lcons = self.get_constraints(lintype='linear').keys() if len(lcons) > 0: self.lin_jacs = problem.calc_gradient(indep_list, lcons, return_format='dict') #print("Linear Gradient") #print(self.lin_jacs) # Add all equality constraints econs = self.get_constraints(ctype='eq', lintype='nonlinear') con_meta = self.get_constraint_metadata() self.quantities += list(econs) for name in self.get_constraints(ctype='eq'): meta = con_meta[name] size = meta['size'] lower = upper = meta['equals'] # Sparsify Jacobian via relevance rels = rel.relevant[name] wrt = rels.intersection(indep_list) self.sparsity[name] = wrt if meta['linear'] is True: opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=wrt, jac=self.lin_jacs[name]) else: jac = self._build_sparse(name, wrt, size, param_vals, sub_param_conns, full_param_conns, rels) opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac) # Add all inequality constraints incons = self.get_constraints(ctype='ineq', lintype='nonlinear') self.quantities += list(incons) for name in self.get_constraints(ctype='ineq'): meta = con_meta[name] size = meta['size'] # Bounds - double sided is supported lower = meta['lower'] upper = meta['upper'] # Sparsify Jacobian via relevance rels = rel.relevant[name] wrt = rels.intersection(indep_list) self.sparsity[name] = wrt if meta['linear'] is True: opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=wrt, jac=self.lin_jacs[name]) else: jac = self._build_sparse(name, wrt, size, param_vals, sub_param_conns, full_param_conns, rels) opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac) # Instantiate the requested optimizer optimizer = self.options['optimizer'] try: exec('from pyoptsparse import %s' % optimizer) except ImportError: msg = "Optimizer %s is not available in this installation." % \ optimizer raise ImportError(msg) optname = vars()[optimizer] opt = optname() #Set optimization options for option, value in self.opt_settings.items(): opt.setOption(option, value) self._problem = problem # Execute the optimization problem if self.options['pyopt_diff'] is True: # Use pyOpt's internal finite difference fd_step = problem.root.fd_options['step_size'] sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file) self._problem = None # Print results if self.options['print_results'] is True: print(sol) # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() for name in indep_list: val = dv_dict[name] self.set_desvar(name, val) self.root.solve_nonlinear(metadata=self.metadata) # Save the most recent solution. self.pyopt_solution = sol try: exit_status = sol.optInform['value'] self.exit_flag = 1 if exit_status > 2: # bad self.exit_flag = 0 except KeyError: #nothing is here, so something bad happened! self.exit_flag = 0
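# Both drivers above collapse the optimizer's return code into a simple exit_flag by
# inspecting sol.optInform. A minimal standalone sketch of the same pattern, assuming
# `sol` is the Solution returned by a pyOptSparse optimizer (the ">2 means failure"
# convention follows the drivers above and is optimizer-specific):
try:
    exit_status = sol.optInform['value']
    exit_flag = 1 if exit_status <= 2 else 0
except KeyError:
    # nothing recorded, so something bad happened
    exit_flag = 0
print(exit_flag)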
# from pymatbridge import Matlab # mlab = Matlab('/Applications/MATLAB_R2014b.app/bin/matlab') # mlab.start() # # res = mlab.lsqcurvefit(sheet,param,posdntr,velodtr) # print res # # mlab.stop if fit_opt == 'snopt': optProb = Optimization('VAWTWake_Velo', obj_func) optProb.addObj('obj') optProb.addVarGroup('param', 90, 'c', lower=None, upper=None, value=param0) opt = SNOPT() opt.setOption('Scale option',2) if comp == 'mac': opt.setOption('Print file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptSheet/SNOPT_print'+str(argv[1])+str(argv[2])+str(argv[3])+'ORD'+str(argv[4])+str(argv[5])+str(argv[6])+str(argv[7])+str(argv[8])+str(argv[9])+str(argv[10])+str(argv[11])+str(argv[12])+'.out') opt.setOption('Summary file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptSheet/SNOPT_summary'+str(argv[1])+str(argv[2])+str(argv[3])+'ORD'+str(argv[4])+str(argv[5])+str(argv[6])+str(argv[7])+str(argv[8])+str(argv[9])+str(argv[10])+str(argv[11])+str(argv[12])+'.out') elif comp == 'fsl': opt.setOption('Print file','/fslhome/ebtingey/compute/VAWTWakeModel/CrossVal/SNOPT_Files/SNOPT_print'+str(argv[1])+str(argv[2])+str(argv[3])+'ORD'+str(argv[4])+str(argv[5])+str(argv[6])+str(argv[7])+str(argv[8])+str(argv[9])+str(argv[10])+str(argv[11])+str(argv[12])+'.out') opt.setOption('Summary file','/fslhome/ebtingey/compute/VAWTWakeModel/CrossVal/SNOPT_Files/SNOPT_summary'+str(argv[1])+str(argv[2])+str(argv[3])+'ORD'+str(argv[4])+str(argv[5])+str(argv[6])+str(argv[7])+str(argv[8])+str(argv[9])+str(argv[10])+str(argv[11])+str(argv[12])+'.out') result = opt(optProb, sens=None) res = result.xStar['param'] elif fit_opt == 'scipy': res,_ = curve_fit(sheet,posdntr,velodtr,p0=param0,maxfev=2820000)
return pdict, fail if sens == "none": sens = None if sens == "user": sens = sensfunc if sens == "matrix-free": sens = [objgrad, jprod, jtprod] # Instantiate Optimization Problem optProb = Optimization("Rosenbrock function", objfunc) optProb.addVarGroup("xvars", 2, "c", value=[3, -3], lower=-5.12, upper=5.12, scale=[1.0, 1.0]) optProb.finalizeDesignVariables() if constrained: optProb.addCon("con", upper=0, scale=1.0) optProb.addObj("obj") # Create optimizer opt = OPT(args.opt, options=optOptions) if testHist == "no": # Just run a normal run sol = opt(optProb, sens=sens, sensMode=sensMode) # print(sol.fStar) print(sol)
params_IdepVar_func=add_gauss_params_IndepVarComps, params_IndepVar_args={})) prob.setup() prob['model_params:integrate'] = False prob['model_params:spread_mode'] = 'bastankhah' prob['model_params:yaw_mode'] = 'bastankhah' prob['model_params:n_std_dev'] = 4. # prob['model_params:m'] = 0.33 # prob['model_params:Dw0'] = 1.3 tuning_obj_function(plot=True) # initialize optimization problem optProb = Optimization('Tuning Gaussian Model to SOWFA', tuning_obj_function) optProb.addVarGroup('ke', 1, lower=0.0, upper=1.0, value=0.152, scalar=1) # optProb.addVarGroup('spread_angle', 1, lower=0.0, upper=30.0, value=3.0, scalar=1) optProb.addVarGroup('rotation_offset_angle', 1, lower=0.0, upper=5.0, value=1.5, scalar=1) # optProb.addVarGroup('ky', 1, lower=0.0, upper=20.0, value=0.1, scalar=1E-4) # optProb.addVarGroup('Dw0', 3, lower=np.zeros(3), upper=np.ones(3)*20., value=np.array([1.3, 1.3, 1.3]))#, scalar=1E-2) # optProb.addVarGroup('m', 1, lower=0.1, upper=20.0, value=0.33, scalar=1E-3) # add objective optProb.addObj('obj', scale=1E-3) # initialize optimizer snopt = SNOPT() # run optimizer sol = snopt(optProb, sens='FD')
return pdict, fail if sens == 'none': sens = None if sens == 'user': sens = sensfunc if sens == 'matrix-free': sens = [objgrad, jprod, jtprod] # Instantiate Optimization Problem optProb = Optimization('Rosenbrock function', objfunc) optProb.addVarGroup('xvars', 2, 'c', value=[3, -3], lower=-5.12, upper=5.12, scale=[1.0, 1.0]) optProb.finalizeDesignVariables() if constrained: optProb.addCon('con', upper=0, scale=1.0) optProb.addObj('obj') # Create optimizer opt = OPT(args.opt, options=optOptions) if testHist == 'no': # Just run a normal run sol = opt(optProb, sens=sens, sensMode=sensMode) # print(sol.fStar) print(sol)
def optimize(func, x0, lb, ub, optimizer, A=[], b=[], Aeq=[], beq=[], args=[]): global fcalls # keep track of function calls myself, seems to be an error in pyopt fcalls = 1 # evalute initial point to get size information and determine if gradients included out = func(x0, *args) if len(out) == 4: gradients = True f, c, _, _ = out else: gradients = False f, c = out nx = len(x0) nc = len(c) nlin = len(b) nleq = len(beq) if hasattr(f, "__len__"): nf = len(f) # multiobjective else: nf = 1 def objcon(xdict): global fcalls fcalls += 1 x = xdict['x'] outputs = {} if gradients: f, c, df, dc = func(x, *args) # these gradients aren't directly used in this function but we will save them for later outputs['g-obj'] = df outputs['g-con'] = dc outputs['g-x'] = x else: f, c = func(x, *args) outputs['con'] = c if nf == 1: outputs['obj'] = f else: # multiobjective for i in range(nf): outputs['obj%d' % i] = f[i] fail = False return outputs, fail def grad(xdict, fdict): # check if this was the x-location we just evaluated from func (should never happen) if not np.array_equal(xdict['x'], fdict['g-x']): f, c, df, dc = func(xdict['x'], *args) global fcalls fcalls += 1 else: df = fdict['g-obj'] dc = fdict['g-con'] # populate gradients (the multiobjective optimizers don't use gradients so no change needed here) gout = {} gout['obj'] = {} gout['obj']['x'] = df gout['con'] = {} gout['con']['x'] = dc fail = False return gout, fail # setup problem optProb = Optimization('optimization', objcon) if nf == 1: optProb.addObj('obj') else: # multiobjective for i in range(nf): optProb.addObj('obj%d' % i) optProb.addVarGroup('x', nx, lower=lb, upper=ub, value=x0) # add nonlinear constraints if nc > 0: optProb.addConGroup('con', nc, upper=0.0) # add linear inequality constraints if nlin > 0: optProb.addConGroup('linear-ineq', nlin, upper=b, linear=True, jac={'x': A}) # add linear equality constraints if nleq > 0: optProb.addConGroup('linear-ineq', nleq, upper=beq, lower=beq, linear=True, jac={'x': Aeq}) # check if gradients defined if gradients: sens = grad else: sens = 'FDR' # forward diff with relative step size with warnings.catch_warnings(): # FIXME: ignore the FutureWarning until fixed warnings.simplefilter("ignore") # run optimization sol = optimizer(optProb, sens=sens) # save solution xstar = sol.xStar['x'] fstar = sol.fStar info = {} info['fcalls'] = fcalls info['time'] = sol.optTime if sol.optInform: info['code'] = sol.optInform # FIXME: bug in how output of NLPQLP is returned if optimizer.name == 'NLPQLP': xtemp = xstar xstar = np.zeros(nx) for i in range(nx): xstar[i] = xtemp[i, 0] # FIXME: because of bug exists in all except SNOPT, also none return cstar # if optimizer.name != 'SNOPT': if gradients: fstar, cstar, _, _ = func(xstar, *args) else: fstar, cstar = func(xstar, *args) # FIXME: handle multiobjective NSGA2 if nf > 1 and optimizer.name == 'NSGA-II': xstar = [] fstar = [] cstar = [] with open('nsga2_final_pop.out') as f: # skip first two lines f.readline() f.readline() for line in f: values = line.split() rank = values[nx + nc + nf + 1] if rank == "1": fstar.append(np.array(values[:nf]).astype(np.float)) cstar.append(np.array(values[nf:nf+nc]).astype(np.float)) xstar.append(np.array(values[nf+nc:nf+nc+nx]).astype(np.float)) xstar = np.array(xstar) fstar = np.array(fstar) cstar = -np.array(cstar) # negative sign because of nsga definition if nc > 0: info['max-c-vio'] = max(np.amax(cstar), 0.0) return xstar, fstar, info
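# A short usage sketch of the general-purpose optimize() wrapper above on a toy
# constrained problem. The objective/constraint function and bounds are made up for
# illustration; any pyOptSparse optimizer instance can be passed in:
import numpy as np
from pyoptsparse import SLSQP

def toy_func(x):
    f = (x[0] - 1.0)**2 + (x[1] + 2.0)**2    # objective
    c = np.array([x[0] + x[1] - 1.0])        # one inequality constraint, c <= 0
    return f, c                              # no gradients, so the wrapper uses 'FDR'

x0 = np.array([0.0, 0.0])
lb = np.array([-5.0, -5.0])
ub = np.array([5.0, 5.0])

xstar, fstar, info = optimize(toy_func, x0, lb, ub, SLSQP())
print(xstar, fstar, info['fcalls'])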
prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
                           wake_model=floris_wrapper,
                           wake_model_options={'nSamples': 0, 'use_rotor_components': False, 'differentiable': True},
                           datasize=0, params_IdepVar_func=add_floris_params_IndepVarComps))
prob.setup()
prob['model_params:useWakeAngle'] = True

# initialize optimization problem
optProb = Optimization('Tuning %s Model to SOWFA' % model, tuning_obj_function)
if model == 'gauss':
    # optProb.addVarGroup('ky', 1, lower=0.01, upper=1.0, value=0.022, scalar=1E1)
    # optProb.addVarGroup('kz', 1, lower=0.01, upper=1.0, value=0.022, scalar=1E1)
    # optProb.addVarGroup('I', 1, lower=0.04, upper=0.5, value=0.06, scalar=1E1)
    optProb.addVarGroup('shear_exp', 1, lower=0.01, upper=1.0, value=0.15, scalar=1)
    # optProb.addVarGroup('yshift', 1, lower=-126.4, upper=126.4, value=0.0)  # , scalar=1E-3)
elif model == 'floris':
    # optProb.addVarGroup('pP', 1, lower=0.0, upper=5.0, value=1.5)  # , scalar=1E-1)
    optProb.addVarGroup('kd', 1, lower=0.0, upper=1.0, value=0.15)  # , scalar=1E-1)
    optProb.addVarGroup('initialWakeAngle', 1, lower=-4.0, upper=4.0, value=1.5)  # , scalar=1E-1)
    optProb.addVarGroup('initialWakeDisplacement', 1, lower=-30.0, upper=30.0, value=-4.5)  # , scalar=1E-1)
    optProb.addVarGroup('bd', 1, lower=-1.0, upper=1.0, value=-0.01)  # , scalar=1E-1)
    optProb.addVarGroup('ke', 1, lower=0.0, upper=1.0, value=0.065)  # , scalar=1E-1)
    optProb.addVarGroup('me', 2, lower=np.array([-1.0, 0.0]), upper=np.array([0.0, 0.9]), value=np.array([-0.5, 0.3]))  # , scalar=1E-1)
    optProb.addVarGroup('MU', 2, lower=np.array([0.0, 1.5]), upper=np.array([1.0, 20.0]), value=np.array([0.5, 5.5]))  # , scalar=1E-1)
    optProb.addVarGroup('aU', 1, lower=0.0, upper=20.0, value=5.0)  # , scalar=1E-1)
    optProb.addVarGroup('bU', 1, lower=0.0, upper=5.0, value=1.66)  # , scalar=1E-1)
    optProb.addVarGroup('cos_spread', 1, lower=0.0, upper=10.0, value=2.0)  # , scalar=1E-1)
x[1] * x[2] * x[3], x[0] * x[2] * x[3], x[0] * x[1] * x[3], x[0] * x[1] * x[2] ]] funcsSens["con1"] = {"xvars": jac} jac = [[2.0 * x[0], 2.0 * x[1], 2.0 * x[2], 2.0 * x[3]]] funcsSens["con2"] = {"xvars": jac} fail = False return funcsSens, fail # Optimization Object optProb = Optimization("HS071 Constraint Problem", objfunc) # Design Variables x0 = [1.0, 5.0, 5.0, 1.0] optProb.addVarGroup("xvars", 4, lower=1, upper=5, value=x0) # Constraints # optProb.addCon('con1', lower=25, upper=1e19) optProb.addCon("con1", lower=25) # optProb.addCon('con2', lower=40, upper=40) optProb.addCon("con2", lower=40, upper=40) # Objective optProb.addObj("obj") # Check optimization problem: print(optProb) # Optimizer opt = OPT(args.opt, options=optOptions)
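# The script presumably continues along these lines (a guess: it assumes the
# sensitivity routine whose tail is shown above is named `sens`):
sol = opt(optProb, sens=sens)
print(sol)
print(sol.fStar, sol.xStar["xvars"])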
class TestOptProb(unittest.TestCase): tol = 1e-12 def objfunc(self, xdict): """ This is a simple quadratic test function with linear constraints. The actual problem doesn't really matter, since we are not testing optimization, but just optProb. However, we need to initialize and run an optimization in order to have optimizer-specific fields in optProb populated, such as jacIndices. This problem is probably not feasible, but that's okay. """ funcs = {} funcs["obj_0"] = 0 for x in xdict.keys(): funcs["obj_0"] += np.sum(np.power(xdict[x], 2)) for iCon, nc in enumerate(self.nCon): conName = "con_{}".format(iCon) funcs[conName] = np.zeros(nc) for x in xdict.keys(): for j in range(nc): funcs[conName][j] = (iCon + 1) * np.sum(xdict[x]) return funcs, False def setup_optProb(self, nObj=1, nDV=[4], nCon=[2], xScale=[1.0], objScale=[1.0], conScale=[1.0], offset=[0.0]): """ This function sets up a general optimization problem, with arbitrary DVs, constraints and objectives. Arbitrary scaling for the various parameters can also be specified. """ self.nObj = nObj self.nDV = nDV self.nCon = nCon self.xScale = xScale self.objScale = objScale self.conScale = conScale self.offset = offset # Optimization Object self.optProb = Optimization("Configurable Test Problem", self.objfunc) self.x0 = {} # Design Variables for iDV in range(len(nDV)): n = nDV[iDV] lower = np.random.uniform(-5, 2, n) upper = np.random.uniform(5, 20, n) x0 = np.random.uniform(lower, upper) dvName = "x{}".format(iDV) self.x0[dvName] = x0 self.optProb.addVarGroup( dvName, n, lower=lower, upper=upper, value=x0, scale=xScale[iDV], offset=offset[iDV], ) # Constraints for iCon in range(len(nCon)): nc = nCon[iCon] lower = np.random.uniform(-5, 2, nc) upper = np.random.uniform(5, 6, nc) self.optProb.addConGroup( "con_{}".format(iCon), nc, lower=lower, upper=upper, scale=conScale[iCon], ) # Objective for iObj in range(nObj): self.optProb.addObj("obj_{}".format(iObj), scale=objScale[iObj]) # Finalize self.optProb.printSparsity() # run optimization # we don't care about outputs, but this performs optimizer-specific re-ordering # of constraints so we need this to test mappings opt = OPT("slsqp", options={"IFILE": "optProb_SLSQP.out"}) opt(self.optProb, "FD") def test_setDV_getDV(self): """ We just test that setDV and getDV work, even with scaling """ self.setup_optProb( nObj=1, nDV=[4, 8], nCon=[2, 3], xScale=[4, 1], objScale=[0.3], conScale=[0.1, 8], offset=[3, 7], ) # test getDV first x0 = self.optProb.getDVs() assert_dict_allclose(x0, self.x0) # now set, get, and compare newDV = {"x0": np.arange(4), "x1": np.arange(8)} self.optProb.setDVs(newDV) outDV = self.optProb.getDVs() assert_dict_allclose(newDV, outDV) def test_setDV_VarGroup(self): """ Test that setDV works with a subset of VarGroups """ self.setup_optProb( nObj=1, nDV=[4, 8], nCon=[2, 3], xScale=[4, 1], objScale=[0.3], conScale=[0.1, 8], offset=[3, 7], ) oldDV = self.optProb.getDVs() # set values for only one VarGroup newDV = {"x0": np.arange(4)} self.optProb.setDVs(newDV) outDV = self.optProb.getDVs() # check x0 changed assert_allclose(newDV["x0"], outDV["x0"]) # check x1 is the same assert_allclose(oldDV["x1"], outDV["x1"]) def test_mappings(self): """ This test checks the various mapping and process helper functions in pyOpt_optimization. In this function we just set up an optimization problem, and the actual test is done in `map_check_value`. 
""" nDV = [4, 8, 1] nCon = [2, 3, 1, 1] self.setup_optProb( nObj=1, nDV=nDV, nCon=nCon, xScale=[np.random.rand(i) for i in nDV], objScale=[0.3], conScale=[np.random.rand(i) for i in nCon], offset=[np.random.rand(i) * np.arange(i) for i in nDV], ) # first test X x = self.optProb.getDVs() self.map_check_value("X", x) # next we check the objective funcs, _ = self.objfunc(x) obj_funcs = {} for key in funcs.keys(): if "obj" in key: obj_funcs[key] = funcs[key] self.map_check_value("Obj", obj_funcs) # lastly we check the constraints funcs, _ = self.objfunc(x) con_funcs = {} for key in funcs.keys(): if "con" in key: con_funcs[key] = funcs[key] self.map_check_value("Con", con_funcs) def map_check_value(self, key, val): """ This function checks all the mapping and process functions in both directions, for a given key = {'X', 'Con', 'Obj'} and val in dictionary format. """ # dictionary of function handles to test map_funcs = { "X": [self.optProb._mapXtoOpt, self.optProb._mapXtoUser], "X_Dict": [self.optProb._mapXtoOpt_Dict, self.optProb._mapXtoUser_Dict], "Con": [self.optProb._mapContoOpt, self.optProb._mapContoUser], "Con_Dict": [self.optProb._mapContoOpt_Dict, self.optProb._mapContoUser_Dict], "Obj": [self.optProb._mapObjtoOpt, self.optProb._mapObjtoUser], "Obj_Dict": [self.optProb._mapObjtoOpt_Dict, self.optProb._mapObjtoUser_Dict], } process_funcs = { "X": {"vec": self.optProb.processXtoVec, "dict": self.optProb.processXtoDict}, "Con": {"vec": self.optProb.processContoVec, "dict": self.optProb.processContoDict}, "Obj": {"vec": self.optProb.processObjtoVec, "dict": self.optProb.processObjtoDict}, } def processValue(key, val, output): """helper function since some functions have optional arguments that are needed""" if key == "Con": return process_funcs[key][output](val, scaled=False, natural=True) elif key == "Obj": return process_funcs[key][output](val, scaled=False) else: return process_funcs[key][output](val) # test dict to vec mappings vec = processValue(key, val, "vec") dictionary = processValue(key, vec, "dict") assert_dict_allclose(val, dictionary) # test mappings using dictionaries val_opt = map_funcs[key + "_Dict"][0](val) val_user = map_funcs[key + "_Dict"][1](val_opt) assert_dict_allclose(val_user, val) assert_dict_not_allclose(val_user, val_opt) # test mappings using vectors val = processValue(key, val, "vec") val_opt = map_funcs[key][0](val) val_user = map_funcs[key][1](val_opt) assert_allclose(val_user, val, atol=self.tol, rtol=self.tol) assert_not_allclose(val_user, val_opt) # check that the scaling was actually done correctly # we only check this for the array version because # it's much simpler if key == "X": scale = np.hstack(self.xScale) offset = np.hstack(self.offset) assert_allclose(val_opt, (val_user - offset) * scale) else: if key == "Obj": scale = np.hstack(self.objScale) else: scale = np.hstack(self.conScale) assert_allclose(val_opt, val_user * scale) def test_finalize(self): """ Check that multiple finalize calls don't mess up the optProb """ self.setup_optProb(nObj=1, nDV=[4, 8], nCon=[2, 3], xScale=[1.0, 1.0], conScale=[1.0, 1.0], offset=[0, 0]) assert_optProb_size(self.optProb, 1, 12, 5) self.optProb.addObj("obj2") assert_optProb_size(self.optProb, 2, 12, 5) self.optProb.addVar("DV2") assert_optProb_size(self.optProb, 2, 13, 5) self.optProb.addCon("CON2") assert_optProb_size(self.optProb, 2, 13, 6)
] }, } fail = False return funcsSens, fail # Optimization Object optProb = Optimization("HS15 Constraint Problem", objfunc) # Design Variables lower = [-5, -5] upper = [0.5, 5] value = [-2, 1] optProb.addVarGroup("xvars", 2, lower=lower, upper=upper, value=value) # Constraints lower = [1, 0] upper = [None, None] optProb.addConGroup("con", 2, lower=lower, upper=upper) # Objective optProb.addObj("obj") # Check optimization problem: print(optProb) # Optimizer opt = OPT(args.opt, options=optOptions)
class TestRosenbrock(OptTest): ## Solve unconstrained Rosenbrock problem. # This problem is scalable w.r.t. design variables number. # We select a problem with 4 design variables, but the # location and value of the minimum do not change with DV # dimensionality # # # min 100 * (x[i + 1] - x[i] ** 2) ** 2 + (1 - x[i]) ** 2 # # The minimum is located at x=(1,....,1) where x # is an arbitrarily sized vector depending on the number N # of design variables. # At the optimum, the function is f(x) = 0. # We select a random initial point for our test. ## name = "Rosenbrock" N = 4 objs = {"obj"} cons = set() DVs = {"xvars"} fStar = 0.0 xStar = {"xvars": np.ones(N)} # Tolerances tol = { "SNOPT": 1e-6, "IPOPT": 1e-6, "NLPQLP": 1e-6, "SLSQP": 1e-6, "CONMIN": 1e-9, "PSQP": 1e-8, "ParOpt": 1e-8, } optOptions = { "SLSQP": { "ACC": 1e-10 }, "NLPQLP": { "accuracy": 1e-10 }, } def objfunc(self, xdict): self.nf += 1 x = xdict["xvars"] funcs = {} funcs["obj"] = 0 for i in range(len(x) - 1): funcs["obj"] += 100 * (x[i + 1] - x[i]**2)**2 + (1 - x[i])**2 fail = False return funcs, fail def sens(self, xdict, funcs): self.ng += 1 x = xdict["xvars"] funcsSens = {} grads = np.zeros(len(x)) for i in range(len(x) - 1): grads[i] += 2 * (200 * x[i]**3 - 200 * x[i] * x[i + 1] + x[i] - 1) grads[i + 1] += 200 * (x[i + 1] - x[i]**2) funcsSens["obj"] = {"xvars": grads} fail = False return funcsSens, fail def setup_optProb(self): # Optimization Object self.optProb = Optimization("Rosenbrock Problem", self.objfunc) np.random.seed(10) value = np.random.normal(size=self.N) lower = np.ones(self.N) * -50 upper = np.ones(self.N) * 50 self.optProb.addVarGroup("xvars", self.N, lower=lower, upper=upper, value=value) # Objective self.optProb.addObj("obj") def test_snopt(self): self.optName = "SNOPT" self.setup_optProb() store_vars = [ "step", "merit", "feasibility", "optimality", "penalty", "Hessian", "condZHZ", "slack", "lambda" ] optOptions = { "Save major iteration variables": store_vars, } self.optimize_with_hotstart(1e-8, optOptions=optOptions) hist = History(self.histFileName, flag="r") data = hist.getValues(callCounters=["last"]) keys = hist.getIterKeys() self.assertIn("isMajor", keys) self.assertEqual(36, data["nMajor"]) for var in store_vars: self.assertIn(var, data.keys()) self.assertEqual(data["Hessian"].shape, (1, 4, 4)) self.assertEqual(data["feasibility"].shape, (1, 1)) self.assertEqual(data["slack"].shape, (1, 1)) self.assertEqual(data["lambda"].shape, (1, 1)) def test_snopt_hotstart_starting_from_grad(self): self.optName = "SNOPT" self.setup_optProb() histName = f"{self.id()}.hst" # Optimize without hot start and store the history self.optimize(storeHistory=histName, hotStart=False) # Load the history dictionary hist = SqliteDict(histName) # Delete the last two keys in the dictionary lastKey = hist["last"] for i in range(2): del hist[str(int(lastKey) - i)] hist.commit() # Optimize starting from the modified history file # The first call will be a gradient evaluation self.optimize(storeHistory=False, hotStart=histName) # Check that we had two function evaluations # The first is from a recursive call and the second is the 'last' call we deleted self.assertEqual(self.nf, 2) # Also check that we had two gradient evaluations # The first is from a call we deleted and the second is the call after 'last' self.assertEqual(self.ng, 2) @parameterized.expand( ["IPOPT", "SLSQP", "PSQP", "CONMIN", "NLPQLP", "ParOpt"]) def test_optimization(self, optName): self.optName = optName self.setup_optProb() optOptions = 
self.optOptions.pop(optName, None) self.optimize_with_hotstart(self.tol[optName], optOptions=optOptions)
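# Post-processing sketch (hypothetical file name): the same History API used in
# test_snopt above can inspect a stored optimization outside the test suite.
from pyoptsparse import History

hist = History("rosenbrock_SNOPT.hst", flag="r")
data = hist.getValues(names=["obj", "xvars"], major=True)
print(data["obj"][-1], data["xvars"][-1])
# Extra SNOPT iteration data (feasibility, optimality, Hessian, ...) is only
# stored when the 'Save major iteration variables' option was set, as in
# test_snopt above.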
# funcsSens['con', 'x'] = -1.0 # funcsSens['con', 'y'] = 1.0 fail = False return funcsSens, fail con_jac = {} con_jac["x"] = np.array(-1.0) con_jac["y"] = np.array(1.0) # Optimization Object optProb = Optimization("Paraboloid", objfunc) # Design Variables optProb.addVarGroup("x", 1, type="c", lower=-50.0, upper=50.0, value=0.0) optProb.addVarGroup("y", 1, type="c", lower=-50.0, upper=50.0, value=0.0) optProb.finalizeDesignVariables() # Objective optProb.addObj("obj") # Equality Constraint optProb.addConGroup("con", 1, lower=-15.0, upper=-15.0, wrt=["x", "y"], linear=True, jac=con_jac)
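# One possible way to finish this script (assumes OPT has been imported from
# pyoptsparse): because 'con' is declared linear with an explicit Jacobian,
# pyOptSparse evaluates it internally from con_jac, so it never needs to be
# returned by objfunc or differentiated by the user.
opt = OPT("SLSQP", options={"ACC": 1e-8})
sol = opt(optProb, sens="FD")
print(sol.fStar, sol.getDVs())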
def fit(s, t, length, plot, comp, read_data, opt_print): global posdn global poslt global velod t2 = t + ".0" wfit = s + "_" + t2 wfit2 = s + "_" + t2 wfit3 = s + "_" + t2 wfit4 = s + "_" + t2 wfit5 = s + "_" + t2 wfit6 = s + "_" + t2 length2 = length length3 = length length4 = length length5 = length length6 = length wind = 15.0 wind2 = 14.0 wind3 = 12.0 wind4 = 16.0 rad = 3.0 dia = rad * 2.0 tsr = float(wfit[3] + "." + wfit[4] + wfit[5]) rot = tsr * wind / rad rot2 = tsr * wind2 / rad rot3 = tsr * wind3 / rad rot4 = tsr * wind4 / rad rot5 = 17.0 rot6 = 18.0 wind5 = rot5 * rad / tsr wind6 = rot6 * rad / tsr if comp == "mac": # fdata = '/Users/ning1/Documents/Flow Lab/STAR-CCM+/NACA0021/MoveForward/test.csv' fdata = "/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/Velocity Sections/" + wfit + ".csv" fdata2 = ( "/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel14/Velocity/" + wfit2 + ".csv" ) fdata3 = ( "/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel12/Velocity/" + wfit3 + ".csv" ) fdata4 = ( "/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel16/Velocity/" + wfit4 + ".csv" ) fdata5 = ( "/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot17/Velocity/" + wfit5 + ".csv" ) fdata6 = ( "/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot18/Velocity/" + wfit6 + ".csv" ) elif comp == "fsl": fdata = "/fslhome/ebtingey/compute/moveForward/Velocity/" + wfit + ".csv" fdata2 = "/fslhome/ebtingey/compute/moveForward/vel14/Velocity/" + wfit2 + ".csv" fdata3 = "/fslhome/ebtingey/compute/moveForward/vel12/Velocity/" + wfit3 + ".csv" fdata4 = "/fslhome/ebtingey/compute/moveForward/vel16/Velocity/" + wfit4 + ".csv" fdata5 = "/fslhome/ebtingey/compute/moveForward/rot17/Velocity/" + wfit5 + ".csv" fdata6 = "/fslhome/ebtingey/compute/moveForward/rot18/Velocity/" + wfit6 + ".csv" elif comp == "win": fdata = ( "C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//Velocity Sections//" + wfit + ".csv" ) fdata2 = ( "C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel14//Velocity//" + wfit2 + ".csv" ) fdata3 = ( "C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel12//Velocity//" + wfit3 + ".csv" ) fdata4 = ( "C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel16//Velocity//" + wfit4 + ".csv" ) fdata5 = ( "C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//rot17//Velocity//" + wfit5 + ".csv" ) fdata6 = ( "C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//rot18//Velocity//" + wfit6 + ".csv" ) if read_data == 1: posdn, poslt, velod = starccm_read(np.array([fdata]), dia, np.array([wind]), length, opt_print) if read_data == 2: posdn, poslt, velod = starccm_read(np.array([fdata, fdata2]), dia, np.array([wind, wind2]), length, opt_print) if read_data == 3: posdn, poslt, velod = starccm_read( np.array([fdata, fdata2, fdata3]), dia, np.array([wind, wind2, wind3]), length, opt_print ) if read_data == 4: posdn, poslt, velod = starccm_read( np.array([fdata, fdata2, fdata3, fdata4]), dia, np.array([wind, wind2, wind3, wind4]), length, opt_print ) if read_data == 5: posdn, poslt, velod = starccm_read( np.array([fdata, fdata2, fdata3, fdata4, fdata5]), dia, np.array([wind, wind2, wind3, wind4, wind5]), length, opt_print, ) if read_data == 
6: posdn, poslt, velod = starccm_read( np.array([fdata, fdata2, fdata3, fdata4, fdata5, fdata6]), dia, np.array([wind, wind2, wind3, wind4, wind5, wind6]), length, opt_print, ) if plot == True: if read_data == 1: pos1d, pos2d, pos3d, pos4d, pos5d, pos6d, pos7d, pos8d, pos9d, pos10d, pos11d, pos12d, pos13d, pos14d, pos15d, pos16d, pos17d, pos18d, pos19d, pos20d, pos21d, pos22d, pos23d, pos24d, pos25d, pos26d, pos27d, pos28d, pos29d, pos30d, velo1d, velo2d, velo3d, velo4d, velo5d, velo6d, velo7d, velo8d, velo9d, velo10d, velo11d, velo12d, velo13d, velo14d, velo15d, velo16d, velo17d, velo18d, velo19d, velo20d, velo21d, velo22d, velo23d, velo24d, velo25d, velo26d, velo27d, velo28d, velo29d, velo30d = starccm_read2( np.array([fdata]), dia, np.array([wind]), opt_print ) if read_data == 2: pos1d, pos2d, pos3d, pos4d, pos5d, pos6d, pos7d, pos8d, pos9d, pos10d, pos11d, pos12d, pos13d, pos14d, pos15d, pos16d, pos17d, pos18d, pos19d, pos20d, pos21d, pos22d, pos23d, pos24d, pos25d, pos26d, pos27d, pos28d, pos29d, pos30d, velo1d, velo2d, velo3d, velo4d, velo5d, velo6d, velo7d, velo8d, velo9d, velo10d, velo11d, velo12d, velo13d, velo14d, velo15d, velo16d, velo17d, velo18d, velo19d, velo20d, velo21d, velo22d, velo23d, velo24d, velo25d, velo26d, velo27d, velo28d, velo29d, velo30d = starccm_read2( np.array([fdata, fdata2]), dia, np.array([wind, wind2]), opt_print ) if read_data == 3: pos1d, pos2d, pos3d, pos4d, pos5d, pos6d, pos7d, pos8d, pos9d, pos10d, pos11d, pos12d, pos13d, pos14d, pos15d, pos16d, pos17d, pos18d, pos19d, pos20d, pos21d, pos22d, pos23d, pos24d, pos25d, pos26d, pos27d, pos28d, pos29d, pos30d, velo1d, velo2d, velo3d, velo4d, velo5d, velo6d, velo7d, velo8d, velo9d, velo10d, velo11d, velo12d, velo13d, velo14d, velo15d, velo16d, velo17d, velo18d, velo19d, velo20d, velo21d, velo22d, velo23d, velo24d, velo25d, velo26d, velo27d, velo28d, velo29d, velo30d = starccm_read2( np.array([fdata, fdata2, fdata3]), dia, np.array([wind, wind2, wind3]), opt_print ) if read_data == 4: pos1d, pos2d, pos3d, pos4d, pos5d, pos6d, pos7d, pos8d, pos9d, pos10d, pos11d, pos12d, pos13d, pos14d, pos15d, pos16d, pos17d, pos18d, pos19d, pos20d, pos21d, pos22d, pos23d, pos24d, pos25d, pos26d, pos27d, pos28d, pos29d, pos30d, velo1d, velo2d, velo3d, velo4d, velo5d, velo6d, velo7d, velo8d, velo9d, velo10d, velo11d, velo12d, velo13d, velo14d, velo15d, velo16d, velo17d, velo18d, velo19d, velo20d, velo21d, velo22d, velo23d, velo24d, velo25d, velo26d, velo27d, velo28d, velo29d, velo30d = starccm_read2( np.array([fdata, fdata2, fdata3, fdata4]), dia, np.array([wind, wind2, wind3, wind4]), opt_print ) if read_data == 5: pos1d, pos2d, pos3d, pos4d, pos5d, pos6d, pos7d, pos8d, pos9d, pos10d, pos11d, pos12d, pos13d, pos14d, pos15d, pos16d, pos17d, pos18d, pos19d, pos20d, pos21d, pos22d, pos23d, pos24d, pos25d, pos26d, pos27d, pos28d, pos29d, pos30d, velo1d, velo2d, velo3d, velo4d, velo5d, velo6d, velo7d, velo8d, velo9d, velo10d, velo11d, velo12d, velo13d, velo14d, velo15d, velo16d, velo17d, velo18d, velo19d, velo20d, velo21d, velo22d, velo23d, velo24d, velo25d, velo26d, velo27d, velo28d, velo29d, velo30d = starccm_read2( np.array([fdata, fdata2, fdata3, fdata4, fdata5]), dia, np.array([wind, wind2, wind3, wind4, wind5]), opt_print, ) if read_data == 6: pos1d, pos2d, pos3d, pos4d, pos5d, pos6d, pos7d, pos8d, pos9d, pos10d, pos11d, pos12d, pos13d, pos14d, pos15d, pos16d, pos17d, pos18d, pos19d, pos20d, pos21d, pos22d, pos23d, pos24d, pos25d, pos26d, pos27d, pos28d, pos29d, pos30d, velo1d, velo2d, velo3d, velo4d, velo5d, velo6d, 
velo7d, velo8d, velo9d, velo10d, velo11d, velo12d, velo13d, velo14d, velo15d, velo16d, velo17d, velo18d, velo19d, velo20d, velo21d, velo22d, velo23d, velo24d, velo25d, velo26d, velo27d, velo28d, velo29d, velo30d = starccm_read2( np.array([fdata, fdata2, fdata3, fdata4, fdata5, fdata6]), dia, np.array([wind, wind2, wind3, wind4, wind5, wind6]), opt_print, ) start = length / 30.0 xd = np.linspace(start, length, 30) / dia ## Optimization optProb = Optimization("VAWTWake_Velo", obj_func) optProb.addObj("obj") spr10 = 10.0 pow10 = 10.0 pow20 = 0.5 pow30 = 1.0 spr20 = 2.0 skw0 = 0.0 scl10 = 0.5 scl20 = 0.1 scl30 = 10.0 spr10 = 10.0 pow10 = 5.0 pow20 = 0.5 pow30 = 1.0 spr20 = 2.0 skw0 = 0.0 scl10 = 0.5 scl20 = 0.1 scl30 = 20.0 spr10 = 213.8593169 pow10 = 10.39210953 pow20 = 2.086951239 pow30 = 0.035659319 spr20 = 0.007589688 skw0 = 10.63462155 scl10 = 0.537566448 scl20 = 0.041077603 scl30 = 56.74689143 spr10 = 100.0 pow10 = 10.0 pow20 = 0.5 pow30 = 0.0 # 1.0 spr20 = 20.0 skw0 = 0.0 scl10 = 0.5 scl20 = 0.1 scl30 = 10.0 param0 = np.array([spr10, pow10, pow20, pow30, spr20, skw0, scl10, scl20, scl30]) param_l = np.array([0.0, 0.0, 0.0, 0.0, 0.0, None, 0.0, 0.0, 0.0]) param_u = np.array([None, None, None, None, None, None, 1.0, 1.0, None]) nparam = np.size(param0) optProb.addVarGroup("param", nparam, "c", lower=param_l, upper=param_u, value=param0) opt = SNOPT() opt.setOption("Scale option", 2) if comp == "mac": opt.setOption( "Print file", "/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_print" + s + "_" + t + ".out", ) opt.setOption( "Summary file", "/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_summary" + s + "_" + t + ".out", ) elif comp == "fsl": opt.setOption("Print file", "/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_print" + s + "_" + t + ".out") opt.setOption( "Summary file", "/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_summary" + s + "_" + t + ".out" ) elif comp == "win": opt.setOption( "Print file", "C://Users//TingeyPC//Documents//FLOW Lab//VAWTWakeModel//wake_model//data//optVel//SNOPT_print" + s + "_" + t + ".out", ) opt.setOption( "Summary file", "C://Users//TingeyPC//Documents//FLOW Lab//VAWTWakeModel//wake_model//data//OptVel//SNOPT_summary" + s + "_" + t + ".out", ) res = opt(optProb, sens=None) if opt_print == True: print res pow = res.fStar paramf = res.xStar["param"] if opt_print == True: print paramf[0] print paramf[1] print paramf[2] print paramf[3] print paramf[4] print paramf[5] print paramf[6] print paramf[7] print paramf[8] spr1 = paramf[0] pow1 = paramf[1] pow2 = paramf[2] pow3 = paramf[3] spr2 = paramf[4] skw = paramf[5] scl1 = paramf[6] scl2 = paramf[7] scl3 = paramf[8] paper = False if plot == True: if paper == True: for i in range(30): name = str(i + 1) ind = str(i) plt.figure(1) ax1 = plt.subplot(5, 6, i + 1) color = "bo" color2 = "r-" fs = 15 lab = "CFD" lab2 = "Trend" tex = "$x/D$ = " + str("{0:.2f}".format(x[i] / dia)) exec ("xfit = np.linspace(min(pos" + name + "/dia)-1.,max(pos" + name + "/dia)+1.,500)") if i == 5: exec ("xfit = np.linspace(min(pos" + name + "d)-1.,max(pos" + name + "d)+1.,500)") exec ("plt.plot(velo" + name + "d,pos" + name + "d,color,label=lab)") skw_v, spr_v, scl_v, rat_v, spr_v = paramfit(xd[i], skw, spr, scl, rat, spr) plt.plot(veldist(xfit, skw_v, spr_v, scl_v, rat_v, spr_v), xfit, "r-", linewidth=2, label=lab2) plt.xlim(0.0, 1.5) # plt.ylim(-4.,4.) 
plt.legend(loc="upper left", bbox_to_anchor=(1, 1), fontsize=fs) else: exec ("xfit = np.linspace(min(pos" + name + "d)-1.,max(pos" + name + "d)+1.,500)") exec ("plt.plot(velo" + name + "d,pos" + name + "d,color)") skw_v, spr_v, scl_v, rat_v, spr_v = paramfit(xd[i], skw, spr, scl, rat, spr) plt.plot(veldist(xfit, skw_v, spr_v, scl_v, rat_v, spr_v), xfit, "r-", linewidth=2) plt.xlim(0.0, 1.5) # plt.ylim(-4.,4.) plt.text(0.3, 0.8, tex, fontsize=fs) if i <= 23: plt.setp(ax1.get_xticklabels(), visible=False) else: plt.xlabel("$y/D$", fontsize=fs) plt.xticks(fontsize=fs) if i == 0 or i == 6 or i == 12 or i == 18 or i == 24: plt.ylabel(r"$u/U_\infty$", fontsize=fs) plt.yticks(fontsize=fs) else: plt.setp(ax1.get_yticklabels(), visible=False) elif paper == False: for i in range(30): name = str(i + 1) plt.figure(1) plt.subplot(5, 6, i + 1) color = "bo" exec ("xfit = np.linspace(min(pos" + name + "d)-1.,max(pos" + name + "d)+1.,500)") exec ("plt.plot(velo" + name + "d,pos" + name + "d,color)") plt.plot( veldist(xd[i], xfit, spr1, pow1, pow2, pow3, spr2, skw, scl1, scl2, scl3), xfit, "r-", linewidth=2 ) plt.xlim(0.0, 1.5) # plt.ylim(-4.,4.) # plt.legend(loc=1) plt.xlabel("Normalized Velocity") plt.ylabel("$y/D$") return spr1, pow1, pow2, pow3, spr2, skw, scl1, scl2, scl3
def run(self): """ Excute pyOptsparse. Note that pyOpt controls the execution, and the individual optimizers (e.g., SNOPT) control the iteration. Returns ------- boolean Failure flag; True if failed to converge, False is successful. """ problem = self._problem model = problem.model relevant = model._relevant self.pyopt_solution = None self._total_jac = None self.iter_count = 0 fwd = problem._mode == 'fwd' optimizer = self.options['optimizer'] # Only need initial run if we have linear constraints or if we are using an optimizer that # doesn't perform one initially. con_meta = self._cons model_ran = False if optimizer in run_required or np.any([con['linear'] for con in itervalues(self._cons)]): with RecordingDebugging(optimizer, self.iter_count, self) as rec: # Initial Run model._solve_nonlinear() rec.abs = 0.0 rec.rel = 0.0 model_ran = True self.iter_count += 1 # compute dynamic simul deriv coloring or just sparsity if option is set if coloring_mod._use_sparsity: if self.options['dynamic_simul_derivs']: coloring_mod.dynamic_simul_coloring(self, run_model=not model_ran, do_sparsity=True) elif self.options['dynamic_derivs_sparsity']: coloring_mod.dynamic_sparsity(self) opt_prob = Optimization(self.options['title'], self._objfunc) # Add all design variables param_meta = self._designvars self._indep_list = indep_list = list(param_meta) param_vals = self.get_design_var_values() for name, meta in iteritems(param_meta): opt_prob.addVarGroup(name, meta['size'], type='c', value=param_vals[name], lower=meta['lower'], upper=meta['upper']) opt_prob.finalizeDesignVariables() # Add all objectives objs = self.get_objective_values() for name in objs: opt_prob.addObj(name) self._quantities.append(name) # Calculate and save derivatives for any linear constraints. lcons = [key for (key, con) in iteritems(con_meta) if con['linear']] if len(lcons) > 0: _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict') # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation # of linear constraints! to_remove = [] for jacdct in itervalues(_lin_jacs): for n, subjac in iteritems(jacdct): if isinstance(subjac, np.ndarray): # we can safely use coo_matrix to automatically convert the ndarray # since our linear constraint jacs are constant, so zeros won't become # nonzero during the optimization. mat = coo_matrix(subjac) if mat.row.size > 0: # convert to 'coo' format here to avoid an emphatic warning # by pyoptsparse. 
jacdct[n] = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape} # Add all equality constraints for name, meta in iteritems(con_meta): if meta['equals'] is None: continue size = meta['size'] lower = upper = meta['equals'] if fwd: wrt = [v for v in indep_list if name in relevant[v]] else: rels = relevant[name] wrt = [v for v in indep_list if v in rels] if meta['linear']: jac = {w: _lin_jacs[name][w] for w in wrt} opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=wrt, jac=jac) else: if name in self._res_jacs: resjac = self._res_jacs[name] jac = {n: resjac[n] for n in wrt} else: jac = None opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac) self._quantities.append(name) # Add all inequality constraints for name, meta in iteritems(con_meta): if meta['equals'] is not None: continue size = meta['size'] # Bounds - double sided is supported lower = meta['lower'] upper = meta['upper'] if fwd: wrt = [v for v in indep_list if name in relevant[v]] else: rels = relevant[name] wrt = [v for v in indep_list if v in rels] if meta['linear']: jac = {w: _lin_jacs[name][w] for w in wrt} opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=wrt, jac=jac) else: if name in self._res_jacs: resjac = self._res_jacs[name] jac = {n: resjac[n] for n in wrt} else: jac = None opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac) self._quantities.append(name) # Instantiate the requested optimizer try: _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0) opt = getattr(_tmp, optimizer)() except Exception as err: # Change whatever pyopt gives us to an ImportError, give it a readable message, # but raise with the original traceback. msg = "Optimizer %s is not available in this installation." % optimizer reraise(ImportError, ImportError(msg), sys.exc_info()[2]) # Set optimization options for option, value in self.opt_settings.items(): opt.setOption(option, value) # Execute the optimization problem if self.options['gradient method'] == 'pyopt_fd': # Use pyOpt's internal finite difference # TODO: Need to get this from OpenMDAO # fd_step = problem.root.deriv_options['step_size'] fd_step = 1e-6 sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file, hotStart=self.hotstart_file) elif self.options['gradient method'] == 'snopt_fd': if self.options['optimizer'] == 'SNOPT': # Use SNOPT's internal finite difference # TODO: Need to get this from OpenMDAO # fd_step = problem.root.deriv_options['step_size'] fd_step = 1e-6 sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file, hotStart=self.hotstart_file) else: msg = "SNOPT's internal finite difference can only be used with SNOPT" raise Exception(msg) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file, hotStart=self.hotstart_file) # Print results if self.options['print_results']: print(sol) # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() for name in indep_list: self.set_design_var(name, dv_dict[name]) with RecordingDebugging(self.options['optimizer'], self.iter_count, self) as rec: model._solve_nonlinear() rec.abs = 0.0 rec.rel = 0.0 self.iter_count += 1 # Save the most recent solution. self.pyopt_solution = sol try: exit_status = sol.optInform['value'] self.fail = False # These are various failed statuses. 
if exit_status > 2: self.fail = True except KeyError: # optimizers other than pySNOPT may not populate this dict pass return self.fail
def train_pyoptsparse(params={}, tune_search=False): # Process params # params = process_params(params) torch.manual_seed(7) objective = Objective(params) objective.train() shapes = {k:p.shape for k, p in objective.named_parameters()} # def objfun(x): # x_ = {k:torch.from_numpy(x[k].reshape(shapes[k])) for k in x} # objective.load_state_dict(x_, strict=False) # objective.zero_grad() # loss = objective() # loss.backward() # funcs = { # 'obj':loss.detach().numpy(), # 'grads':{ # 'obj': {k:p.grad.flatten().numpy() for k, p in objective.named_parameters() if p.requires_grad} # } # } # # import ipdb; ipdb.set_trace() # return funcs, False # def sensfun(x, funcs): # return funcs['grads'] obj = PyTorchObjective(objective) def objfun(x): # ipdb.set_trace() return {'obj': obj.fun(x['x'])}, 0 def sensfun(x, funcs): return {'obj':{'x':obj.jac(x['x'])}} problem = Optimization('nn', objfun) # import ipdb; ipdb.set_trace() problem.addVarGroup('x', value=obj.x0, nVars=obj.x0.shape[0]) # for k, p in objective.named_parameters(): # if p.requires_grad: # nVars = 1 # for n in p.shape: # nVars *= n # problem.addVarGroup(k, nVars=nVars, value=p.detach().numpy().flatten()) problem.addObj('obj') name = 'train' opt = OPT('SNOPT', options={'Print file':f'{name}_snopt_print.out', 'Summary file':f'{name}_snopt_summary.opt', 'Major iterations limit':300}) sol = opt(problem, sens=sensfun, storeHistory='train.hst') model = objective.model torch.save(model.state_dict(), os.path.join("models","model.mdl")) with open(os.path.join("models","model.json"), "w") as f: json.dump(params, f, indent=4) torch.save(model, os.path.join("models","model.load")) print(sol.optInform) print(sol.objFun(sol.xStar)[0]['obj']) ipdb.set_trace() return params, model
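# Rough sketch of what the PyTorchObjective wrapper used above might look like.
# The class itself is not shown in this snippet; its x0/fun/jac interface is
# inferred from how it is called, so treat this as an assumption rather than
# the actual implementation.
import numpy as np
import torch

class PyTorchObjectiveSketch:
    """Expose a torch module's parameters as one flat design vector."""

    def __init__(self, module):
        self.module = module
        self.params = [p for p in module.parameters() if p.requires_grad]
        self.x0 = np.concatenate([p.detach().numpy().ravel() for p in self.params])

    def _load(self, x):
        # Copy the flat design vector back into the module's parameters.
        offset = 0
        with torch.no_grad():
            for p in self.params:
                n = p.numel()
                p.copy_(torch.from_numpy(x[offset:offset + n]).reshape(p.shape))
                offset += n

    def fun(self, x):
        self._load(x)
        self.module.zero_grad()
        loss = self.module()  # the Objective module returns its training loss
        loss.backward()
        self._grad = np.concatenate([p.grad.detach().numpy().ravel() for p in self.params])
        return float(loss.detach())

    def jac(self, x):
        # pyOptSparse calls the sens routine right after evaluating at the same x.
        return self._grad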
class TestSphere(OptTest):
    ## Solve unconstrained Sphere problem.
    #  This problem is scalable w.r.t. design variables number.
    #  We select a problem with 4 design variables, but the
    #  location and value of the minimum do not change with DV
    #  dimensionality
    #
    #  min Sum(x[i] ** 2)
    #
    #  The minimum is located at x=(0,....,0) where x
    #  is an arbitrarily sized vector depending on the number N
    #  of design variables.
    #  At the optimum, the function is f(x) = 0.
    #  We select a random initial point for our test.
    ##

    name = "Sphere"
    N = 4
    objs = {"obj"}
    cons = set()
    DVs = {"xvars"}
    fStar = 0.0
    xStar = {"xvars": np.zeros(N)}

    # Tolerances
    tol = {"ALPSO": 1e-3}
    optOptions = {
        "ALPSO": {  # sphere
            "SwarmSize": 20,
            "maxOuterIter": 10,
            "c1": 1.0,  # Cognitive Parameter
            "c2": 1.25,  # Social Parameter
            "stopCriteria": 0,  # 0: maxOuterIter, 1: convergence
            "seed": 1235,
        }
    }

    def objfunc(self, xdict):
        self.nf += 1
        x = xdict["xvars"]
        funcs = {"obj": np.dot(x, x)}
        fail = False
        return funcs, fail

    def sens(self, xdict, _funcs):
        self.ng += 1
        x = xdict["xvars"]
        funcsSens = {"obj": {"xvars": 2 * x}}
        fail = False
        return funcsSens, fail

    def setup_optProb(self):
        # Optimization Object
        self.optProb = Optimization("Sphere Problem", self.objfunc)
        np.random.seed(10)
        value = np.random.normal(size=self.N)
        lower = np.ones(self.N) * -50
        upper = np.ones(self.N) * 50
        self.optProb.addVarGroup("xvars", self.N, lower=lower, upper=upper, value=value)
        # Objective
        self.optProb.addObj("obj")

    @parameterized.expand(["ALPSO"])
    def test_optimization(self, optName):
        self.optName = optName
        self.setup_optProb()
        optOptions = self.optOptions.pop(optName, None)
        self.optimize_with_hotstart(self.tol[optName], optOptions=optOptions)
def run(self): """ Excute pyOptsparse. Note that pyOpt controls the execution, and the individual optimizers (e.g., SNOPT) control the iteration. Returns ------- boolean Failure flag; True if failed to converge, False is successful. """ problem = self._problem() model = problem.model relevant = model._relevant self.pyopt_solution = None self._total_jac = None self.iter_count = 0 fwd = problem._mode == 'fwd' optimizer = self.options['optimizer'] self._quantities = [] self._check_for_missing_objective() # Only need initial run if we have linear constraints or if we are using an optimizer that # doesn't perform one initially. con_meta = self._cons model_ran = False if optimizer in run_required or np.any( [con['linear'] for con in self._cons.values()]): with RecordingDebugging(self._get_name(), self.iter_count, self) as rec: # Initial Run model.run_solve_nonlinear() rec.abs = 0.0 rec.rel = 0.0 model_ran = True self.iter_count += 1 # compute dynamic simul deriv coloring or just sparsity if option is set if c_mod._use_total_sparsity: coloring = None if self._coloring_info['coloring'] is None and self._coloring_info[ 'dynamic']: coloring = c_mod.dynamic_total_coloring( self, run_model=not model_ran, fname=self._get_total_coloring_fname()) if coloring is not None: # if the improvement wasn't large enough, don't use coloring pct = coloring._solves_info()[-1] info = self._coloring_info if info['min_improve_pct'] > pct: info['coloring'] = info['static'] = None simple_warning( "%s: Coloring was deactivated. Improvement of %.1f%% was less " "than min allowed (%.1f%%)." % (self.msginfo, pct, info['min_improve_pct'])) comm = None if isinstance(problem.comm, FakeComm) else problem.comm opt_prob = Optimization(self.options['title'], weak_method_wrapper(self, '_objfunc'), comm=comm) # Add all design variables input_meta = self._designvars self._indep_list = indep_list = list(input_meta) input_vals = self.get_design_var_values() for name, meta in input_meta.items(): size = meta['global_size'] if meta['distributed'] else meta['size'] opt_prob.addVarGroup(name, size, type='c', value=input_vals[name], lower=meta['lower'], upper=meta['upper']) opt_prob.finalizeDesignVariables() # Add all objectives objs = self.get_objective_values() for name in objs: opt_prob.addObj(name) self._quantities.append(name) # Calculate and save derivatives for any linear constraints. lcons = [key for (key, con) in con_meta.items() if con['linear']] if len(lcons) > 0: _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict') # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation # of linear constraints! to_remove = [] for jacdct in _lin_jacs.values(): for n, subjac in jacdct.items(): if isinstance(subjac, np.ndarray): # we can safely use coo_matrix to automatically convert the ndarray # since our linear constraint jacs are constant, so zeros won't become # nonzero during the optimization. mat = coo_matrix(subjac) if mat.row.size > 0: # convert to 'coo' format here to avoid an emphatic warning # by pyoptsparse. 
jacdct[n] = { 'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape } # Add all equality constraints for name, meta in con_meta.items(): if meta['equals'] is None: continue size = meta['global_size'] if meta['distributed'] else meta['size'] lower = upper = meta['equals'] if fwd: wrt = [ v for v in indep_list if name in relevant[input_meta[v]['ivc_source']] ] else: rels = relevant[name] wrt = [ v for v in indep_list if input_meta[v]['ivc_source'] in rels ] if meta['linear']: jac = {w: _lin_jacs[name][w] for w in wrt} opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=wrt, jac=jac) else: if name in self._res_jacs: resjac = self._res_jacs[name] jac = {n: resjac[input_meta[n]['ivc_source']] for n in wrt} else: jac = None opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac) self._quantities.append(name) # Add all inequality constraints for name, meta in con_meta.items(): if meta['equals'] is not None: continue size = meta['global_size'] if meta['distributed'] else meta['size'] # Bounds - double sided is supported lower = meta['lower'] upper = meta['upper'] if fwd: wrt = [ v for v in indep_list if name in relevant[input_meta[v]['ivc_source']] ] else: rels = relevant[name] wrt = [ v for v in indep_list if input_meta[v]['ivc_source'] in rels ] if meta['linear']: jac = {w: _lin_jacs[name][w] for w in wrt} opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=wrt, jac=jac) else: if name in self._res_jacs: resjac = self._res_jacs[name] jac = {n: resjac[input_meta[n]['ivc_source']] for n in wrt} else: jac = None opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac) self._quantities.append(name) # Instantiate the requested optimizer try: _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0) opt = getattr(_tmp, optimizer)() except Exception as err: # Change whatever pyopt gives us to an ImportError, give it a readable message, # but raise with the original traceback. msg = "Optimizer %s is not available in this installation." % optimizer raise ImportError(msg) # Process any default optimizer-specific settings. 
if optimizer in DEFAULT_OPT_SETTINGS: for name, value in DEFAULT_OPT_SETTINGS[optimizer].items(): if name not in self.opt_settings: self.opt_settings[name] = value # Set optimization options for option, value in self.opt_settings.items(): opt.setOption(option, value) # Execute the optimization problem if self.options['gradient method'] == 'pyopt_fd': # Use pyOpt's internal finite difference # TODO: Need to get this from OpenMDAO # fd_step = problem.model.deriv_options['step_size'] fd_step = 1e-6 sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file, hotStart=self.hotstart_file) elif self.options['gradient method'] == 'snopt_fd': if self.options['optimizer'] == 'SNOPT': # Use SNOPT's internal finite difference # TODO: Need to get this from OpenMDAO # fd_step = problem.model.deriv_options['step_size'] fd_step = 1e-6 sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file, hotStart=self.hotstart_file) else: raise Exception( "SNOPT's internal finite difference can only be used with SNOPT" ) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=weak_method_wrapper(self, '_gradfunc'), storeHistory=self.hist_file, hotStart=self.hotstart_file) # Print results if self.options['print_results']: print(sol) # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() for name in indep_list: self.set_design_var(name, dv_dict[name]) with RecordingDebugging(self._get_name(), self.iter_count, self) as rec: try: model.run_solve_nonlinear() except AnalysisError: model._clear_iprint() rec.abs = 0.0 rec.rel = 0.0 self.iter_count += 1 # Save the most recent solution. self.pyopt_solution = sol try: exit_status = sol.optInform['value'] self.fail = False # These are various failed statuses. if optimizer == 'IPOPT': if exit_status not in {0, 1}: self.fail = True elif exit_status > 2: self.fail = True except KeyError: # optimizers other than pySNOPT may not populate this dict pass # revert signal handler to cached version sigusr = self.options['user_terminate_signal'] if sigusr is not None: signal.signal(sigusr, self._signal_cache) self._signal_cache = None # to prevent memory leak test from failing return self.fail
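# Usage sketch for the driver above (hypothetical model; the import path can
# differ between OpenMDAO versions). The optimizer is chosen through
# options['optimizer'] and optimizer-specific settings go through opt_settings,
# which run() forwards via opt.setOption(...).
import openmdao.api as om
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver

prob = om.Problem()
prob.model.add_subsystem('ivc', om.IndepVarComp('x', 5.0), promotes=['x'])
prob.model.add_subsystem('comp', om.ExecComp('f = (x - 3.0)**2'), promotes=['x', 'f'])
prob.model.add_design_var('x', lower=-10.0, upper=10.0)
prob.model.add_objective('f')

prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['print_results'] = True
prob.driver.opt_settings['ACC'] = 1e-8

prob.setup()
prob.run_driver()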
def execute(self): """pyOpt execution. Note that pyOpt controls the execution, and the individual optimizers control the iteration.""" self.pyOpt_solution = None self.run_iteration() opt_prob = Optimization(self.title, self.objfunc) # Add all parameters self.param_type = {} self.nparam = self.total_parameters() param_list = [] for name, param in self.get_parameters().iteritems(): # We need to identify Enums, Lists, Dicts metadata = param.get_metadata()[1] values = param.evaluate() # Assuming uniform enumerated, discrete, or continuous for now. val = values[0] choices = [] if 'values' in metadata and \ isinstance(metadata['values'], (list, tuple, array, set)): vartype = 'd' choices = metadata['values'] elif isinstance(val, bool): vartype = 'd' choices = [True, False] elif isinstance(val, (int, int32, int64)): vartype = 'i' elif isinstance(val, (float, float32, float64)): vartype = 'c' else: msg = 'Only continuous, discrete, or enumerated variables' \ ' are supported. %s is %s.' % (name, type(val)) self.raise_exception(msg, ValueError) self.param_type[name] = vartype lower_bounds = param.get_low() upper_bounds = param.get_high() opt_prob.addVarGroup(name, len(values), type=vartype, lower=lower_bounds, upper=upper_bounds, value=values, choices=choices) param_list.append(name) # Add all objectives for name, obj in self.get_objectives().iteritems(): name = '%s.out0' % obj.pcomp_name opt_prob.addObj(name) # Calculate and save gradient for any linear constraints. lcons = self.get_constraints(linear=True).values() + \ self.get_2sided_constraints(linear=True).values() if len(lcons) > 0: lcon_names = ['%s.out0' % obj.pcomp_name for obj in lcons] self.lin_jacs = self.workflow.calc_gradient(param_list, lcon_names, return_format='dict') # Add all equality constraints nlcons = [] for name, con in self.get_eq_constraints().iteritems(): size = con.size lower = zeros((size)) upper = zeros((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, lower=lower, upper=upper) nlcons.append(name) # Add all inequality constraints for name, con in self.get_ineq_constraints().iteritems(): size = con.size upper = zeros((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, upper=upper, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, upper=upper) nlcons.append(name) # Add all double_sided constraints for name, con in self.get_2sided_constraints().iteritems(): size = con.size upper = con.high * ones((size)) lower = con.low * ones((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, upper=upper, lower=lower) nlcons.append(name) self.objs = self.list_objective_targets() self.nlcons = nlcons # Instantiate the requested optimizer optimizer = self.optimizer try: exec('from pyoptsparse import %s' % optimizer) except ImportError: msg = "Optimizer %s is not available in this installation." 
% \ optimizer self.raise_exception(msg, ImportError) optname = vars()[optimizer] opt = optname() # Set optimization options for option, value in self.options.iteritems(): opt.setOption(option, value) # Execute the optimization problem if self.pyopt_diff: # Use pyOpt's internal finite difference sol = opt(opt_prob, sens='FD', sensStep=self.gradient_options.fd_step) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self.gradfunc) # Print results if self.print_results: print sol # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() param_types = self.param_type for name, param in self.get_parameters().iteritems(): val = dv_dict[name] if param_types[name] == 'i': val = int(round(val)) self.set_parameter_by_name(name, val) self.run_iteration() # Save the most recent solution. self.pyOpt_solution = sol
def optimize(self, optName, optOptions={}, storeHistory=False, places=5, hotStart=None): self.nf = 0 # number of function evaluations self.ng = 0 # number of gradient evaluations # Optimization Object optProb = Optimization('HS15 Constraint Problem', self.objfunc) # Design Variables lower = [-5.0, -5.0] upper = [0.5, 5.0] value = [-2, 1.0] optProb.addVarGroup('xvars', 2, lower=lower, upper=upper, value=value) # Constraints lower = [1.0, 0.0] upper = [None, None] optProb.addConGroup('con', 2, lower=lower, upper=upper) # Objective optProb.addObj('obj') # Check optimization problem: print(optProb) # Optimizer try: opt = OPT(optName, options=optOptions) except: raise unittest.SkipTest('Optimizer not available:', optName) # Solution if storeHistory is not None: if storeHistory == True: self.histFileName = '%s_hs015_Hist.hst' % (optName.lower()) elif isinstance(storeHistory, str): self.histFileName = storeHistory else: self.histFileName = None sol = opt(optProb, sens=self.sens, storeHistory=self.histFileName, hotStart=hotStart) # Test printing solution to screen print(sol) # Check Solution fobj = sol.objectives['obj'].value diff = np.min(np.abs([fobj - 306.5, fobj - 360.379767])) self.assertAlmostEqual(diff, 0.0, places=places) xstar1 = (0.5, 2.0) xstar2 = (-0.79212322, -1.26242985) x1 = sol.variables['xvars'][0].value x2 = sol.variables['xvars'][1].value dv = sol.getDVs() self.assertAlmostEqual(x1, dv['xvars'][0], places=10) self.assertAlmostEqual(x2, dv['xvars'][1], places=10) diff = np.min(np.abs([xstar1[0] - x1, xstar2[0] - x1])) self.assertAlmostEqual(diff, 0.0, places=places) diff = np.min(np.abs([xstar1[1] - x2, xstar2[1] - x2])) self.assertAlmostEqual(diff, 0.0, places=places)
sdv10[-1] = 0.1 sdv20[-1] = 0.5 sdv30[-1] = 10. sdv40[-1] = 0.5 rat0[-1] = 10. tns0[-1] = 10. spr10[-1] = 0.1 spr20[-1] = 0.5 spr30[-1] = 10. spr40[-1] = 0.5 scl10[-1] = 0.1 scl20[-1] = 0.5 scl30[-1] = 10. optProb.addVarGroup('men', 3*ords, 'c', lower=None, upper=None, value=men0) optProb.addVarGroup('sdv1', ordt*ords, 'c', lower=None, upper=None, value=sdv10) optProb.addVarGroup('sdv2', ordt*ords, 'c', lower=None, upper=None, value=sdv20) optProb.addVarGroup('sdv3', ordt*ords, 'c', lower=None, upper=None, value=sdv30) optProb.addVarGroup('sdv4', ordt*ords, 'c', lower=None, upper=None, value=sdv40) optProb.addVarGroup('rat', ordt*ords, 'c', lower=None, upper=None, value=rat0) optProb.addVarGroup('tns', ordt*ords, 'c', lower=None, upper=None, value=tns0) optProb.addVarGroup('spr1', ordt*ords, 'c', lower=None, upper=None, value=spr10) optProb.addVarGroup('spr2', ordt*ords, 'c', lower=None, upper=None, value=spr20) optProb.addVarGroup('spr3', ordt*ords, 'c', lower=None, upper=None, value=spr30) optProb.addVarGroup('spr4', ordt*ords, 'c', lower=None, upper=None, value=spr40) optProb.addVarGroup('scl1', ordt*ords, 'c', lower=None, upper=None, value=scl10) optProb.addVarGroup('scl2', ordt*ords, 'c', lower=None, upper=None, value=scl20) optProb.addVarGroup('scl3', ordt*ords, 'c', lower=None, upper=None, value=scl30)
def run(self): """ Excute pyOptsparse. Note that pyOpt controls the execution, and the individual optimizers (e.g., SNOPT) control the iteration. Returns ------- boolean Failure flag; True if failed to converge, False is successful. """ problem = self._problem model = problem.model relevant = model._relevant self.pyopt_solution = None self.iter_count = 0 fwd = problem._mode == 'fwd' # Metadata Setup self.metadata = create_local_meta(self.options['optimizer']) with Recording(self.options['optimizer'], self.iter_count, self) as rec: # Initial Run model._solve_nonlinear() rec.abs = 0.0 rec.rel = 0.0 self.iter_count += 1 opt_prob = Optimization(self.options['title'], self._objfunc) # Add all design variables param_meta = self._designvars self._indep_list = indep_list = list(param_meta) param_vals = self.get_design_var_values() for name, meta in iteritems(param_meta): opt_prob.addVarGroup(name, meta['size'], type='c', value=param_vals[name], lower=meta['lower'], upper=meta['upper']) opt_prob.finalizeDesignVariables() # Add all objectives objs = self.get_objective_values() for name in objs: opt_prob.addObj(name) self._quantities.append(name) # Calculate and save derivatives for any linear constraints. con_meta = self._cons lcons = [ key for (key, con) in iteritems(con_meta) if con['linear'] is True ] if len(lcons) > 0: _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict') # Add all equality constraints self.active_tols = {} eqcons = OrderedDict((key, con) for (key, con) in iteritems(con_meta) if con['equals'] is not None) for name, meta in iteritems(eqcons): size = meta['size'] lower = upper = meta['equals'] if fwd: wrt = [v for v in indep_list if name in relevant[v]] else: rels = relevant[name] wrt = [v for v in indep_list if v in rels] if meta['linear']: opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=wrt, jac=_lin_jacs[name]) else: opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt) self._quantities.append(name) # Add all inequality constraints iqcons = OrderedDict((key, con) for (key, con) in iteritems(con_meta) if con['equals'] is None) for name, meta in iteritems(iqcons): size = meta['size'] # Bounds - double sided is supported lower = meta['lower'] upper = meta['upper'] if fwd: wrt = [v for v in indep_list if name in relevant[v]] else: rels = relevant[name] wrt = [v for v in indep_list if v in rels] if meta['linear']: opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=wrt, jac=_lin_jacs[name]) else: opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt) self._quantities.append(name) # Instantiate the requested optimizer optimizer = self.options['optimizer'] try: _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0) opt = getattr(_tmp, optimizer)() except ImportError: msg = "Optimizer %s is not available in this installation." 
% optimizer raise ImportError(msg) # Set optimization options for option, value in self.opt_settings.items(): opt.setOption(option, value) self.opt_prob = opt_prob self.opt = opt # Execute the optimization problem if self.options['gradient method'] == 'pyopt_fd': # Use pyOpt's internal finite difference # TODO: Need to get this from OpenMDAO # fd_step = problem.root.deriv_options['step_size'] fd_step = 1e-6 sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file, hotStart=self.hotstart_file) elif self.options['gradient method'] == 'snopt_fd': if self.options['optimizer'] == 'SNOPT': # Use SNOPT's internal finite difference # TODO: Need to get this from OpenMDAO # fd_step = problem.root.deriv_options['step_size'] fd_step = 1e-6 sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file, hotStart=self.hotstart_file) else: msg = "SNOPT's internal finite difference can only be used with SNOPT" raise Exception(msg) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file, hotStart=self.hotstart_file) # Print results if self.options['print_results']: print(sol) # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() for name in indep_list: val = dv_dict[name] self.set_design_var(name, val) with Recording(self.options['optimizer'], self.iter_count, self) as rec: model._solve_nonlinear() rec.abs = 0.0 rec.rel = 0.0 self.iter_count += 1 # Save the most recent solution. self.pyopt_solution = sol try: exit_status = sol.optInform['value'] self.fail = False # These are various failed statuses. if exit_status > 2: self.fail = True except KeyError: # Nothing is here, so something bad happened! self.fail = True return self.fail
def run(self): """ Excute pyOptsparse. Note that pyOpt controls the execution, and the individual optimizers (e.g., SNOPT) control the iteration. Returns ------- boolean Failure flag; True if failed to converge, False is successful. """ problem = self._problem model = problem.model relevant = model._relevant self.pyopt_solution = None self._total_jac = None self.iter_count = 0 fwd = problem._mode == 'fwd' optimizer = self.options['optimizer'] # Only need initial run if we have linear constraints or if we are using an optimizer that # doesn't perform one initially. con_meta = self._cons model_ran = False if optimizer in run_required or np.any([con['linear'] for con in itervalues(self._cons)]): with RecordingDebugging(self._get_name(), self.iter_count, self) as rec: # Initial Run model.run_solve_nonlinear() rec.abs = 0.0 rec.rel = 0.0 model_ran = True self.iter_count += 1 # compute dynamic simul deriv coloring or just sparsity if option is set if coloring_mod._use_sparsity: if self.options['dynamic_simul_derivs']: coloring_mod.dynamic_simul_coloring(self, run_model=not model_ran, do_sparsity=True) elif self.options['dynamic_derivs_sparsity']: coloring_mod.dynamic_sparsity(self) opt_prob = Optimization(self.options['title'], self._objfunc) # Add all design variables param_meta = self._designvars self._indep_list = indep_list = list(param_meta) param_vals = self.get_design_var_values() for name, meta in iteritems(param_meta): opt_prob.addVarGroup(name, meta['size'], type='c', value=param_vals[name], lower=meta['lower'], upper=meta['upper']) opt_prob.finalizeDesignVariables() # Add all objectives objs = self.get_objective_values() for name in objs: opt_prob.addObj(name) self._quantities.append(name) # Calculate and save derivatives for any linear constraints. lcons = [key for (key, con) in iteritems(con_meta) if con['linear']] if len(lcons) > 0: _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict') # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation # of linear constraints! to_remove = [] for jacdct in itervalues(_lin_jacs): for n, subjac in iteritems(jacdct): if isinstance(subjac, np.ndarray): # we can safely use coo_matrix to automatically convert the ndarray # since our linear constraint jacs are constant, so zeros won't become # nonzero during the optimization. mat = coo_matrix(subjac) if mat.row.size > 0: # convert to 'coo' format here to avoid an emphatic warning # by pyoptsparse. 
jacdct[n] = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape} # Add all equality constraints for name, meta in iteritems(con_meta): if meta['equals'] is None: continue size = meta['size'] lower = upper = meta['equals'] if fwd: wrt = [v for v in indep_list if name in relevant[v]] else: rels = relevant[name] wrt = [v for v in indep_list if v in rels] if meta['linear']: jac = {w: _lin_jacs[name][w] for w in wrt} opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=wrt, jac=jac) else: if name in self._res_jacs: resjac = self._res_jacs[name] jac = {n: resjac[n] for n in wrt} else: jac = None opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac) self._quantities.append(name) # Add all inequality constraints for name, meta in iteritems(con_meta): if meta['equals'] is not None: continue size = meta['size'] # Bounds - double sided is supported lower = meta['lower'] upper = meta['upper'] if fwd: wrt = [v for v in indep_list if name in relevant[v]] else: rels = relevant[name] wrt = [v for v in indep_list if v in rels] if meta['linear']: jac = {w: _lin_jacs[name][w] for w in wrt} opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=wrt, jac=jac) else: if name in self._res_jacs: resjac = self._res_jacs[name] jac = {n: resjac[n] for n in wrt} else: jac = None opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac) self._quantities.append(name) # Instantiate the requested optimizer try: _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0) opt = getattr(_tmp, optimizer)() except Exception as err: # Change whatever pyopt gives us to an ImportError, give it a readable message, # but raise with the original traceback. msg = "Optimizer %s is not available in this installation." % optimizer reraise(ImportError, ImportError(msg), sys.exc_info()[2]) # Set optimization options for option, value in self.opt_settings.items(): opt.setOption(option, value) # Execute the optimization problem if self.options['gradient method'] == 'pyopt_fd': # Use pyOpt's internal finite difference # TODO: Need to get this from OpenMDAO # fd_step = problem.root.deriv_options['step_size'] fd_step = 1e-6 sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file, hotStart=self.hotstart_file) elif self.options['gradient method'] == 'snopt_fd': if self.options['optimizer'] == 'SNOPT': # Use SNOPT's internal finite difference # TODO: Need to get this from OpenMDAO # fd_step = problem.root.deriv_options['step_size'] fd_step = 1e-6 sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file, hotStart=self.hotstart_file) else: msg = "SNOPT's internal finite difference can only be used with SNOPT" raise Exception(msg) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file, hotStart=self.hotstart_file) # Print results if self.options['print_results']: print(sol) # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() for name in indep_list: self.set_design_var(name, dv_dict[name]) with RecordingDebugging(self._get_name(), self.iter_count, self) as rec: model.run_solve_nonlinear() rec.abs = 0.0 rec.rel = 0.0 self.iter_count += 1 # Save the most recent solution. self.pyopt_solution = sol try: exit_status = sol.optInform['value'] self.fail = False # These are various failed statuses. 
if exit_status > 2: self.fail = True except KeyError: # optimizers other than pySNOPT may not populate this dict pass return self.fail
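The dense-to-COO conversion performed above for linear constraints can be exercised in isolation; a minimal sketch, assuming a small constant sub-Jacobian (the array itself is illustrative):

import numpy as np
from scipy.sparse import coo_matrix

# a constant linear-constraint sub-Jacobian, as produced by _compute_totals
subjac = np.array([[1.0, 0.0, 0.0],
                   [0.0, 0.0, 2.0]])

mat = coo_matrix(subjac)  # zeros drop out; safe here because a linear jac never changes
if mat.row.size > 0:
    # the sparse dict form pyoptsparse accepts for a 'jac' entry in addConGroup
    jac_entry = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}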
def run(self, problem): """pyOpt execution. Note that pyOpt controls the execution, and the individual optimizers (i.e., SNOPT) control the iteration. Args ---- problem : `Problem` Our parent `Problem`. """ self.pyopt_solution = None rel = problem.root._probdata.relevance # Metadata Setup self.metadata = create_local_meta(None, self.options['optimizer']) self.iter_count = 0 update_local_meta(self.metadata, (self.iter_count,)) # Initial Run problem.root.solve_nonlinear(metadata=self.metadata) opt_prob = Optimization(self.options['title'], self._objfunc) # Add all parameters param_meta = self.get_desvar_metadata() self.indep_list = indep_list = list(iterkeys(param_meta)) param_vals = self.get_desvars() for name, meta in iteritems(param_meta): opt_prob.addVarGroup(name, meta['size'], type='c', value=param_vals[name], lower=meta['lower'], upper=meta['upper']) opt_prob.finalizeDesignVariables() # Add all objectives objs = self.get_objectives() self.quantities = list(iterkeys(objs)) self.sparsity = OrderedDict() #{} for name in objs: opt_prob.addObj(name) self.sparsity[name] = self.indep_list # Calculate and save gradient for any linear constraints. lcons = self.get_constraints(lintype='linear').keys() if len(lcons) > 0: self.lin_jacs = problem.calc_gradient(indep_list, lcons, return_format='dict') #print("Linear Gradient") #print(self.lin_jacs) # Add all equality constraints econs = self.get_constraints(ctype='eq', lintype='nonlinear') con_meta = self.get_constraint_metadata() self.quantities += list(iterkeys(econs)) for name in self.get_constraints(ctype='eq'): size = con_meta[name]['size'] lower = upper = con_meta[name]['equals'] # Sparsify Jacobian via relevance wrt = rel.relevant[name].intersection(indep_list) self.sparsity[name] = wrt if con_meta[name]['linear'] is True: opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=wrt, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt) # Add all inequality constraints incons = self.get_constraints(ctype='ineq', lintype='nonlinear') self.quantities += list(iterkeys(incons)) for name in self.get_constraints(ctype='ineq'): size = con_meta[name]['size'] # Bounds - double sided is supported lower = con_meta[name]['lower'] upper = con_meta[name]['upper'] # Sparsify Jacobian via relevance wrt = rel.relevant[name].intersection(indep_list) self.sparsity[name] = wrt if con_meta[name]['linear'] is True: opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=wrt, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt) # Instantiate the requested optimizer optimizer = self.options['optimizer'] try: exec('from pyoptsparse import %s' % optimizer) except ImportError: msg = "Optimizer %s is not available in this installation." 
% \ optimizer raise ImportError(msg) optname = vars()[optimizer] opt = optname() #Set optimization options for option, value in self.opt_settings.items(): opt.setOption(option, value) self._problem = problem # Execute the optimization problem if self.options['pyopt_diff'] is True: # Use pyOpt's internal finite difference fd_step = problem.root.fd_options['step_size'] sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file) self._problem = None # Print results if self.options['print_results'] is True: print(sol) # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() for name in indep_list: val = dv_dict[name] self.set_desvar(name, val) self.root.solve_nonlinear(metadata=self.metadata) # Save the most recent solution. self.pyopt_solution = sol try: exit_status = sol.optInform['value'] self.exit_flag = 1 if exit_status > 2: # bad self.exit_flag = 0 except KeyError: #nothing is here, so something bad happened! self.exit_flag = 0
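Both drivers above locate the optimizer class by name (via __import__ or exec); current pyoptsparse versions also expose an OPT factory that does the same lookup and is less error-prone. A minimal sketch, assuming SNOPT is the requested name:

from pyoptsparse import OPT

optimizer = 'SNOPT'
try:
    opt = OPT(optimizer, options={'Major feasibility tolerance': 1e-6})
except Exception:
    raise ImportError("Optimizer %s is not available in this installation." % optimizer)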
def fit(s,t,length,plot,comp,read_data,opt_print): global posdn global poslt global velod t2 = t+'.0' wfit = s+'_'+t2 wfit2 = s+'_'+t2 wfit3 = s+'_'+t2 wfit4 = s+'_'+t2 wfit5 = s+'_'+t2 wfit6 = s+'_'+t2 length2 = length length3 = length length4 = length length5 = length length6 = length wind = 15. wind2 = 14. wind3 = 12. wind4 = 16. rad = 3. dia = rad*2. tsr = float(wfit[3]+'.'+wfit[4]+wfit[5]) rot = tsr*wind/rad rot2 = tsr*wind2/rad rot3 = tsr*wind3/rad rot4 = tsr*wind4/rad rot5 = 17. rot6 = 18. wind5 = rot5*rad/tsr wind6 = rot6*rad/tsr if comp == 'mac': # fdata = '/Users/ning1/Documents/Flow Lab/STAR-CCM+/NACA0021/MoveForward/test.csv' fdata = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/Velocity Sections/'+wfit+'.csv' fdata2 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot18/Velocity/'+wfit6+'.csv' elif comp == 'fsl': fdata = '/fslhome/ebtingey/compute/moveForward/Velocity/'+wfit+'.csv' fdata2 = '/fslhome/ebtingey/compute/moveForward/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/fslhome/ebtingey/compute/moveForward/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/fslhome/ebtingey/compute/moveForward/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/fslhome/ebtingey/compute/moveForward/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/fslhome/ebtingey/compute/moveForward/rot18/Velocity/'+wfit6+'.csv' elif comp == 'win': fdata = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//Velocity Sections//'+wfit+'.csv' fdata2 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel14//Velocity//'+wfit2+'.csv' fdata3 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel12//Velocity//'+wfit3+'.csv' fdata4 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel16//Velocity//'+wfit4+'.csv' fdata5 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//rot17//Velocity//'+wfit5+'.csv' fdata6 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//rot18//Velocity//'+wfit6+'.csv' if read_data ==1: posdn,poslt,velod = starccm_read(np.array([fdata]),dia,np.array([wind]),length,opt_print) if read_data ==2: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2]),dia,np.array([wind,wind2]),length,opt_print) if read_data ==3: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3]),dia,np.array([wind,wind2,wind3]),length,opt_print) if read_data ==4: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4]),dia,np.array([wind,wind2,wind3,wind4]),length,opt_print) if read_data ==5: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5]),dia,np.array([wind,wind2,wind3,wind4,wind5]),length,opt_print) if read_data ==6: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5,fdata6]),dia,np.array([wind,wind2,wind3,wind4,wind5,wind6]),length,opt_print) if plot == True: if read_data ==1: 
pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata]),dia,np.array([wind]),opt_print) if read_data ==2: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2]),dia,np.array([wind,wind2]),opt_print) if read_data ==3: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3]),dia,np.array([wind,wind2,wind3]),opt_print) if read_data ==4: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4]),dia,np.array([wind,wind2,wind3,wind4]),opt_print) if read_data ==5: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4,fdata5]),dia,np.array([wind,wind2,wind3,wind4,wind5]),opt_print) if read_data ==6: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4,fdata5,fdata6]),dia,np.array([wind,wind2,wind3,wind4,wind5,wind6]),opt_print) start = length/30. 
xd = np.linspace(start,length,30)/dia ## Optimization optProb = Optimization('VAWTWake_Velo', obj_func) optProb.addObj('obj') spr10 = 10.0 pow10 = 5.0 pow20 = 0.5 pow30 = 1.0 spr20 = 2.0 skw0 = 0.0 odr0 = 2.0 scl10 = 0.5 scl20 = 0.1 scl30 = 20.0 # spr10 = 213.8593169 # pow10 = 10.39210953 # pow20 = 2.086951239 # pow30 = 0.035659319 # spr20 = 0.007589688 # skw0 = 10.63462155 # odr0 = 2.0 # scl10 = 0.537566448 # scl20 = 0.041077603 # scl30 = 56.74689143 param0 = np.array([spr10,pow10,pow20,pow30,spr20,skw0,odr0,scl10,scl20,scl30]) param_l = np.array([0.,0.,0.,0.,0.,None,0.,0.,0.,0.]) param_u = np.array([None,None,None,None,None,None,None,1.,1.,None]) nparam = np.size(param0) optProb.addVarGroup('param', nparam, 'c', lower=param_l, upper=param_u, value=param0) opt = SNOPT() opt.setOption('Scale option',2) if comp == 'mac': opt.setOption('Print file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_summary'+s+'_'+t+'.out') elif comp == 'fsl': opt.setOption('Print file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_summary'+s+'_'+t+'.out') elif comp == 'win': opt.setOption('Print file','C://Users//TingeyPC//Documents//FLOW Lab//VAWTWakeModel//wake_model//data//optVel//SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','C://Users//TingeyPC//Documents//FLOW Lab//VAWTWakeModel//wake_model//data//OptVel//SNOPT_summary'+s+'_'+t+'.out') res = opt(optProb, sens=None) if opt_print == True: print res pow = res.fStar paramf = res.xStar['param'] if opt_print == True: print paramf[0] print paramf[1] print paramf[2] print paramf[3] print paramf[4] print paramf[5] print paramf[6] print paramf[7] print paramf[8] print paramf[9] spr1 = paramf[0] pow1 = paramf[1] pow2 = paramf[2] pow3 = paramf[3] spr2 = paramf[4] skw = paramf[5] odr = paramf[6] scl1 = paramf[7] scl2 = paramf[8] scl3 = paramf[9] paper = False if plot == True: if paper == True: for i in range(30): name = str(i+1) ind = str(i) plt.figure(1) ax1 = plt.subplot(5,6,i+1) color = 'bo' color2 = 'r-' fs = 15 lab = 'CFD' lab2 = 'Trend' tex = '$x/D$ = '+str("{0:.2f}".format(x[i]/dia)) exec('xfit = np.linspace(min(pos'+name+'/dia)-1.,max(pos'+name+'/dia)+1.,500)') if i == 5: exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color,label=lab)') skw_v,spr_v,scl_v,rat_v,spr_v = paramfit(xd[i],skw,spr,scl,rat,spr) plt.plot(veldist(xfit,skw_v,spr_v,scl_v,rat_v,spr_v),xfit,'r-',linewidth=2,label=lab2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=fs) else: exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') skw_v,spr_v,scl_v,rat_v,spr_v = paramfit(xd[i],skw,spr,scl,rat,spr) plt.plot(veldist(xfit,skw_v,spr_v,scl_v,rat_v,spr_v),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) 
plt.text(0.3,0.8,tex,fontsize=fs) if i <= 23: plt.setp(ax1.get_xticklabels(), visible=False) else: plt.xlabel('$y/D$',fontsize=fs) plt.xticks(fontsize=fs) if i == 0 or i == 6 or i == 12 or i == 18 or i ==24: plt.ylabel(r'$u/U_\infty$',fontsize=fs) plt.yticks(fontsize=fs) else: plt.setp(ax1.get_yticklabels(), visible=False) elif paper == False: for i in range(30): name = str(i+1) plt.figure(1) plt.subplot(5,6,i+1) color = 'bo' exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') plt.plot(veldist(xd[i],xfit,spr1,pow1,pow2,pow3,spr2,skw,odr,scl1,scl2,scl3),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) # plt.legend(loc=1) plt.xlabel('Normalized Velocity') plt.ylabel('$y/D$') return spr1,pow1,pow2,pow3,spr2,skw,odr,scl1,scl2,scl3
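Two notes on the excerpt above. First, the paper == True plotting branch calls paramfit(xd[i], skw, spr, scl, rat, spr) and unpacks spr_v twice, but spr, scl, and rat are never defined in this variant of fit, so that branch appears to be stale copy-paste from another variant. Second, fit hands pyoptsparse an external obj_func that is not shown; its expected shape is the usual (funcs, fail) callback. A minimal sketch of such a callback (the body is an illustrative placeholder, not the source's wake-model residual):

import numpy as np


def obj_func(xdict):
    param = xdict['param']  # the 'param' variable group added in fit()
    funcs = {}
    # placeholder objective: the real callback evaluates the wake-velocity model
    # with these parameters and compares it against the CFD data held in the
    # module-level globals (posdn, poslt, velod)
    funcs['obj'] = float(np.sum(param**2))
    fail = False
    return funcs, fail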
def fit(s,t,length,plot,comp,read_data,opt_print): global posdn global poslt global velod t2 = t+'.0' wfit = s+'_'+t2 wfit2 = s+'_'+t2 wfit3 = s+'_'+t2 wfit4 = s+'_'+t2 wfit5 = s+'_'+t2 wfit6 = s+'_'+t2 length2 = length length3 = length length4 = length length5 = length length6 = length wind = 15. wind2 = 14. wind3 = 12. wind4 = 16. rad = 3. dia = rad*2. tsr = float(wfit[3]+'.'+wfit[4]+wfit[5]) rot = tsr*wind/rad rot2 = tsr*wind2/rad rot3 = tsr*wind3/rad rot4 = tsr*wind4/rad rot5 = 17. rot6 = 18. wind5 = rot5*rad/tsr wind6 = rot6*rad/tsr if comp == 'mac': # fdata = '/Users/ning1/Documents/Flow Lab/STAR-CCM+/NACA0021/MoveForward/test.csv' fdata = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/Velocity Sections/'+wfit+'.csv' fdata2 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot18/Velocity/'+wfit6+'.csv' elif comp == 'fsl': fdata = '/fslhome/ebtingey/compute/moveForward/Velocity/'+wfit+'.csv' fdata2 = '/fslhome/ebtingey/compute/moveForward/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/fslhome/ebtingey/compute/moveForward/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/fslhome/ebtingey/compute/moveForward/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/fslhome/ebtingey/compute/moveForward/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/fslhome/ebtingey/compute/moveForward/rot18/Velocity/'+wfit6+'.csv' elif comp == 'win': fdata = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//Velocity Sections//'+wfit+'.csv' fdata2 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel14//Velocity//'+wfit2+'.csv' fdata3 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel12//Velocity//'+wfit3+'.csv' fdata4 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel16//Velocity//'+wfit4+'.csv' fdata5 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//rot17//Velocity//'+wfit5+'.csv' fdata6 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//rot18//Velocity//'+wfit6+'.csv' if read_data ==1: posdn,poslt,velod = starccm_read(np.array([fdata]),dia,np.array([wind]),length,opt_print) if read_data ==2: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2]),dia,np.array([wind,wind2]),length,opt_print) if read_data ==3: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3]),dia,np.array([wind,wind2,wind3]),length,opt_print) if read_data ==4: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4]),dia,np.array([wind,wind2,wind3,wind4]),length,opt_print) if read_data ==5: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5]),dia,np.array([wind,wind2,wind3,wind4,wind5]),length,opt_print) if read_data ==6: posdn,poslt,velod = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5,fdata6]),dia,np.array([wind,wind2,wind3,wind4,wind5,wind6]),length,opt_print) if plot == True: if read_data ==1: 
pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata]),dia,np.array([wind]),opt_print) if read_data ==2: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2]),dia,np.array([wind,wind2]),opt_print) if read_data ==3: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3]),dia,np.array([wind,wind2,wind3]),opt_print) if read_data ==4: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4]),dia,np.array([wind,wind2,wind3,wind4]),opt_print) if read_data ==5: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4,fdata5]),dia,np.array([wind,wind2,wind3,wind4,wind5]),opt_print) if read_data ==6: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read2(np.array([fdata,fdata2,fdata3,fdata4,fdata5,fdata6]),dia,np.array([wind,wind2,wind3,wind4,wind5,wind6]),opt_print) start = length/30. xd = np.linspace(start,length,30)/dia ## Optimization optProb = Optimization('VAWTWake_Velo', obj_func) optProb.addObj('obj') men0 = 0. sdv10 = 0.5 sdv20 = 0.1 sdv30 = 10. sdv40 = 0.5 rat0 = 10. wdt0 = 10. spr10 = 0.5 spr20 = 0.1 spr30 = 20. spr40 = 1. 
scl10 = 0.5 scl20 = 0.1 scl30 = 40. # men0 = 0.107980482 # sdv10 = 5.09E-01 # sdv20 = 0.056288195 # sdv30 = 50 # sdv40 = 0.5 # rat0 = 13.19127977 # wdt0 = 14.20436344 # spr10 = 1 # spr20 = 0.010825 # spr30 = 132.4282087 # spr40 = 1 # scl10 = 0.365635251 # scl20 = 0.082475724 # scl30 = 37.61946447 men0 = -0.0138439642406 sdv10 = 0.0 sdv20 = 0.17803796067 sdv30 = 9.69044107271 sdv40 = 0.50982003115 rat0 = 0.0 wdt0 = 10.0 spr10 = 0.998862596849 spr20 = 1.47011550439e-05 spr30 = 22.5386579407 spr40 = 1.0 scl10 = 0.380051328623 scl20 = 0.134712758388 scl30 = 45.7788575653 men0 = -0.0384248691061 sdv10 = 0.0 sdv20 = 0.17803796067 sdv30 = 9.69044107271 sdv40 = 0.81447171018 rat0 = 0.0 wdt0 = 10.0 spr10 = 0.0232236137751 spr20 = 0.0 spr30 = 12.6315271162 spr40 = 9.75322269238 scl10 = 0.357152390403 scl20 = 0.135756021609 scl30 = 33.8432403717 param0 = np.array([men0,sdv10,sdv20,sdv30,sdv40,rat0,wdt0,spr10,spr20,spr30,spr40,scl10,scl20,scl30]) param_l = np.array([None,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) param_u = np.array([None,10.,1.,50.,None,None,None,1.,1.,50.,None,1.,1.,None]) # param_l = np.array([None,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) # param_u = np.array([None,10.,1.,None,None,None,None,None,None,None,None,1.,1.,None]) # param_l = np.array([None,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) # param_u = np.array([None,None,None,None,None,None,None,None,None,None,None,1.,1.,None]) nparam = np.size(param0) optProb.addVarGroup('param', nparam, 'c', lower=param_l, upper=param_u, value=param0) opt = SNOPT() opt.setOption('Scale option',2) if comp == 'mac': opt.setOption('Print file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_summary'+s+'_'+t+'.out') elif comp == 'fsl': opt.setOption('Print file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_summary'+s+'_'+t+'.out') elif comp == 'win': opt.setOption('Print file','C://Users//TingeyPC//Documents//FLOW Lab//VAWTWakeModel//wake_model//data//optVel//SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','C://Users//TingeyPC//Documents//FLOW Lab//VAWTWakeModel//wake_model//data//OptVel//SNOPT_summary'+s+'_'+t+'.out') res = opt(optProb, sens=None) if opt_print == True: print res pow = res.fStar paramf = res.xStar['param'] if opt_print == True: print paramf[0] print paramf[1] print paramf[2] print paramf[3] print paramf[4] print paramf[5] print paramf[6] print paramf[7] print paramf[8] print paramf[9] print paramf[10] print paramf[11] print paramf[12] print paramf[13] men = paramf[0] sdv1 = paramf[1] sdv2 = paramf[2] sdv3 = paramf[3] sdv4 = paramf[4] rat = paramf[5] wdt = paramf[6] spr1 = paramf[7] spr2 = paramf[8] spr3 = paramf[9] spr4 = paramf[10] scl1 = paramf[11] scl2 = paramf[12] scl3 = paramf[13] paper = False if plot == True: if paper == True: for i in range(30): name = str(i+1) ind = str(i) plt.figure(1) ax1 = plt.subplot(5,6,i+1) color = 'bo' color2 = 'r-' fs = 15 lab = 'CFD' lab2 = 'Trend' tex = '$x/D$ = '+str("{0:.2f}".format(x[i]/dia)) exec('xfit = np.linspace(min(pos'+name+'/dia)-1.,max(pos'+name+'/dia)+1.,500)') if i == 5: exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color,label=lab)') men_v,spr_v,scl_v,rat_v,spr_v = paramfit(xd[i],men,spr,scl,rat,spr) 
plt.plot(veldist(xfit,men_v,spr_v,scl_v,rat_v,spr_v),xfit,'r-',linewidth=2,label=lab2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=fs) else: exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') men_v,spr_v,scl_v,rat_v,spr_v = paramfit(xd[i],men,spr,scl,rat,spr) plt.plot(veldist(xfit,men_v,spr_v,scl_v,rat_v,spr_v),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) plt.text(0.3,0.8,tex,fontsize=fs) if i <= 23: plt.setp(ax1.get_xticklabels(), visible=False) else: plt.xlabel('$y/D$',fontsize=fs) plt.xticks(fontsize=fs) if i == 0 or i == 6 or i == 12 or i == 18 or i ==24: plt.ylabel(r'$u/U_\infty$',fontsize=fs) plt.yticks(fontsize=fs) else: plt.setp(ax1.get_yticklabels(), visible=False) elif paper == False: for i in range(30): name = str(i+1) plt.figure(1) plt.subplot(5,6,i+1) color = 'bo' exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') plt.plot(veldist(xd[i],xfit,men,sdv1,sdv2,sdv3,sdv4,rat,wdt,spr1,spr2,spr3,spr4,scl1,scl2,scl3),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) # plt.legend(loc=1) plt.xlabel('Normalized Velocity') plt.ylabel('$y/D$') return men,sdv1,sdv2,sdv3,sdv4,rat,wdt,spr1,spr2,spr3,spr4,scl1,scl2,scl3
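As the bound arrays above show, addVarGroup accepts per-element lower/upper arrays in which a None entry leaves that variable unbounded on that side; a minimal standalone illustration:

import numpy as np
from pyoptsparse import Optimization


def dummy(xdict):
    return {'obj': float(np.sum(xdict['param']))}, False


optProb = Optimization('bounds_demo', dummy)
lower = np.array([None, 0.0, 0.0])   # first variable unbounded below
upper = np.array([None, 1.0, None])  # first and third unbounded above
optProb.addVarGroup('param', 3, 'c', lower=lower, upper=upper, value=np.zeros(3))
optProb.addObj('obj')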
power_iso = powerval(Cp, dens, velf, turb_dia)

x0 = xt
y0 = yt

area = 2
xlow = points[0] - spacing*area
xupp = points[-1] + spacing*area
ylow = points[0] - spacing*area
yupp = points[-1] + spacing*area

optProb = Optimization('VAWT_Power', obj_func)
optProb.addObj('obj')
n = np.size(x0)
optProb.addVarGroup('xvars', n, 'c', lower=xlow, upper=xupp, value=x0)
optProb.addVarGroup('yvars', n, 'c', lower=ylow, upper=yupp, value=y0)
num_cons_sep = (n-1)*n/2
optProb.addConGroup('sep', num_cons_sep, lower=0, upper=None)

opt = SNOPT()
opt.setOption('Scale option', 0)
res = opt(optProb, sens=None)
print res

pow = np.array(-1*res.fStar)
xf = res.xStar['xvars']
yf = res.xStar['yvars']
power_turb = funcs['power_turb']
veleff = funcs['veleff']
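The 'sep' constraint group above has one entry per unordered pair of turbines, hence the (n-1)*n/2 count. A hedged sketch of how such values are commonly assembled inside the objective callback (squared spacing minus a minimum squared spacing, so lower=0 means feasible); this is an illustration, not the source's obj_func:

import numpy as np


def sep_constraints(x, y, min_space):
    """One constraint value per unordered turbine pair (i, j) with i < j."""
    n = len(x)
    sep = np.zeros(n*(n - 1)//2)
    k = 0
    for i in range(n):
        for j in range(i + 1, n):
            sep[k] = (x[i] - x[j])**2 + (y[i] - y[j])**2 - min_space**2
            k += 1
    return sep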
def fit(s,t,length,plot,comp,read_data,opt_print): global xd global pos1d global pos2d global pos3d global pos4d global pos5d global pos6d global pos7d global pos8d global pos9d global pos10d global pos11d global pos12d global pos13d global pos14d global pos15d global pos16d global pos17d global pos18d global pos19d global pos20d global pos21d global pos22d global pos23d global pos24d global pos25d global pos26d global pos27d global pos28d global pos29d global pos30d global velo1d global velo2d global velo3d global velo4d global velo5d global velo6d global velo7d global velo8d global velo9d global velo10d global velo11d global velo12d global velo13d global velo14d global velo15d global velo16d global velo17d global velo18d global velo19d global velo20d global velo21d global velo22d global velo23d global velo24d global velo25d global velo26d global velo27d global velo28d global velo29d global velo30d t2 = t+'.0' wfit = s+'_'+t2 wfit2 = s+'_'+t2 wfit3 = s+'_'+t2 wfit4 = s+'_'+t2 wfit5 = s+'_'+t2 wfit6 = s+'_'+t2 length2 = length length3 = length length4 = length length5 = length length6 = length wind = 15. wind2 = 14. wind3 = 12. wind4 = 16. rad = 3. dia = rad*2. tsr = float(wfit[3]+'.'+wfit[4]+wfit[5]) rot = tsr*wind/rad rot2 = tsr*wind2/rad rot3 = tsr*wind3/rad rot4 = tsr*wind4/rad rot5 = 17. rot6 = 18. wind5 = rot5*rad/tsr wind6 = rot6*rad/tsr if comp == 'mac': # fdata = '/Users/ning1/Documents/Flow Lab/STAR-CCM+/NACA0021/MoveForward/test.csv' fdata = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/Velocity Sections/'+wfit+'.csv' fdata2 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/Users/ning1/Documents/FLOW Lab/STAR-CCM+/NACA0021/MoveForward/CrossValidate/rot18/Velocity/'+wfit6+'.csv' elif comp == 'fsl': fdata = '/fslhome/ebtingey/compute/moveForward/Velocity/'+wfit+'.csv' fdata2 = '/fslhome/ebtingey/compute/moveForward/vel14/Velocity/'+wfit2+'.csv' fdata3 = '/fslhome/ebtingey/compute/moveForward/vel12/Velocity/'+wfit3+'.csv' fdata4 = '/fslhome/ebtingey/compute/moveForward/vel16/Velocity/'+wfit4+'.csv' fdata5 = '/fslhome/ebtingey/compute/moveForward/rot17/Velocity/'+wfit5+'.csv' fdata6 = '/fslhome/ebtingey/compute/moveForward/rot18/Velocity/'+wfit6+'.csv' elif comp == 'win': fdata = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//Velocity Sections//'+wfit+'.csv' fdata2 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel14//Velocity//'+wfit2+'.csv' fdata3 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel12//Velocity//'+wfit3+'.csv' fdata4 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//vel16//Velocity//'+wfit4+'.csv' fdata5 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//rot17//Velocity//'+wfit5+'.csv' fdata6 = 'C://Users//TingeyPC//Documents//zStar-CCM//STAR-CCM//NACA0021//MoveForward//CrossValidate//rot18//Velocity//'+wfit6+'.csv' if read_data ==1: 
pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata]),dia,np.array([wind]),opt_print) if read_data ==2: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2]),dia,np.array([wind,wind2]),opt_print) if read_data ==3: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2,fdata3]),dia,np.array([wind,wind2,wind3]),opt_print) if read_data ==4: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2,fdata3,fdata4]),dia,np.array([wind,wind2,wind3,wind4]),opt_print) if read_data ==5: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5]),dia,np.array([wind,wind2,wind3,wind4,wind5]),opt_print) if read_data ==6: pos1d,pos2d,pos3d,pos4d,pos5d,pos6d,pos7d,pos8d,pos9d,pos10d,pos11d,pos12d,pos13d,pos14d,pos15d,pos16d,pos17d,pos18d,pos19d,pos20d,pos21d,pos22d,pos23d,pos24d,pos25d,pos26d,pos27d,pos28d,pos29d,pos30d,velo1d,velo2d,velo3d,velo4d,velo5d,velo6d,velo7d,velo8d,velo9d,velo10d,velo11d,velo12d,velo13d,velo14d,velo15d,velo16d,velo17d,velo18d,velo19d,velo20d,velo21d,velo22d,velo23d,velo24d,velo25d,velo26d,velo27d,velo28d,velo29d,velo30d = starccm_read(np.array([fdata,fdata2,fdata3,fdata4,fdata5,fdata6]),dia,np.array([wind,wind2,wind3,wind4,wind5,wind6]),opt_print) start = length/30. 
xd = np.linspace(start,length,30)/dia ## Optimization optProb = Optimization('VAWTWake_Velo', obj_func) optProb.addObj('obj') param0 = np.array([2.91638655e-04, -1.70286993e-03 , 2.38051673e-02 , 7.65610623e-01,6.40509205e-02 , 6.99046413e-01, 7.83484187e-01 , 4.55408268e-01, 1.18716383e-01 , 2.05484572e+01 , -2.67741935e+00 , 4.43022575e+01,-2.10925147e+00 , 3.30400554e+01]) # param0 = np.array([-0.000168794,0.005413905,-0.151372907,0.194333896,-29.52514866,-5.555943879,0.350540531,106.116331,0.000304188,17.4218187,30.06169144,47.3929253,32.97190468,33.4188042]) # param0 = np.array([-0.430957043,0.202977737,0.20361798,-0.818862214,-1.568705068,0.753568492,0.255557818,0.162385094,3.7137047,19.67995783,31.32756058,77.24259087,13.72492175,48.22628446]) # param0 = np.array([-0.0002347,0.0094285,-0.2262108,0.0847296,0.7634520,5.0331194,0.3964091,6.0135974,0.0052853,19.9466507,-2.0322239,47.3381111,-1.1426171,27.5907093]) param_l = np.array([-1.,-1,-1.,-1.,-1,-1.,0.,0.,0.,None,0.,None,0.]) param_u = np.array([1.,1.,1.,1.,1.,1.,None,None,None,0.,None,0.,None]) param_l = np.array([None,None,None,0.,0.,0.,0.,0.,0.,0.,None,0.,None,0.]) param_u = np.array([None,None,None,None,None,None,None,None,None,None,0.,None,0.,None]) nparam = np.size(param0) optProb.addVarGroup('param', nparam, 'c', lower=param_l, upper=param_u, value=param0) opt = SNOPT() opt.setOption('Scale option',2) if comp == 'mac': opt.setOption('Print file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/Users/ning1/Documents/FLOW Lab/VAWTWakeModel/wake_model/data/OptVel/SNOPT_summary'+s+'_'+t+'.out') elif comp == 'fsl': opt.setOption('Print file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','/fslhome/ebtingey/compute/VAWTWakeModel/OptVel/SNOPT_summary'+s+'_'+t+'.out') elif comp == 'win': opt.setOption('Print file','C://Users//TingeyPC//Documents//FLOW Lab//VAWTWakeModel//wake_model//data//optVel//SNOPT_print'+s+'_'+t+'.out') opt.setOption('Summary file','C://Users//TingeyPC//Documents//FLOW Lab//VAWTWakeModel//wake_model//data//OptVel//SNOPT_summary'+s+'_'+t+'.out') res = opt(optProb, sens=None) if opt_print == True: print res pow = res.fStar paramf = res.xStar['param'] if opt_print == True: print paramf[0] print paramf[1] print paramf[2] print paramf[3] print paramf[4] print paramf[5] print paramf[6] print paramf[7] print paramf[8] print paramf[9] print paramf[10] print paramf[11] print paramf[12] print paramf[13] men = np.array([paramf[0],paramf[1],paramf[2]]) spr = np.array([paramf[3],paramf[4],paramf[5],paramf[6]]) scl = np.array([paramf[7],paramf[8],paramf[9]]) rat = np.array([paramf[10],paramf[11]]) tns = np.array([paramf[12],paramf[13]]) paper = False if plot == True: if paper == True: for i in range(30): name = str(i+1) ind = str(i) plt.figure(1) ax1 = plt.subplot(5,6,i+1) color = 'bo' color2 = 'r-' fs = 15 lab = 'CFD' lab2 = 'Trend' tex = '$x/D$ = '+str("{0:.2f}".format(x[i]/dia)) exec('xfit = np.linspace(min(pos'+name+'/dia)-1.,max(pos'+name+'/dia)+1.,500)') if i == 5: exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color,label=lab)') men_v,spr_v,scl_v,rat_v,tns_v = paramfit(xd[i],men,spr,scl,rat,tns) plt.plot(veldist(xfit,men_v,spr_v,scl_v,rat_v,tns_v),xfit,'r-',linewidth=2,label=lab2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) 
plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=fs) else: exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') men_v,spr_v,scl_v,rat_v,tns_v = paramfit(xd[i],men,spr,scl,rat,tns) plt.plot(veldist(xfit,men_v,spr_v,scl_v,rat_v,tns_v),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) plt.text(0.3,0.8,tex,fontsize=fs) if i <= 23: plt.setp(ax1.get_xticklabels(), visible=False) else: plt.xlabel('$y/D$',fontsize=fs) plt.xticks(fontsize=fs) if i == 0 or i == 6 or i == 12 or i == 18 or i ==24: plt.ylabel(r'$u/U_\infty$',fontsize=fs) plt.yticks(fontsize=fs) else: plt.setp(ax1.get_yticklabels(), visible=False) elif paper == False: for i in range(30): name = str(i+1) plt.figure(1) plt.subplot(5,6,i+1) color = 'bo' exec('xfit = np.linspace(min(pos'+name+'d)-1.,max(pos'+name+'d)+1.,500)') exec('plt.plot(velo'+name+'d,pos'+name+'d,color)') men_v,spr_v,scl_v,rat_v,tns_v = paramfit(xd[i],men,spr,scl,rat,tns) plt.plot(veldist(xfit,men_v,spr_v,scl_v,rat_v,tns_v),xfit,'r-',linewidth=2) plt.xlim(0.,1.5) # plt.ylim(-4.,4.) # plt.legend(loc=1) plt.xlabel('Normalized Velocity') plt.ylabel('$y/D$') return men,spr,scl,rat,tns
opt_problem.setAssembler(assembler, func)

if step % 2 == 1:
    # Solve the analysis problem at the first step
    opt_problem.evalObjCon(opt_problem.x * opt_problem.x_scale)
else:
    # Create the optimization problem
    prob = Optimization('topo', opt_problem.objcon)

    # Add the variable group
    n = opt_problem.nvars
    x0 = np.zeros(n)
    lb = np.zeros(n)
    ub = np.zeros(n)
    opt_problem.getVarsAndBounds(x0, lb, ub)
    prob.addVarGroup('x', n, value=x0, lower=lb, upper=ub)

    # Add the constraints
    prob.addConGroup('con', opt_problem.ncon, lower=0.0, upper=None)

    # Add the objective
    prob.addObj('objective')

    fname = 'results/opt%02d.out' % (step)
    if args.optimizer == 'snopt':
        options['Print file'] = fname
        options['Summary file'] = fname + '_summary'
    elif args.optimizer == 'ipopt':
        options['output_file'] = fname
    elif args.optimizer == 'paropt':
        options['filename'] = fname
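The solve itself is not part of the excerpt above; a hedged sketch of the step that would follow, reusing the options dict assembled above (sens='FD' stands in for whatever gradient source the real script uses):

from pyoptsparse import OPT

opt = OPT(args.optimizer, options=options)
sol = opt(prob, sens='FD')      # or an analytic sens callback, if one is available
x_opt = sol.xStar['x']          # optimized design variables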
def optimize_mesh(hvals, errors, mass_errors, lower, upper, Ntarget, p, d, mass_error_ratio=0.25): def objfunc(xdict): '''Evaluate the objective/constraint''' x = xdict['x'] # Minimize predicted error fobj = 0.0 ratio = 0.0 for i, h in enumerate(hvals): fobj += ((1.0 - mass_error_ratio) * errors[i] * (x[i] / h)**s + mass_error_ratio * mass_errors[i] * (x[i] / h)**2) ratio += (x[i] / h)**(-d) # Set the objective and constraint funcs = {} funcs['fobj'] = fobj funcs['ratio'] = ratio / Ntarget fail = 0 return funcs, fail def gobjfunc(xdict, funcs): '''Evaluate the objective/constraint derivatives''' x = xdict['x'] # Minimize predicted error gobj = np.zeros(x.shape) gratio = np.zeros(x.shape) for i, h in enumerate(hvals): gobj[i] = ((1.0 - mass_error_ratio) * s * (errors[i] / h) * (x[i] / h)**(s - 1) + 2 * mass_error_ratio * (mass_errors[i] / h) * (x[i] / h)) gratio[i] = -(d / h) * (x[i] / h)**(-d - 1.0) gratio[:] /= Ntarget # Set the objective and constraint sens = {'fobj': {'x': gobj}, 'ratio': {'x': gratio}} fail = 0.0 return sens, fail # Create the optimization problem mesh_prob = Optimization('mesh', objfunc, comm=MPI.COMM_SELF) # Add the variable group x0 = 0.5 * (lower + upper) mesh_prob.addVarGroup('x', len(hvals), value=x0, lower=lower, upper=upper) # Add the constraints mesh_prob.addConGroup('ratio', 1, lower=1.0, upper=1.0) # Add the objective mesh_prob.addObj('fobj') # The Optimizer is IPOPT options = {} options['print_user_options'] = 'yes' options['tol'] = 1e-6 options['bound_relax_factor'] = 0.0 options['linear_solver'] = 'ma27' options['output_file'] = 'results/mesh_opt.out' options['max_iter'] = 500 # Create the optimizer and optimize it! opt = OPT('ipopt', options=options) sol = opt(mesh_prob, sens=gobjfunc) return sol.xStar['x']
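Two remarks on optimize_mesh: the exponent s used in objfunc and gobjfunc is not among the arguments (and p goes unused), so s presumably lives in the enclosing module scope; and gobjfunc follows the general pyoptsparse sens-callback contract of a nested dict keyed first by function name, then by variable-group name. A minimal standalone illustration of that contract:

import numpy as np


def sens(xdict, funcs):
    x = xdict['x']
    funcsSens = {
        'fobj':  {'x': 2.0*x},            # dF/dx for the objective
        'ratio': {'x': np.ones_like(x)},  # dC/dx for the (scalar) constraint
    }
    fail = 0
    return funcsSens, fail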
num = 1000
# aep_array = np.zeros(num)
# mean_damage_array = np.zeros(num)
# max_damage_array = np.zeros(num)

for k in range(num):
    turbineX, turbineY = random_start(nTurbs, 126.4, xmin, xmax, ymin, ymax)

    """Optimization"""
    optProb = Optimization('Wind_Farm_AEP', obj_func)
    optProb.addObj('AEP')
    # optProb.addObj('damage')

    optProb.addVarGroup('turbineX', nTurbs, type='c', lower=min(xBounds), upper=max(xBounds), value=turbineX)
    optProb.addVarGroup('turbineY', nTurbs, type='c', lower=min(yBounds), upper=max(yBounds), value=turbineY)

    num_cons_sep = (nTurbs - 1) * nTurbs / 2
    optProb.addConGroup('sep', num_cons_sep, lower=0., upper=None)
    optProb.addConGroup('bound', nTurbs, lower=0., upper=None)
    optProb.addConGroup('damage', nTurbs, lower=None, upper=0.74)

    opt = SNOPT()
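random_start is not shown in this excerpt; a plausible sketch of a helper with that signature (uniform sampling inside the bounding box). This is an assumption, not the source implementation, and the real helper presumably also enforces the 126.4 m spacing argument:

import numpy as np


def random_start(nTurbs, min_space, xmin, xmax, ymin, ymax):
    # hypothetical helper: uniform random layout inside the bounding box;
    # min_space is accepted but not enforced in this sketch
    turbineX = np.random.uniform(xmin, xmax, nTurbs)
    turbineY = np.random.uniform(ymin, ymax, nTurbs)
    return turbineX, turbineY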
def run(self, problem): """pyOpt execution. Note that pyOpt controls the execution, and the individual optimizers (i.e., SNOPT) control the iteration. Args ---- problem : `Problem` Our parent `Problem`. """ self.pyopt_solution = None rel = problem.root._probdata.relevance # Metadata Setup self.metadata = create_local_meta(None, self.options['optimizer']) self.iter_count = 0 update_local_meta(self.metadata, (self.iter_count,)) # Initial Run with problem.root._dircontext: problem.root.solve_nonlinear(metadata=self.metadata) opt_prob = Optimization(self.options['title'], self._objfunc) # Add all parameters param_meta = self.get_desvar_metadata() self.indep_list = indep_list = list(param_meta) param_vals = self.get_desvars() for name, meta in iteritems(param_meta): opt_prob.addVarGroup(name, meta['size'], type='c', value=param_vals[name], lower=meta['lower'], upper=meta['upper']) opt_prob.finalizeDesignVariables() # Figure out parameter subsparsity for paramcomp index connections. # sub_param_conns is empty unless there are some index conns. # full_param_conns gets filled with the connections to the entire # parameter so that those params can be filtered out of the sparse # set if the full path is also relevant sub_param_conns = {} full_param_conns = {} for name in indep_list: pathname = problem.root.unknowns.metadata(name)['pathname'] sub_param_conns[name] = {} full_param_conns[name] = set() for target, info in iteritems(problem.root.connections): src, indices = info if src == pathname: if indices is not None: # Need to map the connection indices onto the desvar # indices if both are declared. dv_idx = param_meta[name].get('indices') indices = set(indices) if dv_idx is not None: indices.intersection_update(dv_idx) ldv_idx = list(dv_idx) mapped_idx = [ldv_idx.index(item) for item in indices] sub_param_conns[name][target] = mapped_idx else: sub_param_conns[name][target] = indices else: full_param_conns[name].add(target) # Add all objectives objs = self.get_objectives() self.quantities = list(objs) self.sparsity = OrderedDict() self.sub_sparsity = OrderedDict() for name in objs: opt_prob.addObj(name) self.sparsity[name] = self.indep_list # Calculate and save gradient for any linear constraints. 
lcons = self.get_constraints(lintype='linear').keys() if len(lcons) > 0: self.lin_jacs = problem.calc_gradient(indep_list, lcons, return_format='dict') #print("Linear Gradient") #print(self.lin_jacs) # Add all equality constraints econs = self.get_constraints(ctype='eq', lintype='nonlinear') con_meta = self.get_constraint_metadata() self.quantities += list(econs) for name in self.get_constraints(ctype='eq'): meta = con_meta[name] size = meta['size'] lower = upper = meta['equals'] # Sparsify Jacobian via relevance rels = rel.relevant[name] wrt = rels.intersection(indep_list) self.sparsity[name] = wrt if meta['linear']: opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=wrt, jac=self.lin_jacs[name]) else: jac = self._build_sparse(name, wrt, size, param_vals, sub_param_conns, full_param_conns, rels) opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac) # Add all inequality constraints incons = self.get_constraints(ctype='ineq', lintype='nonlinear') self.quantities += list(incons) for name in self.get_constraints(ctype='ineq'): meta = con_meta[name] size = meta['size'] # Bounds - double sided is supported lower = meta['lower'] upper = meta['upper'] # Sparsify Jacobian via relevance rels = rel.relevant[name] wrt = rels.intersection(indep_list) self.sparsity[name] = wrt if meta['linear']: opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=wrt, jac=self.lin_jacs[name]) else: jac = self._build_sparse(name, wrt, size, param_vals, sub_param_conns, full_param_conns, rels) opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac) # Instantiate the requested optimizer optimizer = self.options['optimizer'] try: exec('from pyoptsparse import %s' % optimizer) except ImportError: msg = "Optimizer %s is not available in this installation." % \ optimizer raise ImportError(msg) optname = vars()[optimizer] opt = optname() #Set optimization options for option, value in self.opt_settings.items(): opt.setOption(option, value) self._problem = problem # Execute the optimization problem if self.options['pyopt_diff']: # Use pyOpt's internal finite difference fd_step = problem.root.fd_options['step_size'] sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file) self._problem = None # Print results if self.options['print_results']: print(sol) # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() for name in indep_list: val = dv_dict[name] self.set_desvar(name, val) with self.root._dircontext: self.root.solve_nonlinear(metadata=self.metadata) # Save the most recent solution. self.pyopt_solution = sol try: exit_status = sol.optInform['value'] self.exit_flag = 1 if exit_status > 2: # bad self.exit_flag = 0 except KeyError: #nothing is here, so something bad happened! self.exit_flag = 0
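The relevance-based sparsification above amounts to telling pyoptsparse, through wrt and jac, which design-variable groups each constraint can actually depend on; a minimal standalone sketch of the same declaration:

import numpy as np
from pyoptsparse import Optimization


def toy(xdict):
    funcs = {'obj': float(np.sum(xdict['a']) + np.sum(xdict['b'])),
             'con': 2.0*xdict['a']}  # depends on 'a' only
    return funcs, False


optProb = Optimization('sparse_wrt', toy)
optProb.addVarGroup('a', 3, 'c', value=0.0, lower=-1.0, upper=1.0)
optProb.addVarGroup('b', 4, 'c', value=0.0, lower=-1.0, upper=1.0)
optProb.addObj('obj')
# declare that 'con' depends only on 'a'; the optional jac fixes the sparsity pattern
optProb.addConGroup('con', 3, lower=None, upper=1.0,
                    wrt=['a'], jac={'a': 2.0*np.eye(3)})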
r_nrel = np.array([2.8667, 5.6000, 8.3333, 11.7500, 15.8500, 19.9500, 24.0500, 28.1500,
                   32.2500, 36.3500, 40.4500, 44.5500, 48.6500, 52.7500, 56.1667, 58.9000,
                   61.6333, 63.0])  # radial positions (m)
rad = r_nrel*r_ratio

# Initialize input variables
rotorDiameter = np.ones(nturb)*rotor_diameter
generator_efficiency = np.ones(nturb)*0.944
yaw = np.ones((nwind, nturb))*0.
rpm = np.ones(nwind*nturb)*rpm_max

# Optimization
optProb = Optimization('Wind_Farm_APP', obj_func)
optProb.addObj('obj')

# Design Variables (scaled to 1)
nrpm = nturb*nwind
optProb.addVarGroup('xvars', nturb, 'c', lower=xlow/100., upper=xupp/100., value=turbineX/100.)  # x positions
optProb.addVarGroup('yvars', nturb, 'c', lower=ylow/100., upper=yupp/100., value=turbineY/100.)  # y positions
optProb.addVarGroup('rpm', nrpm, 'c', lower=rpmlow/10., upper=rpmupp/10., value=rpm/10.)  # rpm values

# Constraints (scaled to 1)
num_cons_sep = (nturb-1)*nturb/2
optProb.addConGroup('sep', num_cons_sep, lower=0., upper=None)  # separation between turbines
num_cons_spl = nwind*nobs
optProb.addConGroup('SPL', num_cons_spl, lower=0., upper=SPLlim/10.)  # SPL limit

opt = SNOPT()
opt.setOption('Scale option', 0)
opt.setOption('Iterations limit', 1000000)
res = opt(optProb)

# Printing optimization results (SNOPT format)
print res
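Because the design variables above are passed to pyoptsparse scaled by 1/100 (positions) and 1/10 (rpm), the optimum in res.xStar is in those scaled units; below is a short sketch of the unscaling that presumably follows (not shown in the excerpt). pyoptsparse's addVarGroup/addConGroup also accept a scale argument that can replace this manual bookkeeping.

# recover physical units from the scaled optimum
xf   = res.xStar['xvars']*100.   # turbine x positions (m)
yf   = res.xStar['yvars']*100.   # turbine y positions (m)
rpmf = res.xStar['rpm']*10.      # rotation rates (rpm)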