def test_regression_bug_fix_issue_2062_sql_meta_file_running_parallel(self):
    """Regression test for issue #2062.

    Re-running a parallel DOE that records to an existing 'cases.sql' must
    overwrite the stale recorder files (emitting warnings) instead of
    raising an exception.
    """
    from openmdao.test_suite.components.paraboloid import Paraboloid

    # First run: creates cases.sql_meta plus the per-rank case files.
    prob = om.Problem()
    prob.model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
    prob.model.add_design_var('x', lower=0.0, upper=1.0)
    prob.model.add_design_var('y', lower=0.0, upper=1.0)
    prob.model.add_objective('f_xy')

    prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
    prob.driver.options['run_parallel'] = True
    prob.driver.options['procs_per_model'] = 1
    prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    # Run this again. Because of the bug fix for issue 2062, this code should NOT
    # throw an exception
    prob = om.Problem()
    prob.model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
    prob.model.add_design_var('x', lower=0.0, upper=1.0)
    prob.model.add_design_var('y', lower=0.0, upper=1.0)
    prob.model.add_objective('f_xy')

    prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
    prob.driver.options['run_parallel'] = True
    prob.driver.options['procs_per_model'] = 1
    prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))

    prob.setup()

    # Rank 0 owns the shared metadata file, so it warns about both the
    # metadata file and its own per-rank file; other ranks warn only about
    # their own per-rank file.  (Assumes a 2-process run: cases.sql_0/_1 —
    # TODO confirm the test decorator forces N_PROCS == 2.)
    if prob.comm.rank == 0:
        expected_warnings = [
            (UserWarning, 'The existing case recorder metadata file, cases.sql_meta, '
                          'is being overwritten.'),
            (UserWarning, 'The existing case recorder file, cases.sql_0, is being '
                          'overwritten.'),
        ]
    else:
        expected_warnings = [
            (UserWarning, 'The existing case recorder file, cases.sql_1, is being '
                          'overwritten.'),
        ]

    with assert_warnings(expected_warnings):
        prob.run_driver()

    prob.cleanup()
def test_sql_meta_file_exists(self):
    """An existing sql_meta file is deleted/overwritten on a new run (Issue #2062)."""

    def _build_parallel_doe_problem():
        # Fresh 2-DV Paraboloid problem driven by a parallel full-factorial
        # DOE, recording to "cases.sql".
        p = om.Problem()
        p.model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
        p.model.add_design_var('x', lower=0.0, upper=1.0)
        p.model.add_design_var('y', lower=0.0, upper=1.0)
        p.model.add_objective('f_xy')
        p.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
        p.driver.options['run_parallel'] = True
        p.driver.options['procs_per_model'] = 1
        p.driver.add_recorder(om.SqliteRecorder("cases.sql"))
        return p

    # First run leaves cases.sql_meta and per-rank case files behind.
    prob = _build_parallel_doe_problem()
    prob.setup()
    prob.run_driver()
    prob.cleanup()

    # Run this again. It should NOT throw an exception.
    prob = _build_parallel_doe_problem()
    prob.setup()

    # Rank 0 additionally owns (and warns about) the shared metadata file.
    if prob.comm.rank == 0:
        expected_warnings = [
            (UserWarning, 'The existing case recorder metadata file, cases.sql_meta, '
                          'is being overwritten.'),
            (UserWarning, 'The existing case recorder file, cases.sql_0, is being '
                          'overwritten.'),
        ]
    else:
        expected_warnings = [
            (UserWarning, 'The existing case recorder file, cases.sql_1, is being '
                          'overwritten.'),
        ]

    with assert_warnings(expected_warnings):
        prob.run_driver()

    prob.cleanup()
def test_doe(self):
    """An XDSM diagram for a DOE-driven problem is written to file."""
    filename = 'pyxdsm_doe'
    out_format = PYXDSM_OUT

    problem = om.Problem()
    model = problem.model

    indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
    indeps.add_output('x')
    model.add_subsystem('C1', om.ExecComp(['y=2.0*x+1.'], x=2.0), promotes=['*'])

    problem.driver = om.DOEDriver()
    model.add_design_var('x', lower=0.0, upper=10.0)
    model.add_objective('y')

    problem.setup()

    # Conclude setup but don't run model.
    problem.final_setup()

    om.write_xdsm(problem, filename=filename, out_format=out_format,
                  quiet=QUIET, show_browser=SHOW, show_parallel=True)

    # Check if file was created
    self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
def test_index_number(self):
    """Viewer data for a specific case id exposes the recorded variable values."""
    problem = om.Problem()
    model = problem.model

    model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    problem.driver = om.DOEDriver(om.PlackettBurmanGenerator())
    problem.driver.add_recorder(self.recorder)

    problem.setup()
    problem.run_driver()
    problem.cleanup()

    data_dict = _get_viewer_data(self.filename, case_id=3)
    children = data_dict['tree']['children'][2]['children']

    # x and y report "N/A" for this case; f_xy carries a recorded value.
    self.assertEqual(children[0]['value'], "N/A")
    self.assertEqual(children[1]['value'], "N/A")
    self.assertEqual(children[2]['value'], np.array([27.]))
def test_pareto_dataset(weights):
    """pareto_subset() extracts the Pareto-optimal designs from a recorded DOE.

    Args:
        weights: length-3 sequence of objective scalers (presumably supplied
            by a pytest parametrization — TODO confirm).
    """
    weights_arr = np.array(weights)
    var_shape = (3,)

    prob = om.Problem()
    prob.model.add_subsystem("indeps", om.IndepVarComp("x", nans(var_shape)))
    # Passthrough splits x into two objective outputs: y1 = x[0], y2 = x[1:3].
    prob.model.add_subsystem(
        "passthrough",
        om.ExecComp(
            ["y1=x[0]", "y2=x[1:3]"], x=nans((3,)), y1=nans((1,)), y2=nans((2,))
        ),
    )
    prob.model.connect(
        "indeps.x",
        "passthrough.x",
    )
    prob.model.add_design_var(
        "indeps.x", lower=np.zeros(var_shape), upper=np.ones(var_shape)
    )
    # Objectives are the passthrough outputs, scaled by the given weights.
    prob.model.add_objective("passthrough.y1", scaler=weights_arr[0])
    prob.model.add_objective("passthrough.y2", scaler=weights_arr[1:3])

    # Enumerate the origin plus the positive and negative unit vectors.
    prob.driver = driver = om.DOEDriver(
        om.ListGenerator(
            [
                [("indeps.x", np.array(x))]
                for x in [
                    [0, 0, 0],
                    [1, 0, 0],
                    [0, 1, 0],
                    [0, 0, 1],
                    [-1, 0, 0],
                    [0, -1, 0],
                    [0, 0, -1],
                ]
            ]
        )
    )
    recorder = DatasetRecorder()
    driver.add_recorder(recorder)
    try:
        prob.setup()
        prob.run_driver()
    finally:
        prob.cleanup()

    ds = recorder.assemble_dataset(driver)
    # All 7 generated designs must be present in the assembled dataset.
    assert len(ds[DESIGN_ID]) == 7

    pareto_ds = pareto_subset(ds)
    # The subset must be a new dataset, not the input mutated in place.
    assert pareto_ds is not ds
    assert len(pareto_ds[DESIGN_ID]) == 3
    expected_pareto_set = -np.eye(3) * weights_arr
    # It is more convenient to simply look at the input vectors than the output
    # dito. We assume everything works in between.
    assert np.all(np.isin(expected_pareto_set, pareto_ds["indeps.x"]))
def test_dump_load(tmp_path):
    """Round-trip a recorded DOE dataset through scop.dump() / scop.load().

    Args:
        tmp_path: pytest-provided temporary directory.
    """
    var_shape = (3,)

    prob = om.Problem()
    prob.model.add_subsystem("indeps", om.IndepVarComp("x", nans(var_shape)))
    # Passthrough splits x into two outputs: y1 = x[0], y2 = x[1:3].
    prob.model.add_subsystem(
        "passthrough",
        om.ExecComp(
            ["y1=x[0]", "y2=x[1:3]"], x=nans((3,)), y1=nans((1,)), y2=nans((2,))
        ),
    )
    prob.model.connect(
        "indeps.x",
        "passthrough.x",
    )
    prob.model.add_design_var(
        "indeps.x", lower=np.zeros(var_shape), upper=np.ones(var_shape)
    )
    # Enumerate the origin plus the positive and negative unit vectors.
    prob.driver = driver = om.DOEDriver(
        om.ListGenerator(
            [
                [("indeps.x", np.array(x))]
                for x in [
                    [0, 0, 0],
                    [1, 0, 0],
                    [0, 1, 0],
                    [0, 0, 1],
                    [-1, 0, 0],
                    [0, -1, 0],
                    [0, 0, -1],
                ]
            ]
        )
    )
    recorder = scop.DatasetRecorder()
    driver.add_recorder(recorder)
    try:
        prob.setup()
        prob.run_driver()
    finally:
        prob.cleanup()

    ds = recorder.assemble_dataset(driver)
    # Deep copy taken up front so mutation by dump() would be detectable.
    ds_copy = ds.copy(deep=True)

    path = tmp_path / "dump.scop"
    scop.dump(ds, path)
    # Make sure we don't mutate the ds by dumping it
    assert_ds_equal(ds, ds_copy)

    dumped_and_loaded_ds = scop.load(path)
    assert_ds_equal(dumped_and_loaded_ds, ds)
def driver_setup(prob):
    """Change settings of the driver.

    Here the type of the driver has to be selected, whether it will be an
    optimisation driver or a DoE driver. In both cases there are multiple
    options to choose from to tune the driver. Two recorders are then
    attached to the driver for results and N2 plotting.

    Args:
        prob (om.Problem object) : Instance of the Problem class that is
            used to define the current routine.

    Raises:
        ValueError: If ``Rt.doedriver`` names an unknown DoE generator.
    """
    if Rt.type == 'Optim':
        # TBD : Genetic algorithm
        # if len(Rt.objective) > 1 and False:
        #     log.info("""More than 1 objective function, the driver will
        #               automatically be set to NSGA2""")
        #     prob.driver = om.pyOptSparseDriver()  # multifunc driver : NSGA2
        #     prob.driver.options['optimizer'] = 'NSGA2'
        #     prob.driver.opt_settings['PopSize'] = 7
        #     prob.driver.opt_settings['maxGen'] = Rt.max_iter
        # else:
        prob.driver = om.ScipyOptimizeDriver()
        prob.driver.options['optimizer'] = Rt.driver
        prob.driver.options['maxiter'] = Rt.max_iter
        prob.driver.options['tol'] = Rt.tol
        prob.driver.options['disp'] = True
    elif Rt.type == 'DoE':
        if Rt.doedriver == 'Uniform':
            driver_type = om.UniformGenerator(num_samples=Rt.samplesnb)
        elif Rt.doedriver == 'LatinHypercube':
            driver_type = om.LatinHypercubeGenerator(samples=Rt.samplesnb)
        elif Rt.doedriver == 'FullFactorial':
            driver_type = om.FullFactorialGenerator(levels=Rt.samplesnb)
        elif Rt.doedriver == 'CSVGenerated':
            file = opf.gen_doe_csv(Rt.user_config)
            driver_type = om.CSVGenerator(file)
        else:
            # BUG FIX: previously an unknown name fell through and caused a
            # NameError on 'driver_type' below; fail fast with a clear message.
            raise ValueError(
                'Unknown DoE driver type: {}'.format(Rt.doedriver))
        prob.driver = om.DOEDriver(driver_type)
        prob.driver.options['run_parallel'] = True
        prob.driver.options['procs_per_model'] = 1
    else:
        log.error('Type of optimisation not recognize!!!')

    ## Attaching a recorder and a diagramm visualizer ##
    prob.driver.recording_options['record_inputs'] = True
    prob.driver.add_recorder(
        om.SqliteRecorder(optim_dir_path + '/circuit.sqlite'))
    prob.driver.add_recorder(
        om.SqliteRecorder(optim_dir_path + '/Driver_recorder.sql'))
def test_scaling_report(self):
    """DOEDriver.scaling_report() runs without error on a model whose design
    variables and objective are all discrete."""

    class ParaboloidDiscrete(om.ExplicitComponent):
        # Integer-valued paraboloid: f(x, y) = (x-3)^2 + x*y + (y+4)^2 - 3.
        def setup(self):
            self.add_discrete_input('x', val=10)
            self.add_discrete_input('y', val=0)
            self.add_discrete_output('f_xy', val=0)

        def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
            x = discrete_inputs['x']
            y = discrete_inputs['y']
            f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
            # Output was declared as a discrete int, so truncate the float.
            discrete_outputs['f_xy'] = int(f_xy)

    prob = om.Problem()
    model = prob.model

    # Add independent variables
    indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
    indeps.add_discrete_output('x', 4)
    indeps.add_discrete_output('y', 3)

    # Add components
    model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])

    # Specify design variable range and objective
    model.add_design_var('x')
    model.add_design_var('y')
    model.add_objective('f_xy')

    # Explicit list of sample points for the DOE.
    samples = [
        [('x', 5), ('y', 1)],
        [('x', 3), ('y', 6)],
        [('x', -1), ('y', 3)],
    ]

    # Setup driver
    prob.driver = om.DOEDriver(om.ListGenerator(samples))

    # run driver
    prob.setup()
    prob.run_driver()
    prob.cleanup()

    # generate scaling report
    prob.driver.scaling_report(show_browser=False)
def run_sequential():
    """Run the full-factorial DOE sequentially on this rank's comm."""
    # problem will run in the single proc comm for this rank
    problem = om.Problem(comm=my_comm)
    model = problem.model

    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    driver = problem.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
    driver.options['run_parallel'] = False
    driver.options['procs_per_model'] = 1
    driver.add_recorder(om.SqliteRecorder("cases.sql"))

    problem.setup()
    problem.run_driver()
    problem.cleanup()
def test_report_generation_basic_doedriver(self):
    """A DOEDriver run produces an N2 report but no scaling report."""
    # Test a driver that does not generate scaling report
    setup_default_reports()

    prob = self.setup_and_run_simple_problem(
        driver=om.DOEDriver(om.PlackettBurmanGenerator()))

    problem_reports_dir = pathlib.Path(_reports_dir) / prob._name

    path = problem_reports_dir / self.n2_filename
    self.assertTrue(path.is_file(),
                    f'The N2 report file, {str(path)} was not found')

    # DOEDriver won't cause the creation of a scaling report
    path = problem_reports_dir / self.scaling_filename
    self.assertFalse(
        path.is_file(),
        f'The scaling report file, {str(path)}, was found but should not have'
    )
def setUp(self):
    """Force notebook-style HTML rendering, then run and record a
    full-factorial DOE so each test can inspect the generated cases."""
    # override notebook flag for system, variable table and sqlite_reader
    from openmdao.core import system
    from openmdao.utils import variable_table
    from openmdao.recorders import sqlite_reader
    system.notebook = variable_table.notebook = sqlite_reader.notebook = True

    # capture HTML output from variable_table
    self.html_stream = StringIO()
    variable_table.HTML = lambda x: self.html_stream.write(x)
    sqlite_reader.HTML = lambda x: self.html_stream.write(x)

    # create & run problem, generate cases
    model = om.Group()
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob = om.Problem(model)
    prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
    prob.driver.add_recorder(
        om.SqliteRecorder('cases.sql', record_viewer_data=False))

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    # expected results
    self.expected_sources = ['driver']
    # 3 levels x 2 design variables -> 9 full-factorial cases
    self.expected_cases = [
        'rank0:DOEDriver_FullFactorial|0',
        'rank0:DOEDriver_FullFactorial|1',
        'rank0:DOEDriver_FullFactorial|2',
        'rank0:DOEDriver_FullFactorial|3',
        'rank0:DOEDriver_FullFactorial|4',
        'rank0:DOEDriver_FullFactorial|5',
        'rank0:DOEDriver_FullFactorial|6',
        'rank0:DOEDriver_FullFactorial|7',
        'rank0:DOEDriver_FullFactorial|8'
    ]
model.add_design_var('length', lower=0.0, upper=0.254) model.add_design_var('eff', lower=0.25, upper=0.325) model.add_design_var('eps', lower=0.02, upper=0.8) model.add_design_var('R_m', lower=1., upper=250.0) model.add_design_var('R_p', lower=1., upper=250.0) model.add_design_var('R_s', lower=1., upper=250.0) if case == 'cold': model.add_design_var('r_bat', lower=0.0, upper=1.0) model.add_objective('tBat') model.add_objective('tProp') model.add_objective('tBPanel') model.add_objective('tDPanel') prob.driver = om.DOEDriver(om.CSVGenerator(input_file)) prob.driver.add_recorder(om.SqliteRecorder("cases.sql")) prob.setup(check=True) prob.run_driver() prob.cleanup() cr = om.CaseReader("cases.sql") cases = cr.list_cases('driver') values = [] if case == 'cold': for nt in cases: outputs = cr.get_case(nt).outputs values.append((outputs['eps'], outputs['length'], outputs['eff'],
# Design variables for the DOE sweep.  NOTE(review): no lower/upper bounds are
# given here, so the generator relies on whatever defaults apply — confirm
# bounds are set elsewhere before trusting the sampled ranges.
prob.model.add_design_var('ratios.T')
prob.model.add_design_var('ratios.L')
prob.model.add_design_var('ratios.B')
prob.model.add_design_var('wts.Wt')
prob.model.add_design_var('const.Disp')
prob.model.add_design_var('stab.GMT')
prob.model.add_design_var('fuel.fuelWt')
prob.model.add_design_var('fuel.MCR')
prob.model.add_design_var('fuel.etaRun')
prob.model.add_design_var('fuel.nStarts')

# set driver for design of experiment
#prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=10000))
# latin hypercube is much better at determining edge behavior
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=50))
prob.driver.add_recorder(om.SqliteRecorder("musvDOEv4cases.sql"))

# this is the meat of the OpenMDAO run
prob.setup()
prob.run_driver()
prob.cleanup()

# --- set up case reading
cr = om.CaseReader("musvDOEv4cases.sql")
cases = cr.list_cases('driver')

# # --- setup write to CSV with outputs
# with open('musvDOEv4cases.csv', mode='w') as csv_file:
#     #set up CSV file to use writer
#     fieldnames = ['Cb','L','B','T','FlywheelCapacity','GMT','Wt','Disp','Excess','MCR','fuelWt','etaRun','nStarts']
def set_driver(self, wt_opt):
    """Attach and configure the driver on ``wt_opt``.

    Exactly one of the ``optimization`` or ``design_of_experiments`` flags in
    ``self.opt["driver"]`` must be enabled; the corresponding driver is then
    built from the options in that sub-dict.

    Args:
        wt_opt (om.Problem): problem that receives the configured driver.

    Returns:
        om.Problem: the same problem, with ``wt_opt.driver`` set.

    Raises:
        ImportError: if a pyOptSparse-based solver is requested but not installed.
        ValueError: if the requested optimizer is not supported.
        Exception: if no driver flag is enabled or the DOE generator is unknown.
    """
    folder_output = self.opt["general"]["folder_output"]

    if self.opt["driver"]["optimization"]["flag"]:
        # Hoist the repeated deep dict lookups.
        opt_options = self.opt["driver"]["optimization"]
        step_size = self._get_step_size()

        # Solver has specific meaning in OpenMDAO
        wt_opt.model.approx_totals(method="fd", step=step_size, form=opt_options["form"])

        # Set optimization solver and options. First, Scipy's SLSQP
        if opt_options["solver"] == "SLSQP":
            wt_opt.driver = om.ScipyOptimizeDriver()
            wt_opt.driver.options["optimizer"] = opt_options["solver"]
            wt_opt.driver.options["tol"] = opt_options["tol"]
            wt_opt.driver.options["maxiter"] = opt_options["max_iter"]

        # The next two optimization methods require pyOptSparse.
        elif opt_options["solver"] == "CONMIN":
            try:
                from openmdao.api import pyOptSparseDriver
            except ImportError:  # was a bare except; narrowed to the real failure
                raise ImportError(
                    "You requested the optimization solver CONMIN, but you have not installed the pyOptSparseDriver. Please do so and rerun."
                )
            wt_opt.driver = pyOptSparseDriver()
            wt_opt.driver.options["optimizer"] = opt_options["solver"]
            wt_opt.driver.opt_settings["ITMAX"] = opt_options["max_iter"]

        elif opt_options["solver"] == "SNOPT":
            try:
                from openmdao.api import pyOptSparseDriver
            except ImportError:
                raise ImportError(
                    "You requested the optimization solver SNOPT, but you have not installed the pyOptSparseDriver. Please do so and rerun."
                )
            wt_opt.driver = pyOptSparseDriver()
            try:
                wt_opt.driver.options["optimizer"] = opt_options["solver"]
            except Exception as err:  # pyOptSparse present but SNOPT not built in
                raise ImportError(
                    "You requested the optimization solver SNOPT, but you have not installed it within the pyOptSparseDriver. Please do so and rerun."
                ) from err
            wt_opt.driver.opt_settings["Major optimality tolerance"] = float(opt_options["tol"])
            wt_opt.driver.opt_settings["Major iterations limit"] = int(opt_options["max_major_iter"])
            wt_opt.driver.opt_settings["Iterations limit"] = int(opt_options["max_minor_iter"])
            wt_opt.driver.opt_settings["Major feasibility tolerance"] = float(opt_options["tol"])
            wt_opt.driver.opt_settings["Summary file"] = os.path.join(folder_output, "SNOPT_Summary_file.txt")
            wt_opt.driver.opt_settings["Print file"] = os.path.join(folder_output, "SNOPT_Print_file.txt")
            if "hist_file_name" in opt_options:
                wt_opt.driver.hist_file = opt_options["hist_file_name"]
            if "verify_level" in opt_options:
                wt_opt.driver.opt_settings["Verify level"] = opt_options["verify_level"]
            else:
                wt_opt.driver.opt_settings["Verify level"] = -1
            # wt_opt.driver.declare_coloring()
            if "hotstart_file" in opt_options:
                wt_opt.driver.hotstart_file = opt_options["hotstart_file"]

        else:
            # BUG FIX: message previously read "...<solver>is not yet supported!"
            # (missing space before "is").
            raise ValueError(
                "The optimizer " + opt_options["solver"] + " is not yet supported!"
            )

    elif self.opt["driver"]["design_of_experiments"]["flag"]:
        doe_options = self.opt["driver"]["design_of_experiments"]
        generator_name = doe_options["generator"].lower()

        if generator_name == "uniform":
            generator = om.UniformGenerator(
                num_samples=doe_options["num_samples"],
                seed=doe_options["seed"],
            )
        elif generator_name == "fullfact":
            generator = om.FullFactorialGenerator(levels=doe_options["num_samples"])
        elif generator_name == "plackettburman":
            generator = om.PlackettBurmanGenerator()
        elif generator_name == "boxbehnken":
            generator = om.BoxBehnkenGenerator()
        elif generator_name == "latinhypercube":
            generator = om.LatinHypercubeGenerator(
                samples=doe_options["num_samples"],
                criterion=doe_options["criterion"],
                seed=doe_options["seed"],
            )
        else:
            raise Exception(
                "The generator type {} is unsupported.".format(
                    doe_options["generator"]
                )
            )

        # Initialize driver
        wt_opt.driver = om.DOEDriver(generator)

        # options
        wt_opt.driver.options["run_parallel"] = doe_options["run_parallel"]

    else:
        raise Exception(
            "Design variables are set to be optimized or studied, but no driver is selected. Please enable a driver."
        )

    return wt_opt
# Constraint: CN_β > 0.0 model.add_constraint('ecrm_150.CN_beta', lower=0.0) # Constraint: CL < 1.3 model.add_constraint('ecrm_150.CL', upper=1.3) # Constraint: CL = W/qS model.add_constraint('ecrm_150.L_equals_W', equals=0.0) # Design Variables model.add_design_var('wing_cord', lower=45.0, upper=75.0) #model.add_design_var('vert_tail_area', lower=1500.0, upper=3000.0) #model.add_design_var('horiz_tail_area', lower=4500, upper=7500) model.add_design_var('ecrm_150.alpha', lower=2.0, upper=14.0) prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=4)) prob.driver.options['debug_print'] = ['desvars', 'nl_cons', 'objs'] prob.driver.add_recorder(om.SqliteRecorder("cases.sql")) prob.driver.recording_options['record_desvars'] = True prob.driver.recording_options['record_objectives'] = True prob.driver.recording_options['record_constraints'] = True prob.driver.recording_options['record_derivatives'] = False prob.setup() # Set Initial Conditions prob.set_val('beta', 0.0, units='deg') prob.set_val('re', 1.0e6, units='1/m') prob.set_val('rho', 1.225, units='kg/m**3') #prob.set_val('rho', 0.625, units='kg/m**3') prob.set_val('CT', grav_constant * 17.e-6, units='1/s')
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid

# Paraboloid model with two independent, promoted design variables.
problem = prob = om.Problem()
model = prob.model

model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])

model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')

# Uniform random sampling of the design space, recorded to cases.sql.
prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=10))
#prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=10))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))

prob.setup()
prob.run_driver()
prob.cleanup()

# Read the recorded cases back and collect (x, y, f_xy) for each one.
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
print(len(cases))

values = [
    (case_outputs['x'], case_outputs['y'], case_outputs['f_xy'])
    for case_outputs in (cr.get_case(case).outputs for case in cases)
]
# Design variables with bounds for the sampled study (the CSV file name
# suggests a Morris screening sample — TODO confirm).
model.add_design_var('length', lower=0.0, upper=0.254)
model.add_design_var('eff', lower=0.25, upper=0.32)
model.add_design_var('P_un', lower=0.0, upper=1.0)
model.add_design_var('eps', lower=0.02, upper=0.8)
model.add_design_var('alp', lower=0.23, upper=0.48)
model.add_design_var('GlMain', lower=0.004, upper=1.0)
model.add_design_var('GlProp', lower=0.004, upper=1.0)
model.add_design_var('GlTether', lower=0.004, upper=1.0)
model.add_design_var('GlPanel', lower=0.004, upper=1.0)

model.add_objective('tBat')
model.add_objective('tProp')

prob.setup(check=True)

# NOTE(review): the driver and recorder are assigned *after* prob.setup();
# OpenMDAO conventionally sets the driver before setup — confirm the cases
# are actually recorded as intended.
prob.driver = om.DOEDriver(om.CSVGenerator('morris_sample.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))

prob.run_driver()
prob.cleanup()

# Read the recorded objective values back for analysis.
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')

values = []
for case in cases:
    outputs = cr.get_case(case).outputs
    values.append((outputs['tBat'], outputs['tProp']))

# One row per case: [tBat, tProp]
data = np.reshape(values, (len(cases), 2))
def run_routine():
    """Run optimisation with openmdao.

    Function 'run_routine' is used to define the optimisation problem for
    openmdao. The different parameters to define variables are passed through
    a global dictionary (for now).

    Source:
        * http://openmdao.org/twodocs/versions/latest/getting_started/index.html

    Raises:
        ValueError: if ``Rt.type`` or ``Rt.doetype`` is not recognized.
    """
    # Initialize dictionaries
    # init_dict()

    # Build the model
    prob = om.Problem()
    model = prob.model

    # Build model components
    indeps = model.add_subsystem('indeps', om.IndepVarComp())
    model.add_subsystem('objective', objective_function())
    model.add_subsystem('const', constraint())

    # Choose between optimizer or driver
    if Rt.type == 'DoE':
        if Rt.doetype == 'uniform':
            driver = prob.driver = om.DOEDriver(
                om.UniformGenerator(num_samples=Rt.samplesnb))
        elif Rt.doetype == 'fullfact':  # 2->9 3->81
            driver = prob.driver = om.DOEDriver(
                om.FullFactorialGenerator(Rt.samplesnb))
        else:
            # BUG FIX: an unknown DoE type previously left 'driver' unbound,
            # causing a confusing NameError at the recorder step below.
            raise ValueError('Unknown DoE type: {}'.format(Rt.doetype))
    elif Rt.type == 'Optim':
        driver = prob.driver = om.ScipyOptimizeDriver()  # SLSQP,COBYLA,shgo,TNC
        driver.options['optimizer'] = Rt.driver
        # driver.options['maxiter'] = 20
        driver.options['tol'] = 1e-2
        if Rt.driver == 'COBYLA':
            driver.opt_settings['catol'] = 0.06
    else:
        # BUG FIX: same unbound-'driver' issue for an unrecognized routine type.
        raise ValueError('Unknown routine type: {}'.format(Rt.type))

    # Connect problem components to model components
    # Design variables
    for key, (name, listval, minval, maxval,
              setcommand, getcommand) in design_var_dict.items():
        # Scale each output by the order of magnitude of its initial value.
        norm = int(np.log10(abs(listval[0]) + 1) + 1)
        indeps.add_output(key, listval[0], ref=norm, ref0=0)
        model.connect('indeps.' + key, 'objective.' + key)
        model.add_design_var('indeps.' + key, lower=minval, upper=maxval)

    # Constraints
    for key, (name, listval, minval, maxval, getcommand) in res_var_dict.items():
        # Select only the constraints the routine asked for
        if name in Rt.constraints:
            norm = int(np.log10(abs(listval[0]) + 1) + 1)
            model.add_constraint('const.' + name, ref=norm,
                                 lower=-0.25, upper=0.25)

    # Objective function
    model.add_objective('objective.{}'.format(Rt.objective))

    # Recorder
    path = optim_dir_path
    driver.add_recorder(om.SqliteRecorder(path + '/Driver_recorder.sql'))

    # Run
    prob.setup()
    prob.run_driver()
    prob.cleanup()

    # Results
    log.info('=========================================')
    log.info('min = ' + str(prob['objective.{}'.format(Rt.objective)]))
    for key, (name, listval, minval, maxval,
              setcommand, getcommand) in design_var_dict.items():
        log.info(name + ' = ' + str(prob['indeps.' + key])
                 + '\n Min :' + str(minval) + ' Max : ' + str(maxval))
    log.info('Variable history')
    for key, (name, listval, minval, maxval,
              setcommand, getcommand) in design_var_dict.items():
        log.info(name + ' => ' + str(listval))
    log.info('=========================================')

    # Generate plots, maybe make a dynamic plot
    opf.read_results(optim_dir_path, Rt.type)