def test_index_number(self):
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = om.DOEDriver(om.PlackettBurmanGenerator())
    prob.driver.add_recorder(self.recorder)

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    data_dict = _get_viewer_data(self.filename, case_id=3)

    vals = data_dict['tree']['children'][2]['children']
    x_val = vals[0]['value']
    y_val = vals[1]['value']
    f_xy_val = vals[2]['value']

    self.assertEqual(x_val, "N/A")
    self.assertEqual(y_val, "N/A")
    self.assertEqual(f_xy_val, np.array([27.]))
def test_report_generation_basic_doedriver(self):
    # Test a driver that does not generate a scaling report
    setup_default_reports()

    prob = self.setup_and_run_simple_problem(
        driver=om.DOEDriver(om.PlackettBurmanGenerator()))

    problem_reports_dir = pathlib.Path(_reports_dir).joinpath(prob._name)

    path = pathlib.Path(problem_reports_dir).joinpath(self.n2_filename)
    self.assertTrue(path.is_file(),
                    f'The N2 report file, {str(path)}, was not found')

    # DOEDriver won't cause the creation of a scaling report
    path = pathlib.Path(problem_reports_dir).joinpath(self.scaling_filename)
    self.assertFalse(path.is_file(),
                     f'The scaling report file, {str(path)}, was found but should not have been')
def set_driver(self, wt_opt):
    folder_output = self.opt["general"]["folder_output"]

    if self.opt["driver"]["optimization"]["flag"]:
        step_size = self._get_step_size()

        # "Solver" has a specific meaning in OpenMDAO
        wt_opt.model.approx_totals(method="fd", step=step_size,
                                   form=self.opt["driver"]["optimization"]["form"])

        # Set optimization solver and options. First, SciPy's SLSQP
        if self.opt["driver"]["optimization"]["solver"] == "SLSQP":
            wt_opt.driver = om.ScipyOptimizeDriver()
            wt_opt.driver.options["optimizer"] = self.opt["driver"]["optimization"]["solver"]
            wt_opt.driver.options["tol"] = self.opt["driver"]["optimization"]["tol"]
            wt_opt.driver.options["maxiter"] = self.opt["driver"]["optimization"]["max_iter"]

        # The next two optimization methods require pyOptSparse.
        elif self.opt["driver"]["optimization"]["solver"] == "CONMIN":
            try:
                from openmdao.api import pyOptSparseDriver
            except ImportError:
                raise ImportError(
                    "You requested the optimization solver CONMIN, but pyOptSparse is not installed. Please install it and rerun."
                )
            wt_opt.driver = pyOptSparseDriver()
            wt_opt.driver.options["optimizer"] = self.opt["driver"]["optimization"]["solver"]
            wt_opt.driver.opt_settings["ITMAX"] = self.opt["driver"]["optimization"]["max_iter"]

        elif self.opt["driver"]["optimization"]["solver"] == "SNOPT":
            try:
                from openmdao.api import pyOptSparseDriver
            except ImportError:
                raise ImportError(
                    "You requested the optimization solver SNOPT, but pyOptSparse is not installed. Please install it and rerun."
                )
            wt_opt.driver = pyOptSparseDriver()
            try:
                wt_opt.driver.options["optimizer"] = self.opt["driver"]["optimization"]["solver"]
            except Exception:
                raise ImportError(
                    "You requested the optimization solver SNOPT, but it is not available within pyOptSparse. Please install it and rerun."
                )
            wt_opt.driver.opt_settings["Major optimality tolerance"] = float(
                self.opt["driver"]["optimization"]["tol"]
            )
            wt_opt.driver.opt_settings["Major iterations limit"] = int(
                self.opt["driver"]["optimization"]["max_major_iter"]
            )
            wt_opt.driver.opt_settings["Iterations limit"] = int(
                self.opt["driver"]["optimization"]["max_minor_iter"]
            )
            wt_opt.driver.opt_settings["Major feasibility tolerance"] = float(
                self.opt["driver"]["optimization"]["tol"]
            )
            wt_opt.driver.opt_settings["Summary file"] = os.path.join(folder_output, "SNOPT_Summary_file.txt")
            wt_opt.driver.opt_settings["Print file"] = os.path.join(folder_output, "SNOPT_Print_file.txt")
            if "hist_file_name" in self.opt["driver"]["optimization"]:
                wt_opt.driver.hist_file = self.opt["driver"]["optimization"]["hist_file_name"]
            if "verify_level" in self.opt["driver"]["optimization"]:
                wt_opt.driver.opt_settings["Verify level"] = self.opt["driver"]["optimization"]["verify_level"]
            else:
                wt_opt.driver.opt_settings["Verify level"] = -1
            # wt_opt.driver.declare_coloring()
            if "hotstart_file" in self.opt["driver"]["optimization"]:
                wt_opt.driver.hotstart_file = self.opt["driver"]["optimization"]["hotstart_file"]

        else:
            raise ValueError(
                "The optimizer " + self.opt["driver"]["optimization"]["solver"] + " is not yet supported!"
            )

    elif self.opt["driver"]["design_of_experiments"]["flag"]:
        if self.opt["driver"]["design_of_experiments"]["generator"].lower() == "uniform":
            generator = om.UniformGenerator(
                num_samples=self.opt["driver"]["design_of_experiments"]["num_samples"],
                seed=self.opt["driver"]["design_of_experiments"]["seed"],
            )
        elif self.opt["driver"]["design_of_experiments"]["generator"].lower() == "fullfact":
            generator = om.FullFactorialGenerator(levels=self.opt["driver"]["design_of_experiments"]["num_samples"])
        elif self.opt["driver"]["design_of_experiments"]["generator"].lower() == "plackettburman":
            generator = om.PlackettBurmanGenerator()
        elif self.opt["driver"]["design_of_experiments"]["generator"].lower() == "boxbehnken":
            generator = om.BoxBehnkenGenerator()
        elif self.opt["driver"]["design_of_experiments"]["generator"].lower() == "latinhypercube":
            generator = om.LatinHypercubeGenerator(
                samples=self.opt["driver"]["design_of_experiments"]["num_samples"],
                criterion=self.opt["driver"]["design_of_experiments"]["criterion"],
                seed=self.opt["driver"]["design_of_experiments"]["seed"],
            )
        else:
            raise Exception(
                "The generator type {} is unsupported.".format(
                    self.opt["driver"]["design_of_experiments"]["generator"]
                )
            )

        # Initialize driver and its options
        wt_opt.driver = om.DOEDriver(generator)
        wt_opt.driver.options["run_parallel"] = self.opt["driver"]["design_of_experiments"]["run_parallel"]

    else:
        raise Exception(
            "Design variables are set to be optimized or studied, but no driver is selected. Please enable a driver."
        )

    return wt_opt
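

# For reference, a minimal sketch of the nested options dictionary that
# set_driver() reads above. The key names mirror the dictionary accesses in the
# method; the concrete values below are illustrative assumptions only, not
# defaults taken from the original project.
_example_opt = {
    "general": {"folder_output": "outputs"},
    "driver": {
        "optimization": {
            "flag": True,
            "solver": "SLSQP",   # "CONMIN" and "SNOPT" additionally require pyOptSparse
            "tol": 1.0e-6,
            "max_iter": 100,
            "form": "central",   # finite-difference form passed to approx_totals()
            # SNOPT also reads "max_major_iter" and "max_minor_iter" and, optionally,
            # "hist_file_name", "verify_level", and "hotstart_file".
        },
        "design_of_experiments": {
            "flag": False,       # only one of the two "flag" entries should be True
            "generator": "latinhypercube",  # or "uniform", "fullfact", "plackettburman", "boxbehnken"
            "num_samples": 20,
            "criterion": "center",
            "seed": 12345,
            "run_parallel": False,
        },
    },
}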